[intelxl] Allow for arbitrary placement of ring tail registers
[ipxe.git] / src / drivers / net / intelxl.c
1 /*
2 * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdint.h>
27 #include <string.h>
28 #include <stdio.h>
29 #include <unistd.h>
30 #include <errno.h>
31 #include <byteswap.h>
32 #include <ipxe/netdevice.h>
33 #include <ipxe/ethernet.h>
34 #include <ipxe/if_ether.h>
35 #include <ipxe/vlan.h>
36 #include <ipxe/iobuf.h>
37 #include <ipxe/malloc.h>
38 #include <ipxe/pci.h>
39 #include <ipxe/version.h>
40 #include "intelxl.h"
41
42 /** @file
43 *
44 * Intel 40 Gigabit Ethernet network card driver
45 *
46 */
47
48 static void intelxl_reopen_admin ( struct intelxl_nic *intelxl );
49
50 /******************************************************************************
51 *
52 * Device reset
53 *
54 ******************************************************************************
55 */
56
57 /**
58 * Reset hardware
59 *
60 * @v intelxl Intel device
61 * @ret rc Return status code
62 */
63 static int intelxl_reset ( struct intelxl_nic *intelxl ) {
64 uint32_t pfgen_ctrl;
65
66 /* Perform a global software reset */
67 pfgen_ctrl = readl ( intelxl->regs + INTELXL_PFGEN_CTRL );
68 writel ( ( pfgen_ctrl | INTELXL_PFGEN_CTRL_PFSWR ),
69 intelxl->regs + INTELXL_PFGEN_CTRL );
70 mdelay ( INTELXL_RESET_DELAY_MS );
71
72 return 0;
73 }
74
75 /******************************************************************************
76 *
77 * MAC address
78 *
79 ******************************************************************************
80 */
81
82 /**
83 * Fetch initial MAC address and maximum frame size
84 *
85 * @v intelxl Intel device
86 * @v netdev Network device
87 * @ret rc Return status code
88 */
89 static int intelxl_fetch_mac ( struct intelxl_nic *intelxl,
90 struct net_device *netdev ) {
91 union intelxl_receive_address mac;
92 uint32_t prtgl_sal;
93 uint32_t prtgl_sah;
94 size_t mfs;
95
96 /* Read NVM-loaded address */
97 prtgl_sal = readl ( intelxl->regs + INTELXL_PRTGL_SAL );
98 prtgl_sah = readl ( intelxl->regs + INTELXL_PRTGL_SAH );
99 mac.reg.low = cpu_to_le32 ( prtgl_sal );
100 mac.reg.high = cpu_to_le32 ( prtgl_sah );
101
102 /* Check that address is valid */
103 if ( ! is_valid_ether_addr ( mac.raw ) ) {
104 DBGC ( intelxl, "INTELXL %p has invalid MAC address (%s)\n",
105 intelxl, eth_ntoa ( mac.raw ) );
106 return -ENOENT;
107 }
108
109 /* Copy MAC address */
110 DBGC ( intelxl, "INTELXL %p has autoloaded MAC address %s\n",
111 intelxl, eth_ntoa ( mac.raw ) );
112 memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN );
113
114 /* Get maximum frame size */
115 mfs = INTELXL_PRTGL_SAH_MFS_GET ( prtgl_sah );
116 netdev->max_pkt_len = ( mfs - 4 /* CRC */ );
117
118 return 0;
119 }
120
121 /******************************************************************************
122 *
123 * Admin queue
124 *
125 ******************************************************************************
126 */
127
/** Admin queue register offsets
 *
 * Offsets of the admin queue registers relative to the queue's base
 * address, allowing the same code to drive both the command and
 * event admin queues.
 */
static const struct intelxl_admin_offsets intelxl_admin_offsets = {
	.bal = INTELXL_ADMIN_BAL,
	.bah = INTELXL_ADMIN_BAH,
	.len = INTELXL_ADMIN_LEN,
	.head = INTELXL_ADMIN_HEAD,
	.tail = INTELXL_ADMIN_TAIL,
};
136
137 /**
138 * Allocate admin queue
139 *
140 * @v intelxl Intel device
141 * @v admin Admin queue
142 * @ret rc Return status code
143 */
144 static int intelxl_alloc_admin ( struct intelxl_nic *intelxl,
145 struct intelxl_admin *admin ) {
146 size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
147 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
148
149 /* Allocate admin queue */
150 admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN );
151 if ( ! admin->buf )
152 return -ENOMEM;
153 admin->desc = ( ( ( void * ) admin->buf ) + buf_len );
154
155 DBGC ( intelxl, "INTELXL %p A%cQ is at [%08llx,%08llx) buf "
156 "[%08llx,%08llx)\n", intelxl,
157 ( ( admin == &intelxl->command ) ? 'T' : 'R' ),
158 ( ( unsigned long long ) virt_to_bus ( admin->desc ) ),
159 ( ( unsigned long long ) ( virt_to_bus ( admin->desc ) + len ) ),
160 ( ( unsigned long long ) virt_to_bus ( admin->buf ) ),
161 ( ( unsigned long long ) ( virt_to_bus ( admin->buf ) +
162 buf_len ) ) );
163 return 0;
164 }
165
/**
 * Enable admin queue
 *
 * @v intelxl		Intel device
 * @v admin		Admin queue
 *
 * Zeroes the descriptor ring, programs the queue registers, and
 * enables the queue.  The length/enable register is written last,
 * since it carries the enable bit.
 */
static void intelxl_enable_admin ( struct intelxl_nic *intelxl,
				   struct intelxl_admin *admin ) {
	size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
	const struct intelxl_admin_offsets *regs = admin->regs;
	void *admin_regs = ( intelxl->regs + admin->base );
	physaddr_t address;

	/* Initialise admin queue */
	memset ( admin->desc, 0, len );

	/* Reset head and tail registers */
	writel ( 0, admin_regs + regs->head );
	writel ( 0, admin_regs + regs->tail );

	/* Reset queue index */
	admin->index = 0;

	/* Program queue address */
	address = virt_to_bus ( admin->desc );
	writel ( ( address & 0xffffffffUL ), admin_regs + regs->bal );
	if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
		/* The 64-bit cast avoids an invalid >=width shift on
		 * builds where physaddr_t is only 32 bits wide.
		 */
		writel ( ( ( ( uint64_t ) address ) >> 32 ),
			 admin_regs + regs->bah );
	} else {
		writel ( 0, admin_regs + regs->bah );
	}

	/* Program queue length and enable queue */
	writel ( ( INTELXL_ADMIN_LEN_LEN ( INTELXL_ADMIN_NUM_DESC ) |
		   INTELXL_ADMIN_LEN_ENABLE ),
		 admin_regs + regs->len );
}
204
205 /**
206 * Disable admin queue
207 *
208 * @v intelxl Intel device
209 * @v admin Admin queue
210 */
211 static void intelxl_disable_admin ( struct intelxl_nic *intelxl,
212 struct intelxl_admin *admin ) {
213 const struct intelxl_admin_offsets *regs = admin->regs;
214 void *admin_regs = ( intelxl->regs + admin->base );
215
216 /* Disable queue */
217 writel ( 0, admin_regs + regs->len );
218 }
219
220 /**
221 * Free admin queue
222 *
223 * @v intelxl Intel device
224 * @v admin Admin queue
225 */
226 static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused,
227 struct intelxl_admin *admin ) {
228 size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
229 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
230
231 /* Free queue */
232 free_dma ( admin->buf, ( buf_len + len ) );
233 }
234
235 /**
236 * Get next admin command queue descriptor
237 *
238 * @v intelxl Intel device
239 * @ret cmd Command descriptor
240 */
241 static struct intelxl_admin_descriptor *
242 intelxl_admin_command_descriptor ( struct intelxl_nic *intelxl ) {
243 struct intelxl_admin *admin = &intelxl->command;
244 struct intelxl_admin_descriptor *cmd;
245
246 /* Get and initialise next descriptor */
247 cmd = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
248 memset ( cmd, 0, sizeof ( *cmd ) );
249 return cmd;
250 }
251
252 /**
253 * Get next admin command queue data buffer
254 *
255 * @v intelxl Intel device
256 * @ret buf Data buffer
257 */
258 static union intelxl_admin_buffer *
259 intelxl_admin_command_buffer ( struct intelxl_nic *intelxl ) {
260 struct intelxl_admin *admin = &intelxl->command;
261 union intelxl_admin_buffer *buf;
262
263 /* Get next data buffer */
264 buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
265 memset ( buf, 0, sizeof ( *buf ) );
266 return buf;
267 }
268
269 /**
270 * Initialise admin event queue descriptor
271 *
272 * @v intelxl Intel device
273 * @v index Event queue index
274 */
275 static void intelxl_admin_event_init ( struct intelxl_nic *intelxl,
276 unsigned int index ) {
277 struct intelxl_admin *admin = &intelxl->event;
278 struct intelxl_admin_descriptor *evt;
279 union intelxl_admin_buffer *buf;
280 uint64_t address;
281
282 /* Initialise descriptor */
283 evt = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
284 buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
285 address = virt_to_bus ( buf );
286 evt->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
287 evt->len = cpu_to_le16 ( sizeof ( *buf ) );
288 evt->params.buffer.high = cpu_to_le32 ( address >> 32 );
289 evt->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
290 }
291
292 /**
293 * Issue admin queue command
294 *
295 * @v intelxl Intel device
296 * @ret rc Return status code
297 */
298 static int intelxl_admin_command ( struct intelxl_nic *intelxl ) {
299 struct intelxl_admin *admin = &intelxl->command;
300 const struct intelxl_admin_offsets *regs = admin->regs;
301 void *admin_regs = ( intelxl->regs + admin->base );
302 struct intelxl_admin_descriptor *cmd;
303 union intelxl_admin_buffer *buf;
304 uint64_t address;
305 uint32_t cookie;
306 unsigned int index;
307 unsigned int tail;
308 unsigned int i;
309 int rc;
310
311 /* Get next queue entry */
312 index = admin->index++;
313 tail = ( admin->index % INTELXL_ADMIN_NUM_DESC );
314 cmd = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
315 buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
316 DBGC2 ( intelxl, "INTELXL %p admin command %#x opcode %#04x",
317 intelxl, index, le16_to_cpu ( cmd->opcode ) );
318 if ( cmd->vopcode )
319 DBGC2 ( intelxl, "/%#08x", le32_to_cpu ( cmd->vopcode ) );
320 DBGC2 ( intelxl, ":\n" );
321
322 /* Sanity checks */
323 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_DD ) ) );
324 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_CMP ) ) );
325 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_ERR ) ) );
326 assert ( cmd->ret == 0 );
327
328 /* Populate data buffer address if applicable */
329 if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
330 address = virt_to_bus ( buf );
331 cmd->params.buffer.high = cpu_to_le32 ( address >> 32 );
332 cmd->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
333 }
334
335 /* Populate cookie, if not being (ab)used for VF opcode */
336 if ( ! cmd->vopcode )
337 cmd->cookie = cpu_to_le32 ( index );
338
339 /* Record cookie */
340 cookie = cmd->cookie;
341
342 /* Post command descriptor */
343 DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
344 if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
345 DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
346 le16_to_cpu ( cmd->len ) );
347 }
348 wmb();
349 writel ( tail, admin_regs + regs->tail );
350
351 /* Wait for completion */
352 for ( i = 0 ; i < INTELXL_ADMIN_MAX_WAIT_MS ; i++ ) {
353
354 /* If response is not complete, delay 1ms and retry */
355 if ( ! ( cmd->flags & INTELXL_ADMIN_FL_DD ) ) {
356 mdelay ( 1 );
357 continue;
358 }
359 DBGC2 ( intelxl, "INTELXL %p admin command %#x response:\n",
360 intelxl, index );
361 DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd,
362 sizeof ( *cmd ) );
363
364 /* Check for cookie mismatch */
365 if ( cmd->cookie != cookie ) {
366 DBGC ( intelxl, "INTELXL %p admin command %#x bad "
367 "cookie %#x\n", intelxl, index,
368 le32_to_cpu ( cmd->cookie ) );
369 rc = -EPROTO;
370 goto err;
371 }
372
373 /* Check for errors */
374 if ( cmd->ret != 0 ) {
375 DBGC ( intelxl, "INTELXL %p admin command %#x error "
376 "%d\n", intelxl, index,
377 le16_to_cpu ( cmd->ret ) );
378 rc = -EIO;
379 goto err;
380 }
381
382 /* Success */
383 return 0;
384 }
385
386 rc = -ETIMEDOUT;
387 DBGC ( intelxl, "INTELXL %p timed out waiting for admin command %#x:\n",
388 intelxl, index );
389 err:
390 DBGC_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
391 return rc;
392 }
393
394 /**
395 * Get firmware version
396 *
397 * @v intelxl Intel device
398 * @ret rc Return status code
399 */
400 static int intelxl_admin_version ( struct intelxl_nic *intelxl ) {
401 struct intelxl_admin_descriptor *cmd;
402 struct intelxl_admin_version_params *version;
403 unsigned int api;
404 int rc;
405
406 /* Populate descriptor */
407 cmd = intelxl_admin_command_descriptor ( intelxl );
408 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VERSION );
409 version = &cmd->params.version;
410
411 /* Issue command */
412 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
413 return rc;
414 api = le16_to_cpu ( version->api.major );
415 DBGC ( intelxl, "INTELXL %p firmware v%d.%d API v%d.%d\n",
416 intelxl, le16_to_cpu ( version->firmware.major ),
417 le16_to_cpu ( version->firmware.minor ),
418 api, le16_to_cpu ( version->api.minor ) );
419
420 /* Check for API compatibility */
421 if ( api > INTELXL_ADMIN_API_MAJOR ) {
422 DBGC ( intelxl, "INTELXL %p unsupported API v%d\n",
423 intelxl, api );
424 return -ENOTSUP;
425 }
426
427 return 0;
428 }
429
430 /**
431 * Report driver version
432 *
433 * @v intelxl Intel device
434 * @ret rc Return status code
435 */
436 static int intelxl_admin_driver ( struct intelxl_nic *intelxl ) {
437 struct intelxl_admin_descriptor *cmd;
438 struct intelxl_admin_driver_params *driver;
439 union intelxl_admin_buffer *buf;
440 int rc;
441
442 /* Populate descriptor */
443 cmd = intelxl_admin_command_descriptor ( intelxl );
444 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_DRIVER );
445 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF );
446 cmd->len = cpu_to_le16 ( sizeof ( buf->driver ) );
447 driver = &cmd->params.driver;
448 driver->major = product_major_version;
449 driver->minor = product_minor_version;
450 buf = intelxl_admin_command_buffer ( intelxl );
451 snprintf ( buf->driver.name, sizeof ( buf->driver.name ), "%s",
452 ( product_name[0] ? product_name : product_short_name ) );
453
454 /* Issue command */
455 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
456 return rc;
457
458 return 0;
459 }
460
461 /**
462 * Shutdown admin queues
463 *
464 * @v intelxl Intel device
465 * @ret rc Return status code
466 */
467 static int intelxl_admin_shutdown ( struct intelxl_nic *intelxl ) {
468 struct intelxl_admin_descriptor *cmd;
469 struct intelxl_admin_shutdown_params *shutdown;
470 int rc;
471
472 /* Populate descriptor */
473 cmd = intelxl_admin_command_descriptor ( intelxl );
474 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SHUTDOWN );
475 shutdown = &cmd->params.shutdown;
476 shutdown->unloading = INTELXL_ADMIN_SHUTDOWN_UNLOADING;
477
478 /* Issue command */
479 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
480 return rc;
481
482 return 0;
483 }
484
485 /**
486 * Get switch configuration
487 *
488 * @v intelxl Intel device
489 * @ret rc Return status code
490 */
491 static int intelxl_admin_switch ( struct intelxl_nic *intelxl ) {
492 struct intelxl_admin_descriptor *cmd;
493 struct intelxl_admin_switch_params *sw;
494 union intelxl_admin_buffer *buf;
495 int rc;
496
497 /* Populate descriptor */
498 cmd = intelxl_admin_command_descriptor ( intelxl );
499 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SWITCH );
500 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
501 cmd->len = cpu_to_le16 ( sizeof ( buf->sw ) );
502 sw = &cmd->params.sw;
503 buf = intelxl_admin_command_buffer ( intelxl );
504
505 /* Get each configuration in turn */
506 do {
507 /* Issue command */
508 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
509 return rc;
510
511 /* Dump raw configuration */
512 DBGC2 ( intelxl, "INTELXL %p SEID %#04x:\n",
513 intelxl, le16_to_cpu ( buf->sw.cfg.seid ) );
514 DBGC2_HDA ( intelxl, 0, &buf->sw.cfg, sizeof ( buf->sw.cfg ) );
515
516 /* Parse response */
517 if ( buf->sw.cfg.type == INTELXL_ADMIN_SWITCH_TYPE_VSI ) {
518 intelxl->vsi = le16_to_cpu ( buf->sw.cfg.seid );
519 DBGC ( intelxl, "INTELXL %p VSI %#04x uplink %#04x "
520 "downlink %#04x conn %#02x\n", intelxl,
521 intelxl->vsi, le16_to_cpu ( buf->sw.cfg.uplink ),
522 le16_to_cpu ( buf->sw.cfg.downlink ),
523 buf->sw.cfg.connection );
524 }
525
526 } while ( sw->next );
527
528 /* Check that we found a VSI */
529 if ( ! intelxl->vsi ) {
530 DBGC ( intelxl, "INTELXL %p has no VSI\n", intelxl );
531 return -ENOENT;
532 }
533
534 return 0;
535 }
536
537 /**
538 * Get VSI parameters
539 *
540 * @v intelxl Intel device
541 * @ret rc Return status code
542 */
543 static int intelxl_admin_vsi ( struct intelxl_nic *intelxl ) {
544 struct intelxl_admin_descriptor *cmd;
545 struct intelxl_admin_vsi_params *vsi;
546 union intelxl_admin_buffer *buf;
547 int rc;
548
549 /* Populate descriptor */
550 cmd = intelxl_admin_command_descriptor ( intelxl );
551 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VSI );
552 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
553 cmd->len = cpu_to_le16 ( sizeof ( buf->vsi ) );
554 vsi = &cmd->params.vsi;
555 vsi->vsi = cpu_to_le16 ( intelxl->vsi );
556 buf = intelxl_admin_command_buffer ( intelxl );
557
558 /* Issue command */
559 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
560 return rc;
561
562 /* Parse response */
563 intelxl->queue = le16_to_cpu ( buf->vsi.queue[0] );
564 intelxl->qset = le16_to_cpu ( buf->vsi.qset[0] );
565 DBGC ( intelxl, "INTELXL %p VSI %#04x queue %#04x qset %#04x\n",
566 intelxl, intelxl->vsi, intelxl->queue, intelxl->qset );
567
568 return 0;
569 }
570
571 /**
572 * Set VSI promiscuous modes
573 *
574 * @v intelxl Intel device
575 * @ret rc Return status code
576 */
577 static int intelxl_admin_promisc ( struct intelxl_nic *intelxl ) {
578 struct intelxl_admin_descriptor *cmd;
579 struct intelxl_admin_promisc_params *promisc;
580 uint16_t flags;
581 int rc;
582
583 /* Populate descriptor */
584 cmd = intelxl_admin_command_descriptor ( intelxl );
585 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_PROMISC );
586 flags = ( INTELXL_ADMIN_PROMISC_FL_UNICAST |
587 INTELXL_ADMIN_PROMISC_FL_MULTICAST |
588 INTELXL_ADMIN_PROMISC_FL_BROADCAST |
589 INTELXL_ADMIN_PROMISC_FL_VLAN );
590 promisc = &cmd->params.promisc;
591 promisc->flags = cpu_to_le16 ( flags );
592 promisc->valid = cpu_to_le16 ( flags );
593 promisc->vsi = cpu_to_le16 ( intelxl->vsi );
594
595 /* Issue command */
596 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
597 return rc;
598
599 return 0;
600 }
601
602 /**
603 * Restart autonegotiation
604 *
605 * @v intelxl Intel device
606 * @ret rc Return status code
607 */
608 static int intelxl_admin_autoneg ( struct intelxl_nic *intelxl ) {
609 struct intelxl_admin_descriptor *cmd;
610 struct intelxl_admin_autoneg_params *autoneg;
611 int rc;
612
613 /* Populate descriptor */
614 cmd = intelxl_admin_command_descriptor ( intelxl );
615 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_AUTONEG );
616 autoneg = &cmd->params.autoneg;
617 autoneg->flags = ( INTELXL_ADMIN_AUTONEG_FL_RESTART |
618 INTELXL_ADMIN_AUTONEG_FL_ENABLE );
619
620 /* Issue command */
621 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
622 return rc;
623
624 return 0;
625 }
626
627 /**
628 * Get link status
629 *
630 * @v netdev Network device
631 * @ret rc Return status code
632 */
633 static int intelxl_admin_link ( struct net_device *netdev ) {
634 struct intelxl_nic *intelxl = netdev->priv;
635 struct intelxl_admin_descriptor *cmd;
636 struct intelxl_admin_link_params *link;
637 int rc;
638
639 /* Populate descriptor */
640 cmd = intelxl_admin_command_descriptor ( intelxl );
641 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_LINK );
642 link = &cmd->params.link;
643 link->notify = INTELXL_ADMIN_LINK_NOTIFY;
644
645 /* Issue command */
646 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
647 return rc;
648 DBGC ( intelxl, "INTELXL %p PHY %#02x speed %#02x status %#02x\n",
649 intelxl, link->phy, link->speed, link->status );
650
651 /* Update network device */
652 if ( link->status & INTELXL_ADMIN_LINK_UP ) {
653 netdev_link_up ( netdev );
654 } else {
655 netdev_link_down ( netdev );
656 }
657
658 return 0;
659 }
660
/**
 * Handle virtual function event (when VF driver is not present)
 *
 * @v netdev		Network device
 * @v evt		Admin queue event descriptor
 * @v buf		Admin queue event data buffer
 *
 * This is a weak no-op; when the virtual function driver is linked
 * in, its own definition of this symbol overrides this stub.
 */
__weak void
intelxlvf_admin_event ( struct net_device *netdev __unused,
			struct intelxl_admin_descriptor *evt __unused,
			union intelxl_admin_buffer *buf __unused ) {

	/* Nothing to do */
}
675
676 /**
677 * Refill admin event queue
678 *
679 * @v intelxl Intel device
680 */
681 static void intelxl_refill_admin ( struct intelxl_nic *intelxl ) {
682 struct intelxl_admin *admin = &intelxl->event;
683 const struct intelxl_admin_offsets *regs = admin->regs;
684 void *admin_regs = ( intelxl->regs + admin->base );
685 unsigned int tail;
686
687 /* Update tail pointer */
688 tail = ( ( admin->index + INTELXL_ADMIN_NUM_DESC - 1 ) %
689 INTELXL_ADMIN_NUM_DESC );
690 wmb();
691 writel ( tail, admin_regs + regs->tail );
692 }
693
694 /**
695 * Poll admin event queue
696 *
697 * @v netdev Network device
698 */
699 static void intelxl_poll_admin ( struct net_device *netdev ) {
700 struct intelxl_nic *intelxl = netdev->priv;
701 struct intelxl_admin *admin = &intelxl->event;
702 struct intelxl_admin_descriptor *evt;
703 union intelxl_admin_buffer *buf;
704
705 /* Check for events */
706 while ( 1 ) {
707
708 /* Get next event descriptor and data buffer */
709 evt = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
710 buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
711
712 /* Stop if descriptor is not yet completed */
713 if ( ! ( evt->flags & INTELXL_ADMIN_FL_DD ) )
714 return;
715 DBGC2 ( intelxl, "INTELXL %p admin event %#x:\n",
716 intelxl, admin->index );
717 DBGC2_HDA ( intelxl, virt_to_phys ( evt ), evt,
718 sizeof ( *evt ) );
719 if ( evt->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
720 DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
721 le16_to_cpu ( evt->len ) );
722 }
723
724 /* Handle event */
725 switch ( evt->opcode ) {
726 case cpu_to_le16 ( INTELXL_ADMIN_LINK ):
727 intelxl_admin_link ( netdev );
728 break;
729 case cpu_to_le16 ( INTELXL_ADMIN_SEND_TO_VF ):
730 intelxlvf_admin_event ( netdev, evt, buf );
731 break;
732 default:
733 DBGC ( intelxl, "INTELXL %p admin event %#x "
734 "unrecognised opcode %#04x\n", intelxl,
735 admin->index, le16_to_cpu ( evt->opcode ) );
736 break;
737 }
738
739 /* Reset descriptor and refill queue */
740 intelxl_admin_event_init ( intelxl, admin->index );
741 admin->index++;
742 intelxl_refill_admin ( intelxl );
743 }
744 }
745
/**
 * Open admin queues
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 *
 * Allocates and enables both admin queues, then performs the initial
 * firmware handshake (version exchange and driver registration).
 * On failure, resources are released in reverse order of
 * acquisition via the goto-cleanup chain.
 */
static int intelxl_open_admin ( struct intelxl_nic *intelxl ) {
	int rc;

	/* Allocate admin event queue */
	if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->event ) ) != 0 )
		goto err_alloc_event;

	/* Allocate admin command queue */
	if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->command ) ) != 0 )
		goto err_alloc_command;

	/* (Re)open admin queues */
	intelxl_reopen_admin ( intelxl );

	/* Get firmware version */
	if ( ( rc = intelxl_admin_version ( intelxl ) ) != 0 )
		goto err_version;

	/* Report driver version */
	if ( ( rc = intelxl_admin_driver ( intelxl ) ) != 0 )
		goto err_driver;

	return 0;

 err_driver:
 err_version:
	intelxl_disable_admin ( intelxl, &intelxl->command );
	intelxl_disable_admin ( intelxl, &intelxl->event );
	intelxl_free_admin ( intelxl, &intelxl->command );
 err_alloc_command:
	intelxl_free_admin ( intelxl, &intelxl->event );
 err_alloc_event:
	return rc;
}
786
787 /**
788 * Reopen admin queues (after virtual function reset)
789 *
790 * @v intelxl Intel device
791 */
792 static void intelxl_reopen_admin ( struct intelxl_nic *intelxl ) {
793 unsigned int i;
794
795 /* Enable admin event queue */
796 intelxl_enable_admin ( intelxl, &intelxl->event );
797
798 /* Enable admin command queue */
799 intelxl_enable_admin ( intelxl, &intelxl->command );
800
801 /* Initialise all admin event queue descriptors */
802 for ( i = 0 ; i < INTELXL_ADMIN_NUM_DESC ; i++ )
803 intelxl_admin_event_init ( intelxl, i );
804
805 /* Post all descriptors to event queue */
806 intelxl_refill_admin ( intelxl );
807 }
808
/**
 * Close admin queues
 *
 * @v intelxl		Intel device
 *
 * Counterpart to intelxl_open_admin(): notifies the firmware, then
 * disables and frees both queues.
 */
static void intelxl_close_admin ( struct intelxl_nic *intelxl ) {

	/* Shut down admin queues (informs firmware that the driver
	 * is about to stop using them) */
	intelxl_admin_shutdown ( intelxl );

	/* Disable admin queues */
	intelxl_disable_admin ( intelxl, &intelxl->command );
	intelxl_disable_admin ( intelxl, &intelxl->event );

	/* Free admin queues */
	intelxl_free_admin ( intelxl, &intelxl->command );
	intelxl_free_admin ( intelxl, &intelxl->event );
}
827
828 /******************************************************************************
829 *
830 * Descriptor rings
831 *
832 ******************************************************************************
833 */
834
/**
 * Dump queue context (for debugging)
 *
 * @v intelxl		Intel device
 * @v op		Context operation
 * @v len		Size of context
 *
 * Reads back the queue context via the PFCM_LANCTX registers, one
 * line at a time, and dumps it via DBGC2.  Since this is debug-only
 * code, a timed-out read is not treated as an error: whatever is in
 * the data registers is simply dumped.
 */
static __attribute__ (( unused )) void
intelxl_context_dump ( struct intelxl_nic *intelxl, uint32_t op, size_t len ) {
	struct intelxl_context_line line;
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int index;
	unsigned int i;

	/* Do nothing unless debug output is enabled */
	if ( ! DBG_EXTRA )
		return;

	/* Dump context */
	DBGC2 ( intelxl, "INTELXL %p context %#08x:\n", intelxl, op );
	for ( index = 0 ; ( sizeof ( line ) * index ) < len ; index++ ) {

		/* Start context operation (read of one sub-line of
		 * this queue's context) */
		queue = ( intelxl->base + intelxl->queue );
		pfcm_lanctxctl =
			( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			  INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			  INTELXL_PFCM_LANCTXCTL_OP_CODE_READ | op );
		writel ( pfcm_lanctxctl,
			 intelxl->regs + INTELXL_PFCM_LANCTXCTL );

		/* Wait for operation to complete */
		for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

			/* Check if operation is complete */
			pfcm_lanctxstat = readl ( intelxl->regs +
						  INTELXL_PFCM_LANCTXSTAT );
			if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
				break;

			/* Delay */
			mdelay ( 1 );
		}

		/* Read context data from the data registers */
		for ( i = 0 ; i < ( sizeof ( line ) /
				    sizeof ( line.raw[0] ) ) ; i++ ) {
			line.raw[i] = readl ( intelxl->regs +
					      INTELXL_PFCM_LANCTXDATA ( i ) );
		}
		DBGC2_HDA ( intelxl, ( sizeof ( line ) * index ),
			    &line, sizeof ( line ) );
	}
}
891
/**
 * Program queue context line
 *
 * @v intelxl		Intel device
 * @v line		Queue context line
 * @v index		Line number
 * @v op		Context operation
 * @ret rc		Return status code
 *
 * Writes one line of queue context: the data registers must be
 * populated first, then the control register write starts the
 * operation, which is polled to completion.
 */
static int intelxl_context_line ( struct intelxl_nic *intelxl,
				  struct intelxl_context_line *line,
				  unsigned int index, uint32_t op ) {
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int i;

	/* Write context data (before starting the operation) */
	for ( i = 0; i < ( sizeof ( *line ) / sizeof ( line->raw[0] ) ); i++ ) {
		writel ( le32_to_cpu ( line->raw[i] ),
			 intelxl->regs + INTELXL_PFCM_LANCTXDATA ( i ) );
	}

	/* Start context operation */
	queue = ( intelxl->base + intelxl->queue );
	pfcm_lanctxctl = ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			   INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			   INTELXL_PFCM_LANCTXCTL_OP_CODE_WRITE | op );
	writel ( pfcm_lanctxctl, intelxl->regs + INTELXL_PFCM_LANCTXCTL );

	/* Wait for operation to complete */
	for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

		/* Check if operation is complete */
		pfcm_lanctxstat = readl ( intelxl->regs +
					  INTELXL_PFCM_LANCTXSTAT );
		if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( intelxl, "INTELXL %p timed out waiting for context: %#08x\n",
	       intelxl, pfcm_lanctxctl );
	return -ETIMEDOUT;
}
939
940 /**
941 * Program queue context
942 *
943 * @v intelxl Intel device
944 * @v line Queue context lines
945 * @v len Size of context
946 * @v op Context operation
947 * @ret rc Return status code
948 */
949 static int intelxl_context ( struct intelxl_nic *intelxl,
950 struct intelxl_context_line *line,
951 size_t len, uint32_t op ) {
952 unsigned int index;
953 int rc;
954
955 DBGC2 ( intelxl, "INTELXL %p context %#08x len %#zx:\n",
956 intelxl, op, len );
957 DBGC2_HDA ( intelxl, 0, line, len );
958
959 /* Program one line at a time */
960 for ( index = 0 ; ( sizeof ( *line ) * index ) < len ; index++ ) {
961 if ( ( rc = intelxl_context_line ( intelxl, line++, index,
962 op ) ) != 0 )
963 return rc;
964 }
965
966 return 0;
967 }
968
969 /**
970 * Program transmit queue context
971 *
972 * @v intelxl Intel device
973 * @v address Descriptor ring base address
974 * @ret rc Return status code
975 */
976 static int intelxl_context_tx ( struct intelxl_nic *intelxl,
977 physaddr_t address ) {
978 union {
979 struct intelxl_context_tx tx;
980 struct intelxl_context_line line;
981 } ctx;
982 int rc;
983
984 /* Initialise context */
985 memset ( &ctx, 0, sizeof ( ctx ) );
986 ctx.tx.flags = cpu_to_le16 ( INTELXL_CTX_TX_FL_NEW );
987 ctx.tx.base = cpu_to_le64 ( INTELXL_CTX_TX_BASE ( address ) );
988 ctx.tx.count =
989 cpu_to_le16 ( INTELXL_CTX_TX_COUNT ( INTELXL_TX_NUM_DESC ) );
990 ctx.tx.qset = INTELXL_CTX_TX_QSET ( intelxl->qset );
991
992 /* Program context */
993 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
994 INTELXL_PFCM_LANCTXCTL_TYPE_TX ) ) != 0 )
995 return rc;
996
997 return 0;
998 }
999
1000 /**
1001 * Program receive queue context
1002 *
1003 * @v intelxl Intel device
1004 * @v address Descriptor ring base address
1005 * @ret rc Return status code
1006 */
1007 static int intelxl_context_rx ( struct intelxl_nic *intelxl,
1008 physaddr_t address ) {
1009 union {
1010 struct intelxl_context_rx rx;
1011 struct intelxl_context_line line;
1012 } ctx;
1013 uint64_t base_count;
1014 int rc;
1015
1016 /* Initialise context */
1017 memset ( &ctx, 0, sizeof ( ctx ) );
1018 base_count = INTELXL_CTX_RX_BASE_COUNT ( address, INTELXL_RX_NUM_DESC );
1019 ctx.rx.base_count = cpu_to_le64 ( base_count );
1020 ctx.rx.len = cpu_to_le16 ( INTELXL_CTX_RX_LEN ( intelxl->mfs ) );
1021 ctx.rx.flags = ( INTELXL_CTX_RX_FL_DSIZE | INTELXL_CTX_RX_FL_CRCSTRIP );
1022 ctx.rx.mfs = cpu_to_le16 ( INTELXL_CTX_RX_MFS ( intelxl->mfs ) );
1023
1024 /* Program context */
1025 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
1026 INTELXL_PFCM_LANCTXCTL_TYPE_RX ) ) != 0 )
1027 return rc;
1028
1029 return 0;
1030 }
1031
1032 /**
1033 * Enable descriptor ring
1034 *
1035 * @v intelxl Intel device
1036 * @v ring Descriptor ring
1037 * @ret rc Return status code
1038 */
1039 static int intelxl_enable_ring ( struct intelxl_nic *intelxl,
1040 struct intelxl_ring *ring ) {
1041 void *ring_regs = ( intelxl->regs + ring->reg );
1042 uint32_t qxx_ena;
1043
1044 /* Enable ring */
1045 writel ( INTELXL_QXX_ENA_REQ, ( ring_regs + INTELXL_QXX_ENA ) );
1046 udelay ( INTELXL_QUEUE_ENABLE_DELAY_US );
1047 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
1048 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) ) {
1049 DBGC ( intelxl, "INTELXL %p ring %06x failed to enable: "
1050 "%#08x\n", intelxl, ring->reg, qxx_ena );
1051 return -EIO;
1052 }
1053
1054 return 0;
1055 }
1056
1057 /**
1058 * Disable descriptor ring
1059 *
1060 * @v intelxl Intel device
1061 * @v ring Descriptor ring
1062 * @ret rc Return status code
1063 */
1064 static int intelxl_disable_ring ( struct intelxl_nic *intelxl,
1065 struct intelxl_ring *ring ) {
1066 void *ring_regs = ( intelxl->regs + ring->reg );
1067 uint32_t qxx_ena;
1068 unsigned int i;
1069
1070 /* Disable ring */
1071 writel ( 0, ( ring_regs + INTELXL_QXX_ENA ) );
1072
1073 /* Wait for ring to be disabled */
1074 for ( i = 0 ; i < INTELXL_QUEUE_DISABLE_MAX_WAIT_MS ; i++ ) {
1075
1076 /* Check if ring is disabled */
1077 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
1078 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) )
1079 return 0;
1080
1081 /* Delay */
1082 mdelay ( 1 );
1083 }
1084
1085 DBGC ( intelxl, "INTELXL %p ring %06x timed out waiting for disable: "
1086 "%#08x\n", intelxl, ring->reg, qxx_ena );
1087 return -ETIMEDOUT;
1088 }
1089
1090 /**
1091 * Create descriptor ring
1092 *
1093 * @v intelxl Intel device
1094 * @v ring Descriptor ring
1095 * @ret rc Return status code
1096 */
1097 static int intelxl_create_ring ( struct intelxl_nic *intelxl,
1098 struct intelxl_ring *ring ) {
1099 physaddr_t address;
1100 int rc;
1101
1102 /* Allocate descriptor ring */
1103 ring->desc.raw = malloc_dma ( ring->len, INTELXL_ALIGN );
1104 if ( ! ring->desc.raw ) {
1105 rc = -ENOMEM;
1106 goto err_alloc;
1107 }
1108
1109 /* Initialise descriptor ring */
1110 memset ( ring->desc.raw, 0, ring->len );
1111
1112 /* Reset tail pointer */
1113 writel ( 0, ( intelxl->regs + ring->tail ) );
1114
1115 /* Program queue context */
1116 address = virt_to_bus ( ring->desc.raw );
1117 if ( ( rc = ring->context ( intelxl, address ) ) != 0 )
1118 goto err_context;
1119
1120 /* Enable ring */
1121 if ( ( rc = intelxl_enable_ring ( intelxl, ring ) ) != 0 )
1122 goto err_enable;
1123
1124 /* Reset counters */
1125 ring->prod = 0;
1126 ring->cons = 0;
1127
1128 DBGC ( intelxl, "INTELXL %p ring %06x is at [%08llx,%08llx)\n",
1129 intelxl, ( ring->reg + ring->tail ),
1130 ( ( unsigned long long ) address ),
1131 ( ( unsigned long long ) address + ring->len ) );
1132
1133 return 0;
1134
1135 intelxl_disable_ring ( intelxl, ring );
1136 err_enable:
1137 err_context:
1138 free_dma ( ring->desc.raw, ring->len );
1139 err_alloc:
1140 return rc;
1141 }
1142
1143 /**
1144 * Destroy descriptor ring
1145 *
1146 * @v intelxl Intel device
1147 * @v ring Descriptor ring
1148 */
1149 static void intelxl_destroy_ring ( struct intelxl_nic *intelxl,
1150 struct intelxl_ring *ring ) {
1151 int rc;
1152
1153 /* Disable ring */
1154 if ( ( rc = intelxl_disable_ring ( intelxl, ring ) ) != 0 ) {
1155 /* Leak memory; there's nothing else we can do */
1156 return;
1157 }
1158
1159 /* Free descriptor ring */
1160 free_dma ( ring->desc.raw, ring->len );
1161 ring->desc.raw = NULL;
1162 }
1163
1164 /**
1165 * Refill receive descriptor ring
1166 *
1167 * @v intelxl Intel device
1168 */
1169 static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
1170 struct intelxl_rx_data_descriptor *rx;
1171 struct io_buffer *iobuf;
1172 unsigned int rx_idx;
1173 unsigned int rx_tail;
1174 physaddr_t address;
1175 unsigned int refilled = 0;
1176
1177 /* Refill ring */
1178 while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) {
1179
1180 /* Allocate I/O buffer */
1181 iobuf = alloc_iob ( intelxl->mfs );
1182 if ( ! iobuf ) {
1183 /* Wait for next refill */
1184 break;
1185 }
1186
1187 /* Get next receive descriptor */
1188 rx_idx = ( intelxl->rx.prod++ % INTELXL_RX_NUM_DESC );
1189 rx = &intelxl->rx.desc.rx[rx_idx].data;
1190
1191 /* Populate receive descriptor */
1192 address = virt_to_bus ( iobuf->data );
1193 rx->address = cpu_to_le64 ( address );
1194 rx->flags = 0;
1195
1196 /* Record I/O buffer */
1197 assert ( intelxl->rx_iobuf[rx_idx] == NULL );
1198 intelxl->rx_iobuf[rx_idx] = iobuf;
1199
1200 DBGC2 ( intelxl, "INTELXL %p RX %d is [%llx,%llx)\n", intelxl,
1201 rx_idx, ( ( unsigned long long ) address ),
1202 ( ( unsigned long long ) address + intelxl->mfs ) );
1203 refilled++;
1204 }
1205
1206 /* Push descriptors to card, if applicable */
1207 if ( refilled ) {
1208 wmb();
1209 rx_tail = ( intelxl->rx.prod % INTELXL_RX_NUM_DESC );
1210 writel ( rx_tail, ( intelxl->regs + intelxl->rx.tail ) );
1211 }
1212 }
1213
1214 /******************************************************************************
1215 *
1216 * Network device interface
1217 *
1218 ******************************************************************************
1219 */
1220
/**
 * Open network device
 *
 * Brings up the transmit and receive datapath: programs the MAC
 * address and maximum frame size, associates the transmit queue with
 * this PF, creates both descriptor rings, prefills the receive ring,
 * and kicks off autonegotiation.
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int intelxl_open ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	union intelxl_receive_address mac;
	unsigned int queue;
	uint32_t prtgl_sal;
	uint32_t prtgl_sah;
	int rc;

	/* Calculate maximum frame size: Ethernet header + MTU + CRC,
	 * rounded up to the hardware alignment granularity
	 */
	intelxl->mfs = ( ( ETH_HLEN + netdev->mtu + 4 /* CRC */ +
			   INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) );

	/* Program MAC address and maximum frame size (MFS is encoded
	 * in the high half of the receive address register pair)
	 */
	memset ( &mac, 0, sizeof ( mac ) );
	memcpy ( mac.raw, netdev->ll_addr, sizeof ( mac.raw ) );
	prtgl_sal = le32_to_cpu ( mac.reg.low );
	prtgl_sah = ( le32_to_cpu ( mac.reg.high ) |
		      INTELXL_PRTGL_SAH_MFS_SET ( intelxl->mfs ) );
	writel ( prtgl_sal, intelxl->regs + INTELXL_PRTGL_SAL );
	writel ( prtgl_sah, intelxl->regs + INTELXL_PRTGL_SAH );

	/* Associate transmit queue to PF */
	writel ( ( INTELXL_QXX_CTL_PFVF_Q_PF |
		   INTELXL_QXX_CTL_PFVF_PF_INDX ( intelxl->pf ) ),
		 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_CTL ) );

	/* Clear transmit pre queue disable (GLLAN_TXPRE_QDIS uses the
	 * device-absolute queue number, hence base + queue)
	 */
	queue = ( intelxl->base + intelxl->queue );
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_CLEAR_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );

	/* Reset transmit queue head */
	writel ( 0, ( intelxl->regs + INTELXL_QTX_HEAD ( intelxl->queue ) ) );

	/* Create receive descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx ) ) != 0 )
		goto err_create_rx;

	/* Create transmit descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx ) ) != 0 )
		goto err_create_tx;

	/* Fill receive ring */
	intelxl_refill_rx ( intelxl );

	/* Restart autonegotiation */
	intelxl_admin_autoneg ( intelxl );

	/* Update link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* Unreachable in the current code; retained as the symmetric
	 * teardown (pre-disable + settle delay) that precedes the
	 * error labels below.
	 */
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
	udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
	intelxl_destroy_ring ( intelxl, &intelxl->tx );
 err_create_tx:
	intelxl_destroy_ring ( intelxl, &intelxl->rx );
 err_create_rx:
	return rc;
}
1291
1292 /**
1293 * Close network device
1294 *
1295 * @v netdev Network device
1296 */
1297 static void intelxl_close ( struct net_device *netdev ) {
1298 struct intelxl_nic *intelxl = netdev->priv;
1299 unsigned int queue;
1300 unsigned int i;
1301
1302 /* Dump contexts (for debugging) */
1303 intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_TX,
1304 sizeof ( struct intelxl_context_tx ) );
1305 intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_RX,
1306 sizeof ( struct intelxl_context_rx ) );
1307
1308 /* Pre-disable transmit queue */
1309 queue = ( intelxl->base + intelxl->queue );
1310 writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
1311 INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
1312 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
1313 udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
1314
1315 /* Destroy transmit descriptor ring */
1316 intelxl_destroy_ring ( intelxl, &intelxl->tx );
1317
1318 /* Destroy receive descriptor ring */
1319 intelxl_destroy_ring ( intelxl, &intelxl->rx );
1320
1321 /* Discard any unused receive buffers */
1322 for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
1323 if ( intelxl->rx_iobuf[i] )
1324 free_iob ( intelxl->rx_iobuf[i] );
1325 intelxl->rx_iobuf[i] = NULL;
1326 }
1327 }
1328
1329 /**
1330 * Transmit packet
1331 *
1332 * @v netdev Network device
1333 * @v iobuf I/O buffer
1334 * @ret rc Return status code
1335 */
1336 static int intelxl_transmit ( struct net_device *netdev,
1337 struct io_buffer *iobuf ) {
1338 struct intelxl_nic *intelxl = netdev->priv;
1339 struct intelxl_tx_data_descriptor *tx;
1340 unsigned int tx_idx;
1341 unsigned int tx_tail;
1342 physaddr_t address;
1343 size_t len;
1344
1345 /* Get next transmit descriptor */
1346 if ( ( intelxl->tx.prod - intelxl->tx.cons ) >= INTELXL_TX_FILL ) {
1347 DBGC ( intelxl, "INTELXL %p out of transmit descriptors\n",
1348 intelxl );
1349 return -ENOBUFS;
1350 }
1351 tx_idx = ( intelxl->tx.prod++ % INTELXL_TX_NUM_DESC );
1352 tx_tail = ( intelxl->tx.prod % INTELXL_TX_NUM_DESC );
1353 tx = &intelxl->tx.desc.tx[tx_idx].data;
1354
1355 /* Populate transmit descriptor */
1356 address = virt_to_bus ( iobuf->data );
1357 len = iob_len ( iobuf );
1358 tx->address = cpu_to_le64 ( address );
1359 tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
1360 tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
1361 INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );
1362 wmb();
1363
1364 /* Notify card that there are packets ready to transmit */
1365 writel ( tx_tail, ( intelxl->regs + intelxl->tx.tail ) );
1366
1367 DBGC2 ( intelxl, "INTELXL %p TX %d is [%llx,%llx)\n", intelxl, tx_idx,
1368 ( ( unsigned long long ) address ),
1369 ( ( unsigned long long ) address + len ) );
1370 return 0;
1371 }
1372
1373 /**
1374 * Poll for completed packets
1375 *
1376 * @v netdev Network device
1377 */
1378 static void intelxl_poll_tx ( struct net_device *netdev ) {
1379 struct intelxl_nic *intelxl = netdev->priv;
1380 struct intelxl_tx_writeback_descriptor *tx_wb;
1381 unsigned int tx_idx;
1382
1383 /* Check for completed packets */
1384 while ( intelxl->tx.cons != intelxl->tx.prod ) {
1385
1386 /* Get next transmit descriptor */
1387 tx_idx = ( intelxl->tx.cons % INTELXL_TX_NUM_DESC );
1388 tx_wb = &intelxl->tx.desc.tx[tx_idx].wb;
1389
1390 /* Stop if descriptor is still in use */
1391 if ( ! ( tx_wb->flags & INTELXL_TX_WB_FL_DD ) )
1392 return;
1393 DBGC2 ( intelxl, "INTELXL %p TX %d complete\n",
1394 intelxl, tx_idx );
1395
1396 /* Complete TX descriptor */
1397 netdev_tx_complete_next ( netdev );
1398 intelxl->tx.cons++;
1399 }
1400 }
1401
1402 /**
1403 * Poll for received packets
1404 *
1405 * @v netdev Network device
1406 */
1407 static void intelxl_poll_rx ( struct net_device *netdev ) {
1408 struct intelxl_nic *intelxl = netdev->priv;
1409 struct intelxl_rx_writeback_descriptor *rx_wb;
1410 struct io_buffer *iobuf;
1411 unsigned int rx_idx;
1412 unsigned int tag;
1413 size_t len;
1414
1415 /* Check for received packets */
1416 while ( intelxl->rx.cons != intelxl->rx.prod ) {
1417
1418 /* Get next receive descriptor */
1419 rx_idx = ( intelxl->rx.cons % INTELXL_RX_NUM_DESC );
1420 rx_wb = &intelxl->rx.desc.rx[rx_idx].wb;
1421
1422 /* Stop if descriptor is still in use */
1423 if ( ! ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_DD ) ) )
1424 return;
1425
1426 /* Populate I/O buffer */
1427 iobuf = intelxl->rx_iobuf[rx_idx];
1428 intelxl->rx_iobuf[rx_idx] = NULL;
1429 len = INTELXL_RX_WB_LEN ( le32_to_cpu ( rx_wb->len ) );
1430 iob_put ( iobuf, len );
1431
1432 /* Find VLAN device, if applicable */
1433 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_VLAN ) ) {
1434 tag = VLAN_TAG ( le16_to_cpu ( rx_wb->vlan ) );
1435 } else {
1436 tag = 0;
1437 }
1438
1439 /* Hand off to network stack */
1440 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_RXE ) ) {
1441 DBGC ( intelxl, "INTELXL %p RX %d error (length %zd, "
1442 "flags %08x)\n", intelxl, rx_idx, len,
1443 le32_to_cpu ( rx_wb->flags ) );
1444 vlan_netdev_rx_err ( netdev, tag, iobuf, -EIO );
1445 } else {
1446 DBGC2 ( intelxl, "INTELXL %p RX %d complete (length "
1447 "%zd)\n", intelxl, rx_idx, len );
1448 vlan_netdev_rx ( netdev, tag, iobuf );
1449 }
1450 intelxl->rx.cons++;
1451 }
1452 }
1453
1454 /**
1455 * Poll for completed and received packets
1456 *
1457 * @v netdev Network device
1458 */
1459 static void intelxl_poll ( struct net_device *netdev ) {
1460 struct intelxl_nic *intelxl = netdev->priv;
1461
1462 /* Acknowledge interrupts, if applicable */
1463 if ( netdev_irq_enabled ( netdev ) ) {
1464 writel ( ( INTELXL_PFINT_DYN_CTL0_CLEARPBA |
1465 INTELXL_PFINT_DYN_CTL0_INTENA_MASK ),
1466 intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1467 }
1468
1469 /* Poll for completed packets */
1470 intelxl_poll_tx ( netdev );
1471
1472 /* Poll for received packets */
1473 intelxl_poll_rx ( netdev );
1474
1475 /* Poll for admin events */
1476 intelxl_poll_admin ( netdev );
1477
1478 /* Refill RX ring */
1479 intelxl_refill_rx ( intelxl );
1480 }
1481
1482 /**
1483 * Enable or disable interrupts
1484 *
1485 * @v netdev Network device
1486 * @v enable Interrupts should be enabled
1487 */
1488 static void intelxl_irq ( struct net_device *netdev, int enable ) {
1489 struct intelxl_nic *intelxl = netdev->priv;
1490
1491 if ( enable ) {
1492 writel ( INTELXL_PFINT_DYN_CTL0_INTENA,
1493 intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1494 } else {
1495 writel ( 0, intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1496 }
1497 }
1498
/** Network device operations for the Intel 40GbE driver */
static struct net_device_operations intelxl_operations = {
	.open = intelxl_open,
	.close = intelxl_close,
	.transmit = intelxl_transmit,
	.poll = intelxl_poll,
	.irq = intelxl_irq,
};
1507
1508 /******************************************************************************
1509 *
1510 * PCI interface
1511 *
1512 ******************************************************************************
1513 */
1514
/**
 * Probe PCI device
 *
 * Allocates the network device, maps and resets the hardware, brings
 * up the admin queues, retrieves switch/VSI configuration, computes
 * the queue register addresses, and registers the network device.
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int intelxl_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct intelxl_nic *intelxl;
	uint32_t pfgen_portnum;
	uint32_t pflan_qalloc;
	int rc;

	/* Allocate and initialise net device */
	netdev = alloc_etherdev ( sizeof ( *intelxl ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &intelxl_operations );
	intelxl = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( intelxl, 0, sizeof ( *intelxl ) );
	/* PCI function number identifies the physical function (PF) */
	intelxl->pf = PCI_FUNC ( pci->busdevfn );
	intelxl_init_admin ( &intelxl->command, INTELXL_ADMIN_CMD,
			     &intelxl_admin_offsets );
	intelxl_init_admin ( &intelxl->event, INTELXL_ADMIN_EVT,
			     &intelxl_admin_offsets );
	intelxl_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
			    sizeof ( intelxl->tx.desc.tx[0] ),
			    intelxl_context_tx );
	intelxl_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
			    sizeof ( intelxl->rx.desc.rx[0] ),
			    intelxl_context_rx );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	intelxl->regs = ioremap ( pci->membase, INTELXL_BAR_SIZE );
	if ( ! intelxl->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Reset the NIC */
	if ( ( rc = intelxl_reset ( intelxl ) ) != 0 )
		goto err_reset;

	/* Get port number and base queue number (the first queue of
	 * the range allocated to this PF)
	 */
	pfgen_portnum = readl ( intelxl->regs + INTELXL_PFGEN_PORTNUM );
	intelxl->port = INTELXL_PFGEN_PORTNUM_PORT_NUM ( pfgen_portnum );
	pflan_qalloc = readl ( intelxl->regs + INTELXL_PFLAN_QALLOC );
	intelxl->base = INTELXL_PFLAN_QALLOC_FIRSTQ ( pflan_qalloc );
	DBGC ( intelxl, "INTELXL %p PF %d using port %d queues [%#04x-%#04x]\n",
	       intelxl, intelxl->pf, intelxl->port, intelxl->base,
	       INTELXL_PFLAN_QALLOC_LASTQ ( pflan_qalloc ) );

	/* Fetch MAC address and maximum frame size */
	if ( ( rc = intelxl_fetch_mac ( intelxl, netdev ) ) != 0 )
		goto err_fetch_mac;

	/* Open admin queues */
	if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 )
		goto err_open_admin;

	/* Get switch configuration */
	if ( ( rc = intelxl_admin_switch ( intelxl ) ) != 0 )
		goto err_admin_switch;

	/* Get VSI configuration */
	if ( ( rc = intelxl_admin_vsi ( intelxl ) ) != 0 )
		goto err_admin_vsi;

	/* Configure switch for promiscuous mode */
	if ( ( rc = intelxl_admin_promisc ( intelxl ) ) != 0 )
		goto err_admin_promisc;

	/* Configure queue register addresses.  The tail registers are
	 * stored as absolute register offsets (including the queue
	 * register base).  NOTE(review): intelxl->queue is presumably
	 * populated by the admin handlers above — confirm against
	 * intelxl_admin_vsi.
	 */
	intelxl->tx.reg = INTELXL_QTX ( intelxl->queue );
	intelxl->tx.tail = ( intelxl->tx.reg + INTELXL_QXX_TAIL );
	intelxl->rx.reg = INTELXL_QRX ( intelxl->queue );
	intelxl->rx.tail = ( intelxl->rx.reg + INTELXL_QXX_TAIL );

	/* Configure interrupt causes: chain TX and RX queue causes and
	 * enable admin queue interrupts
	 */
	writel ( ( INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE |
		   INTELXL_QINT_TQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_TQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_QINT_RQCTL_NEXTQ_INDX ( intelxl->queue ) |
		   INTELXL_QINT_RQCTL_NEXTQ_TYPE_TX |
		   INTELXL_QINT_RQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_RQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_PFINT_LNKLST0_FIRSTQ_INDX ( intelxl->queue ) |
		   INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_RX ),
		 intelxl->regs + INTELXL_PFINT_LNKLST0 );
	writel ( INTELXL_PFINT_ICR0_ENA_ADMINQ,
		 intelxl->regs + INTELXL_PFINT_ICR0_ENA );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Set initial link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* Unreachable in the current code; retained as the symmetric
	 * teardown step preceding the error labels below.
	 */
	unregister_netdev ( netdev );
 err_register_netdev:
 err_admin_promisc:
 err_admin_vsi:
 err_admin_switch:
	intelxl_close_admin ( intelxl );
 err_open_admin:
 err_fetch_mac:
	intelxl_reset ( intelxl );
 err_reset:
	iounmap ( intelxl->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}
1640
1641 /**
1642 * Remove PCI device
1643 *
1644 * @v pci PCI device
1645 */
1646 static void intelxl_remove ( struct pci_device *pci ) {
1647 struct net_device *netdev = pci_get_drvdata ( pci );
1648 struct intelxl_nic *intelxl = netdev->priv;
1649
1650 /* Unregister network device */
1651 unregister_netdev ( netdev );
1652
1653 /* Close admin queues */
1654 intelxl_close_admin ( intelxl );
1655
1656 /* Reset the NIC */
1657 intelxl_reset ( intelxl );
1658
1659 /* Free network device */
1660 iounmap ( intelxl->regs );
1661 netdev_nullify ( netdev );
1662 netdev_put ( netdev );
1663 }
1664
/** PCI device IDs (all vendor 0x8086, Intel; X710/XL710/XXV710/X722 family) */
static struct pci_device_id intelxl_nics[] = {
	PCI_ROM ( 0x8086, 0x1572, "x710-sfp", "X710 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1574, "xl710-qemu", "Virtual XL710", 0 ),
	PCI_ROM ( 0x8086, 0x1580, "xl710-kx-b", "XL710 40GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1581, "xl710-kx-c", "XL710 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1583, "xl710-qda2", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1584, "xl710-qda1", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1585, "x710-qsfp", "X710 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1586, "x710-10gt", "X710 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x1587, "x710-kr2", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1588, "x710-kr2-a", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1589, "x710-10gt4", "X710 10GBASE-T4", 0 ),
	PCI_ROM ( 0x8086, 0x158a, "xxv710", "XXV710 25GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x158b, "xxv710-sfp28", "XXV710 25GbE SFP28", 0 ),
	PCI_ROM ( 0x8086, 0x37ce, "x722-kx", "X722 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x37cf, "x722-qsfp", "X722 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d0, "x722-sfp", "X722 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d1, "x722-1gt", "X722 1GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d2, "x722-10gt", "X722 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d3, "x722-sfp-i", "X722 10GbE SFP+", 0 ),
};
1687
/** PCI driver (id_count is derived from the ID table above) */
struct pci_driver intelxl_driver __pci_driver = {
	.ids = intelxl_nics,
	.id_count = ( sizeof ( intelxl_nics ) / sizeof ( intelxl_nics[0] ) ),
	.probe = intelxl_probe,
	.remove = intelxl_remove,
};