[intelxl] Split out ring creation from context programming
[ipxe.git] / src / drivers / net / intelxl.c
1 /*
2 * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdint.h>
27 #include <string.h>
28 #include <stdio.h>
29 #include <unistd.h>
30 #include <errno.h>
31 #include <byteswap.h>
32 #include <ipxe/netdevice.h>
33 #include <ipxe/ethernet.h>
34 #include <ipxe/if_ether.h>
35 #include <ipxe/vlan.h>
36 #include <ipxe/iobuf.h>
37 #include <ipxe/malloc.h>
38 #include <ipxe/pci.h>
39 #include <ipxe/version.h>
40 #include "intelxl.h"
41
42 /** @file
43 *
44 * Intel 40 Gigabit Ethernet network card driver
45 *
46 */
47
48 static void intelxl_reopen_admin ( struct intelxl_nic *intelxl );
49
50 /******************************************************************************
51 *
52 * Device reset
53 *
54 ******************************************************************************
55 */
56
57 /**
58 * Reset hardware
59 *
60 * @v intelxl Intel device
61 * @ret rc Return status code
62 */
63 static int intelxl_reset ( struct intelxl_nic *intelxl ) {
64 uint32_t pfgen_ctrl;
65
66 /* Perform a global software reset */
67 pfgen_ctrl = readl ( intelxl->regs + INTELXL_PFGEN_CTRL );
68 writel ( ( pfgen_ctrl | INTELXL_PFGEN_CTRL_PFSWR ),
69 intelxl->regs + INTELXL_PFGEN_CTRL );
70 mdelay ( INTELXL_RESET_DELAY_MS );
71
72 return 0;
73 }
74
75 /******************************************************************************
76 *
77 * MAC address
78 *
79 ******************************************************************************
80 */
81
82 /**
83 * Fetch initial MAC address and maximum frame size
84 *
85 * @v intelxl Intel device
86 * @v netdev Network device
87 * @ret rc Return status code
88 */
89 static int intelxl_fetch_mac ( struct intelxl_nic *intelxl,
90 struct net_device *netdev ) {
91 union intelxl_receive_address mac;
92 uint32_t prtgl_sal;
93 uint32_t prtgl_sah;
94 size_t mfs;
95
96 /* Read NVM-loaded address */
97 prtgl_sal = readl ( intelxl->regs + INTELXL_PRTGL_SAL );
98 prtgl_sah = readl ( intelxl->regs + INTELXL_PRTGL_SAH );
99 mac.reg.low = cpu_to_le32 ( prtgl_sal );
100 mac.reg.high = cpu_to_le32 ( prtgl_sah );
101
102 /* Check that address is valid */
103 if ( ! is_valid_ether_addr ( mac.raw ) ) {
104 DBGC ( intelxl, "INTELXL %p has invalid MAC address (%s)\n",
105 intelxl, eth_ntoa ( mac.raw ) );
106 return -ENOENT;
107 }
108
109 /* Copy MAC address */
110 DBGC ( intelxl, "INTELXL %p has autoloaded MAC address %s\n",
111 intelxl, eth_ntoa ( mac.raw ) );
112 memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN );
113
114 /* Get maximum frame size */
115 mfs = INTELXL_PRTGL_SAH_MFS_GET ( prtgl_sah );
116 netdev->max_pkt_len = ( mfs - 4 /* CRC */ );
117
118 return 0;
119 }
120
121 /******************************************************************************
122 *
123 * Admin queue
124 *
125 ******************************************************************************
126 */
127
/** Admin queue register offsets
 *
 * Offsets are applied relative to the per-queue register base
 * (struct intelxl_admin::base), as used by intelxl_enable_admin() and
 * friends.
 */
static const struct intelxl_admin_offsets intelxl_admin_offsets = {
	.bal = INTELXL_ADMIN_BAL,
	.bah = INTELXL_ADMIN_BAH,
	.len = INTELXL_ADMIN_LEN,
	.head = INTELXL_ADMIN_HEAD,
	.tail = INTELXL_ADMIN_TAIL,
};
136
137 /**
138 * Allocate admin queue
139 *
140 * @v intelxl Intel device
141 * @v admin Admin queue
142 * @ret rc Return status code
143 */
144 static int intelxl_alloc_admin ( struct intelxl_nic *intelxl,
145 struct intelxl_admin *admin ) {
146 size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
147 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
148
149 /* Allocate admin queue */
150 admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN );
151 if ( ! admin->buf )
152 return -ENOMEM;
153 admin->desc = ( ( ( void * ) admin->buf ) + buf_len );
154
155 DBGC ( intelxl, "INTELXL %p A%cQ is at [%08llx,%08llx) buf "
156 "[%08llx,%08llx)\n", intelxl,
157 ( ( admin == &intelxl->command ) ? 'T' : 'R' ),
158 ( ( unsigned long long ) virt_to_bus ( admin->desc ) ),
159 ( ( unsigned long long ) ( virt_to_bus ( admin->desc ) + len ) ),
160 ( ( unsigned long long ) virt_to_bus ( admin->buf ) ),
161 ( ( unsigned long long ) ( virt_to_bus ( admin->buf ) +
162 buf_len ) ) );
163 return 0;
164 }
165
166 /**
167 * Enable admin queue
168 *
169 * @v intelxl Intel device
170 * @v admin Admin queue
171 */
172 static void intelxl_enable_admin ( struct intelxl_nic *intelxl,
173 struct intelxl_admin *admin ) {
174 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
175 const struct intelxl_admin_offsets *regs = admin->regs;
176 void *admin_regs = ( intelxl->regs + admin->base );
177 physaddr_t address;
178
179 /* Initialise admin queue */
180 memset ( admin->desc, 0, len );
181
182 /* Reset head and tail registers */
183 writel ( 0, admin_regs + regs->head );
184 writel ( 0, admin_regs + regs->tail );
185
186 /* Reset queue index */
187 admin->index = 0;
188
189 /* Program queue address */
190 address = virt_to_bus ( admin->desc );
191 writel ( ( address & 0xffffffffUL ), admin_regs + regs->bal );
192 if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
193 writel ( ( ( ( uint64_t ) address ) >> 32 ),
194 admin_regs + regs->bah );
195 } else {
196 writel ( 0, admin_regs + regs->bah );
197 }
198
199 /* Program queue length and enable queue */
200 writel ( ( INTELXL_ADMIN_LEN_LEN ( INTELXL_ADMIN_NUM_DESC ) |
201 INTELXL_ADMIN_LEN_ENABLE ),
202 admin_regs + regs->len );
203 }
204
205 /**
206 * Disable admin queue
207 *
208 * @v intelxl Intel device
209 * @v admin Admin queue
210 */
211 static void intelxl_disable_admin ( struct intelxl_nic *intelxl,
212 struct intelxl_admin *admin ) {
213 const struct intelxl_admin_offsets *regs = admin->regs;
214 void *admin_regs = ( intelxl->regs + admin->base );
215
216 /* Disable queue */
217 writel ( 0, admin_regs + regs->len );
218 }
219
220 /**
221 * Free admin queue
222 *
223 * @v intelxl Intel device
224 * @v admin Admin queue
225 */
226 static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused,
227 struct intelxl_admin *admin ) {
228 size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
229 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
230
231 /* Free queue */
232 free_dma ( admin->buf, ( buf_len + len ) );
233 }
234
235 /**
236 * Get next admin command queue descriptor
237 *
238 * @v intelxl Intel device
239 * @ret cmd Command descriptor
240 */
241 static struct intelxl_admin_descriptor *
242 intelxl_admin_command_descriptor ( struct intelxl_nic *intelxl ) {
243 struct intelxl_admin *admin = &intelxl->command;
244 struct intelxl_admin_descriptor *cmd;
245
246 /* Get and initialise next descriptor */
247 cmd = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
248 memset ( cmd, 0, sizeof ( *cmd ) );
249 return cmd;
250 }
251
252 /**
253 * Get next admin command queue data buffer
254 *
255 * @v intelxl Intel device
256 * @ret buf Data buffer
257 */
258 static union intelxl_admin_buffer *
259 intelxl_admin_command_buffer ( struct intelxl_nic *intelxl ) {
260 struct intelxl_admin *admin = &intelxl->command;
261 union intelxl_admin_buffer *buf;
262
263 /* Get next data buffer */
264 buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
265 memset ( buf, 0, sizeof ( *buf ) );
266 return buf;
267 }
268
269 /**
270 * Initialise admin event queue descriptor
271 *
272 * @v intelxl Intel device
273 * @v index Event queue index
274 */
275 static void intelxl_admin_event_init ( struct intelxl_nic *intelxl,
276 unsigned int index ) {
277 struct intelxl_admin *admin = &intelxl->event;
278 struct intelxl_admin_descriptor *evt;
279 union intelxl_admin_buffer *buf;
280 uint64_t address;
281
282 /* Initialise descriptor */
283 evt = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
284 buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
285 address = virt_to_bus ( buf );
286 evt->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
287 evt->len = cpu_to_le16 ( sizeof ( *buf ) );
288 evt->params.buffer.high = cpu_to_le32 ( address >> 32 );
289 evt->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
290 }
291
292 /**
293 * Issue admin queue command
294 *
295 * @v intelxl Intel device
296 * @ret rc Return status code
297 */
298 static int intelxl_admin_command ( struct intelxl_nic *intelxl ) {
299 struct intelxl_admin *admin = &intelxl->command;
300 const struct intelxl_admin_offsets *regs = admin->regs;
301 void *admin_regs = ( intelxl->regs + admin->base );
302 struct intelxl_admin_descriptor *cmd;
303 union intelxl_admin_buffer *buf;
304 uint64_t address;
305 uint32_t cookie;
306 unsigned int index;
307 unsigned int tail;
308 unsigned int i;
309 int rc;
310
311 /* Get next queue entry */
312 index = admin->index++;
313 tail = ( admin->index % INTELXL_ADMIN_NUM_DESC );
314 cmd = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
315 buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
316 DBGC2 ( intelxl, "INTELXL %p admin command %#x opcode %#04x",
317 intelxl, index, le16_to_cpu ( cmd->opcode ) );
318 if ( cmd->vopcode )
319 DBGC2 ( intelxl, "/%#08x", le32_to_cpu ( cmd->vopcode ) );
320 DBGC2 ( intelxl, ":\n" );
321
322 /* Sanity checks */
323 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_DD ) ) );
324 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_CMP ) ) );
325 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_ERR ) ) );
326 assert ( cmd->ret == 0 );
327
328 /* Populate data buffer address if applicable */
329 if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
330 address = virt_to_bus ( buf );
331 cmd->params.buffer.high = cpu_to_le32 ( address >> 32 );
332 cmd->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
333 }
334
335 /* Populate cookie, if not being (ab)used for VF opcode */
336 if ( ! cmd->vopcode )
337 cmd->cookie = cpu_to_le32 ( index );
338
339 /* Record cookie */
340 cookie = cmd->cookie;
341
342 /* Post command descriptor */
343 DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
344 if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
345 DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
346 le16_to_cpu ( cmd->len ) );
347 }
348 wmb();
349 writel ( tail, admin_regs + regs->tail );
350
351 /* Wait for completion */
352 for ( i = 0 ; i < INTELXL_ADMIN_MAX_WAIT_MS ; i++ ) {
353
354 /* If response is not complete, delay 1ms and retry */
355 if ( ! ( cmd->flags & INTELXL_ADMIN_FL_DD ) ) {
356 mdelay ( 1 );
357 continue;
358 }
359 DBGC2 ( intelxl, "INTELXL %p admin command %#x response:\n",
360 intelxl, index );
361 DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd,
362 sizeof ( *cmd ) );
363
364 /* Check for cookie mismatch */
365 if ( cmd->cookie != cookie ) {
366 DBGC ( intelxl, "INTELXL %p admin command %#x bad "
367 "cookie %#x\n", intelxl, index,
368 le32_to_cpu ( cmd->cookie ) );
369 rc = -EPROTO;
370 goto err;
371 }
372
373 /* Check for errors */
374 if ( cmd->ret != 0 ) {
375 DBGC ( intelxl, "INTELXL %p admin command %#x error "
376 "%d\n", intelxl, index,
377 le16_to_cpu ( cmd->ret ) );
378 rc = -EIO;
379 goto err;
380 }
381
382 /* Success */
383 return 0;
384 }
385
386 rc = -ETIMEDOUT;
387 DBGC ( intelxl, "INTELXL %p timed out waiting for admin command %#x:\n",
388 intelxl, index );
389 err:
390 DBGC_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
391 return rc;
392 }
393
394 /**
395 * Get firmware version
396 *
397 * @v intelxl Intel device
398 * @ret rc Return status code
399 */
400 static int intelxl_admin_version ( struct intelxl_nic *intelxl ) {
401 struct intelxl_admin_descriptor *cmd;
402 struct intelxl_admin_version_params *version;
403 unsigned int api;
404 int rc;
405
406 /* Populate descriptor */
407 cmd = intelxl_admin_command_descriptor ( intelxl );
408 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VERSION );
409 version = &cmd->params.version;
410
411 /* Issue command */
412 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
413 return rc;
414 api = le16_to_cpu ( version->api.major );
415 DBGC ( intelxl, "INTELXL %p firmware v%d.%d API v%d.%d\n",
416 intelxl, le16_to_cpu ( version->firmware.major ),
417 le16_to_cpu ( version->firmware.minor ),
418 api, le16_to_cpu ( version->api.minor ) );
419
420 /* Check for API compatibility */
421 if ( api > INTELXL_ADMIN_API_MAJOR ) {
422 DBGC ( intelxl, "INTELXL %p unsupported API v%d\n",
423 intelxl, api );
424 return -ENOTSUP;
425 }
426
427 return 0;
428 }
429
430 /**
431 * Report driver version
432 *
433 * @v intelxl Intel device
434 * @ret rc Return status code
435 */
436 static int intelxl_admin_driver ( struct intelxl_nic *intelxl ) {
437 struct intelxl_admin_descriptor *cmd;
438 struct intelxl_admin_driver_params *driver;
439 union intelxl_admin_buffer *buf;
440 int rc;
441
442 /* Populate descriptor */
443 cmd = intelxl_admin_command_descriptor ( intelxl );
444 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_DRIVER );
445 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF );
446 cmd->len = cpu_to_le16 ( sizeof ( buf->driver ) );
447 driver = &cmd->params.driver;
448 driver->major = product_major_version;
449 driver->minor = product_minor_version;
450 buf = intelxl_admin_command_buffer ( intelxl );
451 snprintf ( buf->driver.name, sizeof ( buf->driver.name ), "%s",
452 ( product_name[0] ? product_name : product_short_name ) );
453
454 /* Issue command */
455 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
456 return rc;
457
458 return 0;
459 }
460
461 /**
462 * Shutdown admin queues
463 *
464 * @v intelxl Intel device
465 * @ret rc Return status code
466 */
467 static int intelxl_admin_shutdown ( struct intelxl_nic *intelxl ) {
468 struct intelxl_admin_descriptor *cmd;
469 struct intelxl_admin_shutdown_params *shutdown;
470 int rc;
471
472 /* Populate descriptor */
473 cmd = intelxl_admin_command_descriptor ( intelxl );
474 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SHUTDOWN );
475 shutdown = &cmd->params.shutdown;
476 shutdown->unloading = INTELXL_ADMIN_SHUTDOWN_UNLOADING;
477
478 /* Issue command */
479 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
480 return rc;
481
482 return 0;
483 }
484
/**
 * Get switch configuration
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 *
 * Iterates over the reported switching elements, recording the SEID
 * of the last VSI-type element in intelxl->vsi, and fails with
 * -ENOENT if no VSI is found.
 */
static int intelxl_admin_switch ( struct intelxl_nic *intelxl ) {
	struct intelxl_admin_descriptor *cmd;
	struct intelxl_admin_switch_params *sw;
	union intelxl_admin_buffer *buf;
	int rc;

	/* Populate descriptor.
	 *
	 * NOTE(review): the descriptor is populated only once, but the
	 * do/while loop below may call intelxl_admin_command() more
	 * than once (each call advances the ring index to a fresh,
	 * unpopulated slot); `sw` and `buf` also keep pointing at the
	 * first slot.  This looks safe only when the configuration
	 * fits in a single response (sw->next == 0) — confirm against
	 * firmware behaviour for multi-buffer configurations.
	 */
	cmd = intelxl_admin_command_descriptor ( intelxl );
	cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SWITCH );
	cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
	cmd->len = cpu_to_le16 ( sizeof ( buf->sw ) );
	sw = &cmd->params.sw;
	buf = intelxl_admin_command_buffer ( intelxl );

	/* Get each configuration in turn */
	do {
		/* Issue command */
		if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
			return rc;

		/* Dump raw configuration */
		DBGC2 ( intelxl, "INTELXL %p SEID %#04x:\n",
			intelxl, le16_to_cpu ( buf->sw.cfg.seid ) );
		DBGC2_HDA ( intelxl, 0, &buf->sw.cfg, sizeof ( buf->sw.cfg ) );

		/* Parse response: record any VSI element */
		if ( buf->sw.cfg.type == INTELXL_ADMIN_SWITCH_TYPE_VSI ) {
			intelxl->vsi = le16_to_cpu ( buf->sw.cfg.seid );
			DBGC ( intelxl, "INTELXL %p VSI %#04x uplink %#04x "
			       "downlink %#04x conn %#02x\n", intelxl,
			       intelxl->vsi, le16_to_cpu ( buf->sw.cfg.uplink ),
			       le16_to_cpu ( buf->sw.cfg.downlink ),
			       buf->sw.cfg.connection );
		}

	} while ( sw->next );

	/* Check that we found a VSI */
	if ( ! intelxl->vsi ) {
		DBGC ( intelxl, "INTELXL %p has no VSI\n", intelxl );
		return -ENOENT;
	}

	return 0;
}
536
537 /**
538 * Get VSI parameters
539 *
540 * @v intelxl Intel device
541 * @ret rc Return status code
542 */
543 static int intelxl_admin_vsi ( struct intelxl_nic *intelxl ) {
544 struct intelxl_admin_descriptor *cmd;
545 struct intelxl_admin_vsi_params *vsi;
546 union intelxl_admin_buffer *buf;
547 int rc;
548
549 /* Populate descriptor */
550 cmd = intelxl_admin_command_descriptor ( intelxl );
551 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VSI );
552 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
553 cmd->len = cpu_to_le16 ( sizeof ( buf->vsi ) );
554 vsi = &cmd->params.vsi;
555 vsi->vsi = cpu_to_le16 ( intelxl->vsi );
556 buf = intelxl_admin_command_buffer ( intelxl );
557
558 /* Issue command */
559 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
560 return rc;
561
562 /* Parse response */
563 intelxl->queue = le16_to_cpu ( buf->vsi.queue[0] );
564 intelxl->qset = le16_to_cpu ( buf->vsi.qset[0] );
565 DBGC ( intelxl, "INTELXL %p VSI %#04x queue %#04x qset %#04x\n",
566 intelxl, intelxl->vsi, intelxl->queue, intelxl->qset );
567
568 return 0;
569 }
570
571 /**
572 * Set VSI promiscuous modes
573 *
574 * @v intelxl Intel device
575 * @ret rc Return status code
576 */
577 static int intelxl_admin_promisc ( struct intelxl_nic *intelxl ) {
578 struct intelxl_admin_descriptor *cmd;
579 struct intelxl_admin_promisc_params *promisc;
580 uint16_t flags;
581 int rc;
582
583 /* Populate descriptor */
584 cmd = intelxl_admin_command_descriptor ( intelxl );
585 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_PROMISC );
586 flags = ( INTELXL_ADMIN_PROMISC_FL_UNICAST |
587 INTELXL_ADMIN_PROMISC_FL_MULTICAST |
588 INTELXL_ADMIN_PROMISC_FL_BROADCAST |
589 INTELXL_ADMIN_PROMISC_FL_VLAN );
590 promisc = &cmd->params.promisc;
591 promisc->flags = cpu_to_le16 ( flags );
592 promisc->valid = cpu_to_le16 ( flags );
593 promisc->vsi = cpu_to_le16 ( intelxl->vsi );
594
595 /* Issue command */
596 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
597 return rc;
598
599 return 0;
600 }
601
602 /**
603 * Restart autonegotiation
604 *
605 * @v intelxl Intel device
606 * @ret rc Return status code
607 */
608 static int intelxl_admin_autoneg ( struct intelxl_nic *intelxl ) {
609 struct intelxl_admin_descriptor *cmd;
610 struct intelxl_admin_autoneg_params *autoneg;
611 int rc;
612
613 /* Populate descriptor */
614 cmd = intelxl_admin_command_descriptor ( intelxl );
615 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_AUTONEG );
616 autoneg = &cmd->params.autoneg;
617 autoneg->flags = ( INTELXL_ADMIN_AUTONEG_FL_RESTART |
618 INTELXL_ADMIN_AUTONEG_FL_ENABLE );
619
620 /* Issue command */
621 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
622 return rc;
623
624 return 0;
625 }
626
627 /**
628 * Get link status
629 *
630 * @v netdev Network device
631 * @ret rc Return status code
632 */
633 static int intelxl_admin_link ( struct net_device *netdev ) {
634 struct intelxl_nic *intelxl = netdev->priv;
635 struct intelxl_admin_descriptor *cmd;
636 struct intelxl_admin_link_params *link;
637 int rc;
638
639 /* Populate descriptor */
640 cmd = intelxl_admin_command_descriptor ( intelxl );
641 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_LINK );
642 link = &cmd->params.link;
643 link->notify = INTELXL_ADMIN_LINK_NOTIFY;
644
645 /* Issue command */
646 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
647 return rc;
648 DBGC ( intelxl, "INTELXL %p PHY %#02x speed %#02x status %#02x\n",
649 intelxl, link->phy, link->speed, link->status );
650
651 /* Update network device */
652 if ( link->status & INTELXL_ADMIN_LINK_UP ) {
653 netdev_link_up ( netdev );
654 } else {
655 netdev_link_down ( netdev );
656 }
657
658 return 0;
659 }
660
/**
 * Handle virtual function event (when VF driver is not present)
 *
 * @v netdev		Network device
 * @v evt		Admin queue event descriptor
 * @v buf		Admin queue event data buffer
 *
 * Declared __weak so that the virtual function driver can provide the
 * real implementation; this default simply discards the event.
 */
__weak void
intelxlvf_admin_event ( struct net_device *netdev __unused,
			struct intelxl_admin_descriptor *evt __unused,
			union intelxl_admin_buffer *buf __unused ) {

	/* Nothing to do */
}
675
676 /**
677 * Refill admin event queue
678 *
679 * @v intelxl Intel device
680 */
681 static void intelxl_refill_admin ( struct intelxl_nic *intelxl ) {
682 struct intelxl_admin *admin = &intelxl->event;
683 const struct intelxl_admin_offsets *regs = admin->regs;
684 void *admin_regs = ( intelxl->regs + admin->base );
685 unsigned int tail;
686
687 /* Update tail pointer */
688 tail = ( ( admin->index + INTELXL_ADMIN_NUM_DESC - 1 ) %
689 INTELXL_ADMIN_NUM_DESC );
690 wmb();
691 writel ( tail, admin_regs + regs->tail );
692 }
693
694 /**
695 * Poll admin event queue
696 *
697 * @v netdev Network device
698 */
699 static void intelxl_poll_admin ( struct net_device *netdev ) {
700 struct intelxl_nic *intelxl = netdev->priv;
701 struct intelxl_admin *admin = &intelxl->event;
702 struct intelxl_admin_descriptor *evt;
703 union intelxl_admin_buffer *buf;
704
705 /* Check for events */
706 while ( 1 ) {
707
708 /* Get next event descriptor and data buffer */
709 evt = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
710 buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
711
712 /* Stop if descriptor is not yet completed */
713 if ( ! ( evt->flags & INTELXL_ADMIN_FL_DD ) )
714 return;
715 DBGC2 ( intelxl, "INTELXL %p admin event %#x:\n",
716 intelxl, admin->index );
717 DBGC2_HDA ( intelxl, virt_to_phys ( evt ), evt,
718 sizeof ( *evt ) );
719 if ( evt->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
720 DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
721 le16_to_cpu ( evt->len ) );
722 }
723
724 /* Handle event */
725 switch ( evt->opcode ) {
726 case cpu_to_le16 ( INTELXL_ADMIN_LINK ):
727 intelxl_admin_link ( netdev );
728 break;
729 case cpu_to_le16 ( INTELXL_ADMIN_SEND_TO_VF ):
730 intelxlvf_admin_event ( netdev, evt, buf );
731 break;
732 default:
733 DBGC ( intelxl, "INTELXL %p admin event %#x "
734 "unrecognised opcode %#04x\n", intelxl,
735 admin->index, le16_to_cpu ( evt->opcode ) );
736 break;
737 }
738
739 /* Reset descriptor and refill queue */
740 intelxl_admin_event_init ( intelxl, admin->index );
741 admin->index++;
742 intelxl_refill_admin ( intelxl );
743 }
744 }
745
746 /**
747 * Open admin queues
748 *
749 * @v intelxl Intel device
750 * @ret rc Return status code
751 */
752 static int intelxl_open_admin ( struct intelxl_nic *intelxl ) {
753 int rc;
754
755 /* Allocate admin event queue */
756 if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->event ) ) != 0 )
757 goto err_alloc_event;
758
759 /* Allocate admin command queue */
760 if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->command ) ) != 0 )
761 goto err_alloc_command;
762
763 /* (Re)open admin queues */
764 intelxl_reopen_admin ( intelxl );
765
766 /* Get firmware version */
767 if ( ( rc = intelxl_admin_version ( intelxl ) ) != 0 )
768 goto err_version;
769
770 /* Report driver version */
771 if ( ( rc = intelxl_admin_driver ( intelxl ) ) != 0 )
772 goto err_driver;
773
774 return 0;
775
776 err_driver:
777 err_version:
778 intelxl_disable_admin ( intelxl, &intelxl->command );
779 intelxl_disable_admin ( intelxl, &intelxl->event );
780 intelxl_free_admin ( intelxl, &intelxl->command );
781 err_alloc_command:
782 intelxl_free_admin ( intelxl, &intelxl->event );
783 err_alloc_event:
784 return rc;
785 }
786
787 /**
788 * Reopen admin queues (after virtual function reset)
789 *
790 * @v intelxl Intel device
791 */
792 static void intelxl_reopen_admin ( struct intelxl_nic *intelxl ) {
793 unsigned int i;
794
795 /* Enable admin event queue */
796 intelxl_enable_admin ( intelxl, &intelxl->event );
797
798 /* Enable admin command queue */
799 intelxl_enable_admin ( intelxl, &intelxl->command );
800
801 /* Initialise all admin event queue descriptors */
802 for ( i = 0 ; i < INTELXL_ADMIN_NUM_DESC ; i++ )
803 intelxl_admin_event_init ( intelxl, i );
804
805 /* Post all descriptors to event queue */
806 intelxl_refill_admin ( intelxl );
807 }
808
809 /**
810 * Close admin queues
811 *
812 * @v intelxl Intel device
813 */
814 static void intelxl_close_admin ( struct intelxl_nic *intelxl ) {
815
816 /* Shut down admin queues */
817 intelxl_admin_shutdown ( intelxl );
818
819 /* Disable admin queues */
820 intelxl_disable_admin ( intelxl, &intelxl->command );
821 intelxl_disable_admin ( intelxl, &intelxl->event );
822
823 /* Free admin queues */
824 intelxl_free_admin ( intelxl, &intelxl->command );
825 intelxl_free_admin ( intelxl, &intelxl->event );
826 }
827
828 /******************************************************************************
829 *
830 * Descriptor rings
831 *
832 ******************************************************************************
833 */
834
835 /**
836 * Allocate descriptor ring
837 *
838 * @v intelxl Intel device
839 * @v ring Descriptor ring
840 * @ret rc Return status code
841 */
842 static int intelxl_alloc_ring ( struct intelxl_nic *intelxl,
843 struct intelxl_ring *ring ) {
844 physaddr_t address;
845 int rc;
846
847 /* Allocate descriptor ring */
848 ring->desc.raw = malloc_dma ( ring->len, INTELXL_ALIGN );
849 if ( ! ring->desc.raw ) {
850 rc = -ENOMEM;
851 goto err_alloc;
852 }
853 address = virt_to_bus ( ring->desc.raw );
854
855 /* Initialise descriptor ring */
856 memset ( ring->desc.raw, 0, ring->len );
857
858 /* Reset tail pointer */
859 writel ( 0, ( intelxl->regs + ring->tail ) );
860
861 /* Reset counters */
862 ring->prod = 0;
863 ring->cons = 0;
864
865 DBGC ( intelxl, "INTELXL %p ring %06x is at [%08llx,%08llx)\n",
866 intelxl, ( ring->reg + ring->tail ),
867 ( ( unsigned long long ) address ),
868 ( ( unsigned long long ) address + ring->len ) );
869
870 return 0;
871
872 free_dma ( ring->desc.raw, ring->len );
873 err_alloc:
874 return rc;
875 }
876
877 /**
878 * Free descriptor ring
879 *
880 * @v intelxl Intel device
881 * @v ring Descriptor ring
882 */
883 static void intelxl_free_ring ( struct intelxl_nic *intelxl __unused,
884 struct intelxl_ring *ring ) {
885
886 /* Free descriptor ring */
887 free_dma ( ring->desc.raw, ring->len );
888 ring->desc.raw = NULL;
889 }
890
/**
 * Dump queue context (for debugging)
 *
 * @v intelxl		Intel device
 * @v op		Context operation (context type selector)
 * @v len		Size of context
 *
 * Reads the queue context back out of the hardware one
 * INTELXL_PFCM_LANCTXDATA line at a time and hex-dumps it.  Best
 * effort only: a timed-out read is not treated as an error, since
 * this is purely a debug aid.
 */
static __attribute__ (( unused )) void
intelxl_context_dump ( struct intelxl_nic *intelxl, uint32_t op, size_t len ) {
	struct intelxl_context_line line;
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int index;
	unsigned int i;

	/* Do nothing unless debug output is enabled */
	if ( ! DBG_EXTRA )
		return;

	/* Dump context, one line per iteration */
	DBGC2 ( intelxl, "INTELXL %p context %#08x:\n", intelxl, op );
	for ( index = 0 ; ( sizeof ( line ) * index ) < len ; index++ ) {

		/* Start context read operation for this sub-line */
		queue = ( intelxl->base + intelxl->queue );
		pfcm_lanctxctl =
			( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			  INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			  INTELXL_PFCM_LANCTXCTL_OP_CODE_READ | op );
		writel ( pfcm_lanctxctl,
			 intelxl->regs + INTELXL_PFCM_LANCTXCTL );

		/* Wait for operation to complete (no error on timeout;
		 * debug-only code proceeds with whatever is present)
		 */
		for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

			/* Check if operation is complete */
			pfcm_lanctxstat = readl ( intelxl->regs +
						  INTELXL_PFCM_LANCTXSTAT );
			if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
				break;

			/* Delay */
			mdelay ( 1 );
		}

		/* Read context data from the LANCTXDATA registers */
		for ( i = 0 ; i < ( sizeof ( line ) /
				    sizeof ( line.raw[0] ) ) ; i++ ) {
			line.raw[i] = readl ( intelxl->regs +
					      INTELXL_PFCM_LANCTXDATA ( i ) );
		}
		DBGC2_HDA ( intelxl, ( sizeof ( line ) * index ),
			    &line, sizeof ( line ) );
	}
}
947
/**
 * Program queue context line
 *
 * @v intelxl		Intel device
 * @v line		Queue context line
 * @v index		Line number
 * @v op		Context operation (context type selector)
 * @ret rc		Return status code
 *
 * Loads one context line into the LANCTXDATA registers, triggers a
 * write operation via LANCTXCTL, and polls LANCTXSTAT for completion.
 */
static int intelxl_context_line ( struct intelxl_nic *intelxl,
				  struct intelxl_context_line *line,
				  unsigned int index, uint32_t op ) {
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int i;

	/* Write context data.  Context lines are stored little-endian
	 * in memory; le32_to_cpu() yields the host-order value that
	 * writel() expects.
	 */
	for ( i = 0; i < ( sizeof ( *line ) / sizeof ( line->raw[0] ) ); i++ ) {
		writel ( le32_to_cpu ( line->raw[i] ),
			 intelxl->regs + INTELXL_PFCM_LANCTXDATA ( i ) );
	}

	/* Start context write operation for this queue and sub-line */
	queue = ( intelxl->base + intelxl->queue );
	pfcm_lanctxctl = ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			   INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			   INTELXL_PFCM_LANCTXCTL_OP_CODE_WRITE | op );
	writel ( pfcm_lanctxctl, intelxl->regs + INTELXL_PFCM_LANCTXCTL );

	/* Wait (up to INTELXL_CTX_MAX_WAIT_MS) for operation to complete */
	for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

		/* Check if operation is complete */
		pfcm_lanctxstat = readl ( intelxl->regs +
					  INTELXL_PFCM_LANCTXSTAT );
		if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( intelxl, "INTELXL %p timed out waiting for context: %#08x\n",
	       intelxl, pfcm_lanctxctl );
	return -ETIMEDOUT;
}
995
996 /**
997 * Program queue context
998 *
999 * @v intelxl Intel device
1000 * @v line Queue context lines
1001 * @v len Size of context
1002 * @v op Context operation
1003 * @ret rc Return status code
1004 */
1005 static int intelxl_context ( struct intelxl_nic *intelxl,
1006 struct intelxl_context_line *line,
1007 size_t len, uint32_t op ) {
1008 unsigned int index;
1009 int rc;
1010
1011 DBGC2 ( intelxl, "INTELXL %p context %#08x len %#zx:\n",
1012 intelxl, op, len );
1013 DBGC2_HDA ( intelxl, 0, line, len );
1014
1015 /* Program one line at a time */
1016 for ( index = 0 ; ( sizeof ( *line ) * index ) < len ; index++ ) {
1017 if ( ( rc = intelxl_context_line ( intelxl, line++, index,
1018 op ) ) != 0 )
1019 return rc;
1020 }
1021
1022 return 0;
1023 }
1024
1025 /**
1026 * Program transmit queue context
1027 *
1028 * @v intelxl Intel device
1029 * @v address Descriptor ring base address
1030 * @ret rc Return status code
1031 */
1032 static int intelxl_context_tx ( struct intelxl_nic *intelxl,
1033 physaddr_t address ) {
1034 union {
1035 struct intelxl_context_tx tx;
1036 struct intelxl_context_line line;
1037 } ctx;
1038 int rc;
1039
1040 /* Initialise context */
1041 memset ( &ctx, 0, sizeof ( ctx ) );
1042 ctx.tx.flags = cpu_to_le16 ( INTELXL_CTX_TX_FL_NEW );
1043 ctx.tx.base = cpu_to_le64 ( INTELXL_CTX_TX_BASE ( address ) );
1044 ctx.tx.count =
1045 cpu_to_le16 ( INTELXL_CTX_TX_COUNT ( INTELXL_TX_NUM_DESC ) );
1046 ctx.tx.qset = INTELXL_CTX_TX_QSET ( intelxl->qset );
1047
1048 /* Program context */
1049 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
1050 INTELXL_PFCM_LANCTXCTL_TYPE_TX ) ) != 0 )
1051 return rc;
1052
1053 return 0;
1054 }
1055
1056 /**
1057 * Program receive queue context
1058 *
1059 * @v intelxl Intel device
1060 * @v address Descriptor ring base address
1061 * @ret rc Return status code
1062 */
1063 static int intelxl_context_rx ( struct intelxl_nic *intelxl,
1064 physaddr_t address ) {
1065 union {
1066 struct intelxl_context_rx rx;
1067 struct intelxl_context_line line;
1068 } ctx;
1069 uint64_t base_count;
1070 int rc;
1071
1072 /* Initialise context */
1073 memset ( &ctx, 0, sizeof ( ctx ) );
1074 base_count = INTELXL_CTX_RX_BASE_COUNT ( address, INTELXL_RX_NUM_DESC );
1075 ctx.rx.base_count = cpu_to_le64 ( base_count );
1076 ctx.rx.len = cpu_to_le16 ( INTELXL_CTX_RX_LEN ( intelxl->mfs ) );
1077 ctx.rx.flags = ( INTELXL_CTX_RX_FL_DSIZE | INTELXL_CTX_RX_FL_CRCSTRIP );
1078 ctx.rx.mfs = cpu_to_le16 ( INTELXL_CTX_RX_MFS ( intelxl->mfs ) );
1079
1080 /* Program context */
1081 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
1082 INTELXL_PFCM_LANCTXCTL_TYPE_RX ) ) != 0 )
1083 return rc;
1084
1085 return 0;
1086 }
1087
1088 /**
1089 * Enable descriptor ring
1090 *
1091 * @v intelxl Intel device
1092 * @v ring Descriptor ring
1093 * @ret rc Return status code
1094 */
1095 static int intelxl_enable_ring ( struct intelxl_nic *intelxl,
1096 struct intelxl_ring *ring ) {
1097 void *ring_regs = ( intelxl->regs + ring->reg );
1098 uint32_t qxx_ena;
1099
1100 /* Enable ring */
1101 writel ( INTELXL_QXX_ENA_REQ, ( ring_regs + INTELXL_QXX_ENA ) );
1102 udelay ( INTELXL_QUEUE_ENABLE_DELAY_US );
1103 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
1104 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) ) {
1105 DBGC ( intelxl, "INTELXL %p ring %06x failed to enable: "
1106 "%#08x\n", intelxl, ring->reg, qxx_ena );
1107 return -EIO;
1108 }
1109
1110 return 0;
1111 }
1112
1113 /**
1114 * Disable descriptor ring
1115 *
1116 * @v intelxl Intel device
1117 * @v ring Descriptor ring
1118 * @ret rc Return status code
1119 */
1120 static int intelxl_disable_ring ( struct intelxl_nic *intelxl,
1121 struct intelxl_ring *ring ) {
1122 void *ring_regs = ( intelxl->regs + ring->reg );
1123 uint32_t qxx_ena;
1124 unsigned int i;
1125
1126 /* Disable ring */
1127 writel ( 0, ( ring_regs + INTELXL_QXX_ENA ) );
1128
1129 /* Wait for ring to be disabled */
1130 for ( i = 0 ; i < INTELXL_QUEUE_DISABLE_MAX_WAIT_MS ; i++ ) {
1131
1132 /* Check if ring is disabled */
1133 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
1134 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) )
1135 return 0;
1136
1137 /* Delay */
1138 mdelay ( 1 );
1139 }
1140
1141 DBGC ( intelxl, "INTELXL %p ring %06x timed out waiting for disable: "
1142 "%#08x\n", intelxl, ring->reg, qxx_ena );
1143 return -ETIMEDOUT;
1144 }
1145
/**
 * Create descriptor ring
 *
 * Allocates the ring memory, programs the queue context via the
 * ring's type-specific context method, and enables the ring.
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_create_ring ( struct intelxl_nic *intelxl,
				 struct intelxl_ring *ring ) {
	physaddr_t address;
	int rc;

	/* Allocate descriptor ring */
	if ( ( rc = intelxl_alloc_ring ( intelxl, ring ) ) != 0 )
		goto err_alloc;

	/* Program queue context (intelxl_context_tx or
	 * intelxl_context_rx, depending on ring type)
	 */
	address = virt_to_bus ( ring->desc.raw );
	if ( ( rc = ring->context ( intelxl, address ) ) != 0 )
		goto err_context;

	/* Enable ring */
	if ( ( rc = intelxl_enable_ring ( intelxl, ring ) ) != 0 )
		goto err_enable;

	return 0;

	/* Deliberately unreachable: documents the unwind step that a
	 * failure path added after ring enable would require.
	 */
	intelxl_disable_ring ( intelxl, ring );
 err_enable:
 err_context:
	intelxl_free_ring ( intelxl, ring );
 err_alloc:
	return rc;
}
1180
/**
 * Destroy descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 */
static void intelxl_destroy_ring ( struct intelxl_nic *intelxl,
				   struct intelxl_ring *ring ) {

	/* Disable ring.  If the hardware refuses, we must leak the
	 * descriptor memory rather than risk the device writing into
	 * freed memory; there is nothing else we can do.
	 */
	if ( intelxl_disable_ring ( intelxl, ring ) != 0 )
		return;

	/* Free descriptor ring */
	intelxl_free_ring ( intelxl, ring );
}
1200
/**
 * Refill receive descriptor ring
 *
 * Allocates fresh I/O buffers and posts them to the receive ring
 * until the ring holds INTELXL_RX_FILL outstanding descriptors (or
 * allocation fails, which is non-fatal).
 *
 * @v intelxl		Intel device
 */
static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
	struct intelxl_rx_data_descriptor *rx;
	struct io_buffer *iobuf;
	unsigned int rx_idx;
	unsigned int rx_tail;
	physaddr_t address;
	unsigned int refilled = 0;

	/* Refill ring */
	while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) {

		/* Allocate I/O buffer sized for the maximum frame */
		iobuf = alloc_iob ( intelxl->mfs );
		if ( ! iobuf ) {
			/* Non-fatal: wait for next refill attempt */
			break;
		}

		/* Get next receive descriptor */
		rx_idx = ( intelxl->rx.prod++ % INTELXL_RX_NUM_DESC );
		rx = &intelxl->rx.desc.rx[rx_idx].data;

		/* Populate receive descriptor with the buffer's bus address */
		address = virt_to_bus ( iobuf->data );
		rx->address = cpu_to_le64 ( address );
		rx->flags = 0;

		/* Record I/O buffer so intelxl_poll_rx() can hand it
		 * back to the network stack on completion
		 */
		assert ( intelxl->rx_iobuf[rx_idx] == NULL );
		intelxl->rx_iobuf[rx_idx] = iobuf;

		DBGC2 ( intelxl, "INTELXL %p RX %d is [%llx,%llx)\n", intelxl,
			rx_idx, ( ( unsigned long long ) address ),
			( ( unsigned long long ) address + intelxl->mfs ) );
		refilled++;
	}

	/* Push descriptors to card, if applicable.  The write barrier
	 * ensures descriptor contents are visible before the tail
	 * pointer update exposes them to the hardware.
	 */
	if ( refilled ) {
		wmb();
		rx_tail = ( intelxl->rx.prod % INTELXL_RX_NUM_DESC );
		writel ( rx_tail, ( intelxl->regs + intelxl->rx.tail ) );
	}
}
1250
1251 /**
1252 * Discard unused receive I/O buffers
1253 *
1254 * @v intelxl Intel device
1255 */
1256 static void intelxl_empty_rx ( struct intelxl_nic *intelxl ) {
1257 unsigned int i;
1258
1259 /* Discard any unused receive buffers */
1260 for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
1261 if ( intelxl->rx_iobuf[i] )
1262 free_iob ( intelxl->rx_iobuf[i] );
1263 intelxl->rx_iobuf[i] = NULL;
1264 }
1265 }
1266
1267 /******************************************************************************
1268 *
1269 * Network device interface
1270 *
1271 ******************************************************************************
1272 */
1273
/**
 * Open network device
 *
 * Programs the MAC address and frame size, binds the transmit queue
 * to this PF, creates and enables both descriptor rings, and kicks
 * off autonegotiation.  Register writes follow the ordering required
 * by the hardware bring-up sequence.
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int intelxl_open ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	union intelxl_receive_address mac;
	unsigned int queue;
	uint32_t prtgl_sal;
	uint32_t prtgl_sah;
	int rc;

	/* Calculate maximum frame size, rounded up to the alignment
	 * granularity required by the hardware
	 */
	intelxl->mfs = ( ( ETH_HLEN + netdev->mtu + 4 /* CRC */ +
			   INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) );

	/* Program MAC address and maximum frame size */
	memset ( &mac, 0, sizeof ( mac ) );
	memcpy ( mac.raw, netdev->ll_addr, sizeof ( mac.raw ) );
	prtgl_sal = le32_to_cpu ( mac.reg.low );
	prtgl_sah = ( le32_to_cpu ( mac.reg.high ) |
		      INTELXL_PRTGL_SAH_MFS_SET ( intelxl->mfs ) );
	writel ( prtgl_sal, intelxl->regs + INTELXL_PRTGL_SAL );
	writel ( prtgl_sah, intelxl->regs + INTELXL_PRTGL_SAH );

	/* Associate transmit queue to PF */
	writel ( ( INTELXL_QXX_CTL_PFVF_Q_PF |
		   INTELXL_QXX_CTL_PFVF_PF_INDX ( intelxl->pf ) ),
		 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_CTL ) );

	/* Clear transmit pre queue disable (uses the absolute queue
	 * number, i.e. offset from the PF's base queue)
	 */
	queue = ( intelxl->base + intelxl->queue );
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_CLEAR_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );

	/* Reset transmit queue head */
	writel ( 0, ( intelxl->regs + INTELXL_QTX_HEAD ( intelxl->queue ) ) );

	/* Create receive descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx ) ) != 0 )
		goto err_create_rx;

	/* Create transmit descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx ) ) != 0 )
		goto err_create_tx;

	/* Fill receive ring */
	intelxl_refill_rx ( intelxl );

	/* Restart autonegotiation */
	intelxl_admin_autoneg ( intelxl );

	/* Update link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* Deliberately unreachable: documents the unwind (re-assert
	 * transmit pre queue disable, then destroy the transmit ring)
	 * that a failure path added after this point would require.
	 */
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
	udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
	intelxl_destroy_ring ( intelxl, &intelxl->tx );
 err_create_tx:
	intelxl_destroy_ring ( intelxl, &intelxl->rx );
 err_create_rx:
	return rc;
}
1344
/**
 * Close network device
 *
 * Tears down in reverse order of intelxl_open(): pre-disables the
 * transmit queue (with the required settling delay), destroys both
 * descriptor rings, and releases any posted receive buffers.
 *
 * @v netdev		Network device
 */
static void intelxl_close ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	unsigned int queue;

	/* Dump contexts (for debugging) */
	intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_TX,
			       sizeof ( struct intelxl_context_tx ) );
	intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_RX,
			       sizeof ( struct intelxl_context_rx ) );

	/* Pre-disable transmit queue, then delay to let in-flight
	 * operations drain before the queue itself is disabled
	 */
	queue = ( intelxl->base + intelxl->queue );
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
	udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );

	/* Destroy transmit descriptor ring */
	intelxl_destroy_ring ( intelxl, &intelxl->tx );

	/* Destroy receive descriptor ring */
	intelxl_destroy_ring ( intelxl, &intelxl->rx );

	/* Discard any unused receive buffers */
	intelxl_empty_rx ( intelxl );
}
1376
1377 /**
1378 * Transmit packet
1379 *
1380 * @v netdev Network device
1381 * @v iobuf I/O buffer
1382 * @ret rc Return status code
1383 */
1384 static int intelxl_transmit ( struct net_device *netdev,
1385 struct io_buffer *iobuf ) {
1386 struct intelxl_nic *intelxl = netdev->priv;
1387 struct intelxl_tx_data_descriptor *tx;
1388 unsigned int tx_idx;
1389 unsigned int tx_tail;
1390 physaddr_t address;
1391 size_t len;
1392
1393 /* Get next transmit descriptor */
1394 if ( ( intelxl->tx.prod - intelxl->tx.cons ) >= INTELXL_TX_FILL ) {
1395 DBGC ( intelxl, "INTELXL %p out of transmit descriptors\n",
1396 intelxl );
1397 return -ENOBUFS;
1398 }
1399 tx_idx = ( intelxl->tx.prod++ % INTELXL_TX_NUM_DESC );
1400 tx_tail = ( intelxl->tx.prod % INTELXL_TX_NUM_DESC );
1401 tx = &intelxl->tx.desc.tx[tx_idx].data;
1402
1403 /* Populate transmit descriptor */
1404 address = virt_to_bus ( iobuf->data );
1405 len = iob_len ( iobuf );
1406 tx->address = cpu_to_le64 ( address );
1407 tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
1408 tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
1409 INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );
1410 wmb();
1411
1412 /* Notify card that there are packets ready to transmit */
1413 writel ( tx_tail, ( intelxl->regs + intelxl->tx.tail ) );
1414
1415 DBGC2 ( intelxl, "INTELXL %p TX %d is [%llx,%llx)\n", intelxl, tx_idx,
1416 ( ( unsigned long long ) address ),
1417 ( ( unsigned long long ) address + len ) );
1418 return 0;
1419 }
1420
1421 /**
1422 * Poll for completed packets
1423 *
1424 * @v netdev Network device
1425 */
1426 static void intelxl_poll_tx ( struct net_device *netdev ) {
1427 struct intelxl_nic *intelxl = netdev->priv;
1428 struct intelxl_tx_writeback_descriptor *tx_wb;
1429 unsigned int tx_idx;
1430
1431 /* Check for completed packets */
1432 while ( intelxl->tx.cons != intelxl->tx.prod ) {
1433
1434 /* Get next transmit descriptor */
1435 tx_idx = ( intelxl->tx.cons % INTELXL_TX_NUM_DESC );
1436 tx_wb = &intelxl->tx.desc.tx[tx_idx].wb;
1437
1438 /* Stop if descriptor is still in use */
1439 if ( ! ( tx_wb->flags & INTELXL_TX_WB_FL_DD ) )
1440 return;
1441 DBGC2 ( intelxl, "INTELXL %p TX %d complete\n",
1442 intelxl, tx_idx );
1443
1444 /* Complete TX descriptor */
1445 netdev_tx_complete_next ( netdev );
1446 intelxl->tx.cons++;
1447 }
1448 }
1449
1450 /**
1451 * Poll for received packets
1452 *
1453 * @v netdev Network device
1454 */
1455 static void intelxl_poll_rx ( struct net_device *netdev ) {
1456 struct intelxl_nic *intelxl = netdev->priv;
1457 struct intelxl_rx_writeback_descriptor *rx_wb;
1458 struct io_buffer *iobuf;
1459 unsigned int rx_idx;
1460 unsigned int tag;
1461 size_t len;
1462
1463 /* Check for received packets */
1464 while ( intelxl->rx.cons != intelxl->rx.prod ) {
1465
1466 /* Get next receive descriptor */
1467 rx_idx = ( intelxl->rx.cons % INTELXL_RX_NUM_DESC );
1468 rx_wb = &intelxl->rx.desc.rx[rx_idx].wb;
1469
1470 /* Stop if descriptor is still in use */
1471 if ( ! ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_DD ) ) )
1472 return;
1473
1474 /* Populate I/O buffer */
1475 iobuf = intelxl->rx_iobuf[rx_idx];
1476 intelxl->rx_iobuf[rx_idx] = NULL;
1477 len = INTELXL_RX_WB_LEN ( le32_to_cpu ( rx_wb->len ) );
1478 iob_put ( iobuf, len );
1479
1480 /* Find VLAN device, if applicable */
1481 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_VLAN ) ) {
1482 tag = VLAN_TAG ( le16_to_cpu ( rx_wb->vlan ) );
1483 } else {
1484 tag = 0;
1485 }
1486
1487 /* Hand off to network stack */
1488 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_RXE ) ) {
1489 DBGC ( intelxl, "INTELXL %p RX %d error (length %zd, "
1490 "flags %08x)\n", intelxl, rx_idx, len,
1491 le32_to_cpu ( rx_wb->flags ) );
1492 vlan_netdev_rx_err ( netdev, tag, iobuf, -EIO );
1493 } else {
1494 DBGC2 ( intelxl, "INTELXL %p RX %d complete (length "
1495 "%zd)\n", intelxl, rx_idx, len );
1496 vlan_netdev_rx ( netdev, tag, iobuf );
1497 }
1498 intelxl->rx.cons++;
1499 }
1500 }
1501
/**
 * Poll for completed and received packets
 *
 * @v netdev		Network device
 */
static void intelxl_poll ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;

	/* Acknowledge interrupts, if applicable.  Rearming is needed
	 * only when interrupts are enabled; pure polling skips it.
	 */
	if ( netdev_irq_enabled ( netdev ) ) {
		writel ( ( INTELXL_PFINT_DYN_CTL0_CLEARPBA |
			   INTELXL_PFINT_DYN_CTL0_INTENA_MASK ),
			 intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
	}

	/* Poll for completed packets */
	intelxl_poll_tx ( netdev );

	/* Poll for received packets */
	intelxl_poll_rx ( netdev );

	/* Poll for admin events */
	intelxl_poll_admin ( netdev );

	/* Refill RX ring with buffers freed by the receive poll above */
	intelxl_refill_rx ( intelxl );
}
1529
1530 /**
1531 * Enable or disable interrupts
1532 *
1533 * @v netdev Network device
1534 * @v enable Interrupts should be enabled
1535 */
1536 static void intelxl_irq ( struct net_device *netdev, int enable ) {
1537 struct intelxl_nic *intelxl = netdev->priv;
1538
1539 if ( enable ) {
1540 writel ( INTELXL_PFINT_DYN_CTL0_INTENA,
1541 intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1542 } else {
1543 writel ( 0, intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1544 }
1545 }
1546
/** Network device operations
 *
 * Method table through which the generic network device core drives
 * this driver.
 */
static struct net_device_operations intelxl_operations = {
	.open = intelxl_open,
	.close = intelxl_close,
	.transmit = intelxl_transmit,
	.poll = intelxl_poll,
	.irq = intelxl_irq,
};
1555
1556 /******************************************************************************
1557 *
1558 * PCI interface
1559 *
1560 ******************************************************************************
1561 */
1562
/**
 * Probe PCI device
 *
 * Allocates the network device, maps and resets the hardware,
 * discovers port/queue assignment via admin queue commands, wires up
 * queue registers and interrupt causes, and registers the device.
 * Resources are released in reverse order via the goto-unwind labels
 * on any failure.
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int intelxl_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct intelxl_nic *intelxl;
	uint32_t pfgen_portnum;
	uint32_t pflan_qalloc;
	int rc;

	/* Allocate and initialise net device (priv holds our
	 * struct intelxl_nic, zeroed below)
	 */
	netdev = alloc_etherdev ( sizeof ( *intelxl ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &intelxl_operations );
	intelxl = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( intelxl, 0, sizeof ( *intelxl ) );
	intelxl->pf = PCI_FUNC ( pci->busdevfn );
	intelxl_init_admin ( &intelxl->command, INTELXL_ADMIN_CMD,
			     &intelxl_admin_offsets );
	intelxl_init_admin ( &intelxl->event, INTELXL_ADMIN_EVT,
			     &intelxl_admin_offsets );
	intelxl_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
			    sizeof ( intelxl->tx.desc.tx[0] ),
			    intelxl_context_tx );
	intelxl_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
			    sizeof ( intelxl->rx.desc.rx[0] ),
			    intelxl_context_rx );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	intelxl->regs = ioremap ( pci->membase, INTELXL_BAR_SIZE );
	if ( ! intelxl->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Reset the NIC */
	if ( ( rc = intelxl_reset ( intelxl ) ) != 0 )
		goto err_reset;

	/* Get port number and base queue number */
	pfgen_portnum = readl ( intelxl->regs + INTELXL_PFGEN_PORTNUM );
	intelxl->port = INTELXL_PFGEN_PORTNUM_PORT_NUM ( pfgen_portnum );
	pflan_qalloc = readl ( intelxl->regs + INTELXL_PFLAN_QALLOC );
	intelxl->base = INTELXL_PFLAN_QALLOC_FIRSTQ ( pflan_qalloc );
	DBGC ( intelxl, "INTELXL %p PF %d using port %d queues [%#04x-%#04x]\n",
	       intelxl, intelxl->pf, intelxl->port, intelxl->base,
	       INTELXL_PFLAN_QALLOC_LASTQ ( pflan_qalloc ) );

	/* Fetch MAC address and maximum frame size */
	if ( ( rc = intelxl_fetch_mac ( intelxl, netdev ) ) != 0 )
		goto err_fetch_mac;

	/* Open admin queues */
	if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 )
		goto err_open_admin;

	/* Get switch configuration */
	if ( ( rc = intelxl_admin_switch ( intelxl ) ) != 0 )
		goto err_admin_switch;

	/* Get VSI configuration */
	if ( ( rc = intelxl_admin_vsi ( intelxl ) ) != 0 )
		goto err_admin_vsi;

	/* Configure switch for promiscuous mode */
	if ( ( rc = intelxl_admin_promisc ( intelxl ) ) != 0 )
		goto err_admin_promisc;

	/* Configure queue register addresses (queue number is known
	 * only after the VSI configuration above)
	 */
	intelxl->tx.reg = INTELXL_QTX ( intelxl->queue );
	intelxl->tx.tail = ( intelxl->tx.reg + INTELXL_QXX_TAIL );
	intelxl->rx.reg = INTELXL_QRX ( intelxl->queue );
	intelxl->rx.tail = ( intelxl->rx.reg + INTELXL_QXX_TAIL );

	/* Configure interrupt causes */
	writel ( ( INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE |
		   INTELXL_QINT_TQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_TQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_QINT_RQCTL_NEXTQ_INDX ( intelxl->queue ) |
		   INTELXL_QINT_RQCTL_NEXTQ_TYPE_TX |
		   INTELXL_QINT_RQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_RQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_PFINT_LNKLST0_FIRSTQ_INDX ( intelxl->queue ) |
		   INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_RX ),
		 intelxl->regs + INTELXL_PFINT_LNKLST0 );
	writel ( INTELXL_PFINT_ICR0_ENA_ADMINQ,
		 intelxl->regs + INTELXL_PFINT_ICR0_ENA );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Set initial link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* Deliberately unreachable: documents the unwind step that a
	 * failure path added after registration would require.
	 */
	unregister_netdev ( netdev );
 err_register_netdev:
 err_admin_promisc:
 err_admin_vsi:
 err_admin_switch:
	intelxl_close_admin ( intelxl );
 err_open_admin:
 err_fetch_mac:
	intelxl_reset ( intelxl );
 err_reset:
	iounmap ( intelxl->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}
1688
/**
 * Remove PCI device
 *
 * Tears down in reverse order of intelxl_probe(): unregister,
 * close admin queues, reset the hardware, then release the network
 * device and register mapping.
 *
 * @v pci		PCI device
 */
static void intelxl_remove ( struct pci_device *pci ) {
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct intelxl_nic *intelxl = netdev->priv;

	/* Unregister network device */
	unregister_netdev ( netdev );

	/* Close admin queues */
	intelxl_close_admin ( intelxl );

	/* Reset the NIC */
	intelxl_reset ( intelxl );

	/* Free network device */
	iounmap ( intelxl->regs );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}
1712
/** PCI device IDs
 *
 * All entries are Intel (vendor 0x8086) X710/XL710/XXV710/X722
 * family controllers handled by this driver.
 */
static struct pci_device_id intelxl_nics[] = {
	PCI_ROM ( 0x8086, 0x1572, "x710-sfp", "X710 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1574, "xl710-qemu", "Virtual XL710", 0 ),
	PCI_ROM ( 0x8086, 0x1580, "xl710-kx-b", "XL710 40GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1581, "xl710-kx-c", "XL710 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1583, "xl710-qda2", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1584, "xl710-qda1", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1585, "x710-qsfp", "X710 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1586, "x710-10gt", "X710 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x1587, "x710-kr2", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1588, "x710-kr2-a", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1589, "x710-10gt4", "X710 10GBASE-T4", 0 ),
	PCI_ROM ( 0x8086, 0x158a, "xxv710", "XXV710 25GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x158b, "xxv710-sfp28", "XXV710 25GbE SFP28", 0 ),
	PCI_ROM ( 0x8086, 0x37ce, "x722-kx", "X722 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x37cf, "x722-qsfp", "X722 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d0, "x722-sfp", "X722 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d1, "x722-1gt", "X722 1GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d2, "x722-10gt", "X722 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d3, "x722-sfp-i", "X722 10GbE SFP+", 0 ),
};
1735
/** PCI driver
 *
 * Registered via the __pci_driver linker table entry.
 */
struct pci_driver intelxl_driver __pci_driver = {
	.ids = intelxl_nics,
	.id_count = ( sizeof ( intelxl_nics ) / sizeof ( intelxl_nics[0] ) ),
	.probe = intelxl_probe,
	.remove = intelxl_remove,
};