[intelxl] Allow admin cookie to hold extended opcode and return code
[ipxe.git] / src / drivers / net / intelxl.c
1 /*
2 * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 #include <stdint.h>
27 #include <string.h>
28 #include <stdio.h>
29 #include <unistd.h>
30 #include <errno.h>
31 #include <byteswap.h>
32 #include <ipxe/netdevice.h>
33 #include <ipxe/ethernet.h>
34 #include <ipxe/if_ether.h>
35 #include <ipxe/vlan.h>
36 #include <ipxe/iobuf.h>
37 #include <ipxe/malloc.h>
38 #include <ipxe/pci.h>
39 #include <ipxe/version.h>
40 #include "intelxl.h"
41
42 /** @file
43 *
44 * Intel 40 Gigabit Ethernet network card driver
45 *
46 */
47
48 static void intelxl_reopen_admin ( struct intelxl_nic *intelxl );
49
50 /******************************************************************************
51 *
52 * Device reset
53 *
54 ******************************************************************************
55 */
56
57 /**
58 * Reset hardware
59 *
60 * @v intelxl Intel device
61 * @ret rc Return status code
62 */
63 static int intelxl_reset ( struct intelxl_nic *intelxl ) {
64 uint32_t pfgen_ctrl;
65
66 /* Perform a global software reset */
67 pfgen_ctrl = readl ( intelxl->regs + INTELXL_PFGEN_CTRL );
68 writel ( ( pfgen_ctrl | INTELXL_PFGEN_CTRL_PFSWR ),
69 intelxl->regs + INTELXL_PFGEN_CTRL );
70 mdelay ( INTELXL_RESET_DELAY_MS );
71
72 return 0;
73 }
74
75 /******************************************************************************
76 *
77 * MAC address
78 *
79 ******************************************************************************
80 */
81
82 /**
83 * Fetch initial MAC address and maximum frame size
84 *
85 * @v intelxl Intel device
86 * @v netdev Network device
87 * @ret rc Return status code
88 */
89 static int intelxl_fetch_mac ( struct intelxl_nic *intelxl,
90 struct net_device *netdev ) {
91 union intelxl_receive_address mac;
92 uint32_t prtgl_sal;
93 uint32_t prtgl_sah;
94 size_t mfs;
95
96 /* Read NVM-loaded address */
97 prtgl_sal = readl ( intelxl->regs + INTELXL_PRTGL_SAL );
98 prtgl_sah = readl ( intelxl->regs + INTELXL_PRTGL_SAH );
99 mac.reg.low = cpu_to_le32 ( prtgl_sal );
100 mac.reg.high = cpu_to_le32 ( prtgl_sah );
101
102 /* Check that address is valid */
103 if ( ! is_valid_ether_addr ( mac.raw ) ) {
104 DBGC ( intelxl, "INTELXL %p has invalid MAC address (%s)\n",
105 intelxl, eth_ntoa ( mac.raw ) );
106 return -ENOENT;
107 }
108
109 /* Copy MAC address */
110 DBGC ( intelxl, "INTELXL %p has autoloaded MAC address %s\n",
111 intelxl, eth_ntoa ( mac.raw ) );
112 memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN );
113
114 /* Get maximum frame size */
115 mfs = INTELXL_PRTGL_SAH_MFS_GET ( prtgl_sah );
116 netdev->max_pkt_len = ( mfs - 4 /* CRC */ );
117
118 return 0;
119 }
120
121 /******************************************************************************
122 *
123 * Admin queue
124 *
125 ******************************************************************************
126 */
127
/** Admin queue register offsets
 *
 * The same offsets structure is shared by the command and event
 * queues; each queue's own register base is held in its
 * struct intelxl_admin "base" field.
 */
static const struct intelxl_admin_offsets intelxl_admin_offsets = {
	.bal = INTELXL_ADMIN_BAL,	/* Base address low */
	.bah = INTELXL_ADMIN_BAH,	/* Base address high */
	.len = INTELXL_ADMIN_LEN,	/* Length (and enable bit) */
	.head = INTELXL_ADMIN_HEAD,	/* Head pointer */
	.tail = INTELXL_ADMIN_TAIL,	/* Tail pointer */
};
136
137 /**
138 * Allocate admin queue
139 *
140 * @v intelxl Intel device
141 * @v admin Admin queue
142 * @ret rc Return status code
143 */
144 static int intelxl_alloc_admin ( struct intelxl_nic *intelxl,
145 struct intelxl_admin *admin ) {
146 size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
147 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
148
149 /* Allocate admin queue */
150 admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN );
151 if ( ! admin->buf )
152 return -ENOMEM;
153 admin->desc = ( ( ( void * ) admin->buf ) + buf_len );
154
155 DBGC ( intelxl, "INTELXL %p A%cQ is at [%08llx,%08llx) buf "
156 "[%08llx,%08llx)\n", intelxl,
157 ( ( admin == &intelxl->command ) ? 'T' : 'R' ),
158 ( ( unsigned long long ) virt_to_bus ( admin->desc ) ),
159 ( ( unsigned long long ) ( virt_to_bus ( admin->desc ) + len ) ),
160 ( ( unsigned long long ) virt_to_bus ( admin->buf ) ),
161 ( ( unsigned long long ) ( virt_to_bus ( admin->buf ) +
162 buf_len ) ) );
163 return 0;
164 }
165
166 /**
167 * Enable admin queue
168 *
169 * @v intelxl Intel device
170 * @v admin Admin queue
171 */
172 static void intelxl_enable_admin ( struct intelxl_nic *intelxl,
173 struct intelxl_admin *admin ) {
174 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
175 const struct intelxl_admin_offsets *regs = admin->regs;
176 void *admin_regs = ( intelxl->regs + admin->base );
177 physaddr_t address;
178
179 /* Initialise admin queue */
180 memset ( admin->desc, 0, len );
181
182 /* Reset head and tail registers */
183 writel ( 0, admin_regs + regs->head );
184 writel ( 0, admin_regs + regs->tail );
185
186 /* Reset queue index */
187 admin->index = 0;
188
189 /* Program queue address */
190 address = virt_to_bus ( admin->desc );
191 writel ( ( address & 0xffffffffUL ), admin_regs + regs->bal );
192 if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
193 writel ( ( ( ( uint64_t ) address ) >> 32 ),
194 admin_regs + regs->bah );
195 } else {
196 writel ( 0, admin_regs + regs->bah );
197 }
198
199 /* Program queue length and enable queue */
200 writel ( ( INTELXL_ADMIN_LEN_LEN ( INTELXL_ADMIN_NUM_DESC ) |
201 INTELXL_ADMIN_LEN_ENABLE ),
202 admin_regs + regs->len );
203 }
204
205 /**
206 * Disable admin queue
207 *
208 * @v intelxl Intel device
209 * @v admin Admin queue
210 */
211 static void intelxl_disable_admin ( struct intelxl_nic *intelxl,
212 struct intelxl_admin *admin ) {
213 const struct intelxl_admin_offsets *regs = admin->regs;
214 void *admin_regs = ( intelxl->regs + admin->base );
215
216 /* Disable queue */
217 writel ( 0, admin_regs + regs->len );
218 }
219
220 /**
221 * Free admin queue
222 *
223 * @v intelxl Intel device
224 * @v admin Admin queue
225 */
226 static void intelxl_free_admin ( struct intelxl_nic *intelxl __unused,
227 struct intelxl_admin *admin ) {
228 size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
229 size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
230
231 /* Free queue */
232 free_dma ( admin->buf, ( buf_len + len ) );
233 }
234
235 /**
236 * Get next admin command queue descriptor
237 *
238 * @v intelxl Intel device
239 * @ret cmd Command descriptor
240 */
241 static struct intelxl_admin_descriptor *
242 intelxl_admin_command_descriptor ( struct intelxl_nic *intelxl ) {
243 struct intelxl_admin *admin = &intelxl->command;
244 struct intelxl_admin_descriptor *cmd;
245
246 /* Get and initialise next descriptor */
247 cmd = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
248 memset ( cmd, 0, sizeof ( *cmd ) );
249 return cmd;
250 }
251
252 /**
253 * Get next admin command queue data buffer
254 *
255 * @v intelxl Intel device
256 * @ret buf Data buffer
257 */
258 static union intelxl_admin_buffer *
259 intelxl_admin_command_buffer ( struct intelxl_nic *intelxl ) {
260 struct intelxl_admin *admin = &intelxl->command;
261 union intelxl_admin_buffer *buf;
262
263 /* Get next data buffer */
264 buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
265 memset ( buf, 0, sizeof ( *buf ) );
266 return buf;
267 }
268
269 /**
270 * Initialise admin event queue descriptor
271 *
272 * @v intelxl Intel device
273 * @v index Event queue index
274 */
275 static void intelxl_admin_event_init ( struct intelxl_nic *intelxl,
276 unsigned int index ) {
277 struct intelxl_admin *admin = &intelxl->event;
278 struct intelxl_admin_descriptor *evt;
279 union intelxl_admin_buffer *buf;
280 uint64_t address;
281
282 /* Initialise descriptor */
283 evt = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
284 buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
285 address = virt_to_bus ( buf );
286 evt->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
287 evt->len = cpu_to_le16 ( sizeof ( *buf ) );
288 evt->params.buffer.high = cpu_to_le32 ( address >> 32 );
289 evt->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
290 }
291
292 /**
293 * Issue admin queue command
294 *
295 * @v intelxl Intel device
296 * @ret rc Return status code
297 */
298 static int intelxl_admin_command ( struct intelxl_nic *intelxl ) {
299 struct intelxl_admin *admin = &intelxl->command;
300 const struct intelxl_admin_offsets *regs = admin->regs;
301 void *admin_regs = ( intelxl->regs + admin->base );
302 struct intelxl_admin_descriptor *cmd;
303 union intelxl_admin_buffer *buf;
304 uint64_t address;
305 uint32_t cookie;
306 unsigned int index;
307 unsigned int tail;
308 unsigned int i;
309 int rc;
310
311 /* Get next queue entry */
312 index = admin->index++;
313 tail = ( admin->index % INTELXL_ADMIN_NUM_DESC );
314 cmd = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
315 buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
316 DBGC2 ( intelxl, "INTELXL %p admin command %#x opcode %#04x",
317 intelxl, index, le16_to_cpu ( cmd->opcode ) );
318 if ( cmd->vopcode )
319 DBGC2 ( intelxl, "/%#08x", le32_to_cpu ( cmd->vopcode ) );
320 DBGC2 ( intelxl, ":\n" );
321
322 /* Sanity checks */
323 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_DD ) ) );
324 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_CMP ) ) );
325 assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_ERR ) ) );
326 assert ( cmd->ret == 0 );
327
328 /* Populate data buffer address if applicable */
329 if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
330 address = virt_to_bus ( buf );
331 cmd->params.buffer.high = cpu_to_le32 ( address >> 32 );
332 cmd->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
333 }
334
335 /* Populate cookie, if not being (ab)used for VF opcode */
336 if ( ! cmd->vopcode )
337 cmd->cookie = cpu_to_le32 ( index );
338
339 /* Record cookie */
340 cookie = cmd->cookie;
341
342 /* Post command descriptor */
343 DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
344 if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
345 DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
346 le16_to_cpu ( cmd->len ) );
347 }
348 wmb();
349 writel ( tail, admin_regs + regs->tail );
350
351 /* Wait for completion */
352 for ( i = 0 ; i < INTELXL_ADMIN_MAX_WAIT_MS ; i++ ) {
353
354 /* If response is not complete, delay 1ms and retry */
355 if ( ! ( cmd->flags & INTELXL_ADMIN_FL_DD ) ) {
356 mdelay ( 1 );
357 continue;
358 }
359 DBGC2 ( intelxl, "INTELXL %p admin command %#x response:\n",
360 intelxl, index );
361 DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd,
362 sizeof ( *cmd ) );
363
364 /* Check for cookie mismatch */
365 if ( cmd->cookie != cookie ) {
366 DBGC ( intelxl, "INTELXL %p admin command %#x bad "
367 "cookie %#x\n", intelxl, index,
368 le32_to_cpu ( cmd->cookie ) );
369 rc = -EPROTO;
370 goto err;
371 }
372
373 /* Check for errors */
374 if ( cmd->ret != 0 ) {
375 DBGC ( intelxl, "INTELXL %p admin command %#x error "
376 "%d\n", intelxl, index,
377 le16_to_cpu ( cmd->ret ) );
378 rc = -EIO;
379 goto err;
380 }
381
382 /* Success */
383 return 0;
384 }
385
386 rc = -ETIMEDOUT;
387 DBGC ( intelxl, "INTELXL %p timed out waiting for admin command %#x:\n",
388 intelxl, index );
389 err:
390 DBGC_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
391 return rc;
392 }
393
394 /**
395 * Get firmware version
396 *
397 * @v intelxl Intel device
398 * @ret rc Return status code
399 */
400 static int intelxl_admin_version ( struct intelxl_nic *intelxl ) {
401 struct intelxl_admin_descriptor *cmd;
402 struct intelxl_admin_version_params *version;
403 unsigned int api;
404 int rc;
405
406 /* Populate descriptor */
407 cmd = intelxl_admin_command_descriptor ( intelxl );
408 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VERSION );
409 version = &cmd->params.version;
410
411 /* Issue command */
412 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
413 return rc;
414 api = le16_to_cpu ( version->api.major );
415 DBGC ( intelxl, "INTELXL %p firmware v%d.%d API v%d.%d\n",
416 intelxl, le16_to_cpu ( version->firmware.major ),
417 le16_to_cpu ( version->firmware.minor ),
418 api, le16_to_cpu ( version->api.minor ) );
419
420 /* Check for API compatibility */
421 if ( api > INTELXL_ADMIN_API_MAJOR ) {
422 DBGC ( intelxl, "INTELXL %p unsupported API v%d\n",
423 intelxl, api );
424 return -ENOTSUP;
425 }
426
427 return 0;
428 }
429
430 /**
431 * Report driver version
432 *
433 * @v intelxl Intel device
434 * @ret rc Return status code
435 */
436 static int intelxl_admin_driver ( struct intelxl_nic *intelxl ) {
437 struct intelxl_admin_descriptor *cmd;
438 struct intelxl_admin_driver_params *driver;
439 union intelxl_admin_buffer *buf;
440 int rc;
441
442 /* Populate descriptor */
443 cmd = intelxl_admin_command_descriptor ( intelxl );
444 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_DRIVER );
445 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF );
446 cmd->len = cpu_to_le16 ( sizeof ( buf->driver ) );
447 driver = &cmd->params.driver;
448 driver->major = product_major_version;
449 driver->minor = product_minor_version;
450 buf = intelxl_admin_command_buffer ( intelxl );
451 snprintf ( buf->driver.name, sizeof ( buf->driver.name ), "%s",
452 ( product_name[0] ? product_name : product_short_name ) );
453
454 /* Issue command */
455 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
456 return rc;
457
458 return 0;
459 }
460
461 /**
462 * Shutdown admin queues
463 *
464 * @v intelxl Intel device
465 * @ret rc Return status code
466 */
467 static int intelxl_admin_shutdown ( struct intelxl_nic *intelxl ) {
468 struct intelxl_admin_descriptor *cmd;
469 struct intelxl_admin_shutdown_params *shutdown;
470 int rc;
471
472 /* Populate descriptor */
473 cmd = intelxl_admin_command_descriptor ( intelxl );
474 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SHUTDOWN );
475 shutdown = &cmd->params.shutdown;
476 shutdown->unloading = INTELXL_ADMIN_SHUTDOWN_UNLOADING;
477
478 /* Issue command */
479 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
480 return rc;
481
482 return 0;
483 }
484
485 /**
486 * Get switch configuration
487 *
488 * @v intelxl Intel device
489 * @ret rc Return status code
490 */
491 static int intelxl_admin_switch ( struct intelxl_nic *intelxl ) {
492 struct intelxl_admin_descriptor *cmd;
493 struct intelxl_admin_switch_params *sw;
494 union intelxl_admin_buffer *buf;
495 int rc;
496
497 /* Populate descriptor */
498 cmd = intelxl_admin_command_descriptor ( intelxl );
499 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SWITCH );
500 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
501 cmd->len = cpu_to_le16 ( sizeof ( buf->sw ) );
502 sw = &cmd->params.sw;
503 buf = intelxl_admin_command_buffer ( intelxl );
504
505 /* Get each configuration in turn */
506 do {
507 /* Issue command */
508 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
509 return rc;
510
511 /* Dump raw configuration */
512 DBGC2 ( intelxl, "INTELXL %p SEID %#04x:\n",
513 intelxl, le16_to_cpu ( buf->sw.cfg.seid ) );
514 DBGC2_HDA ( intelxl, 0, &buf->sw.cfg, sizeof ( buf->sw.cfg ) );
515
516 /* Parse response */
517 if ( buf->sw.cfg.type == INTELXL_ADMIN_SWITCH_TYPE_VSI ) {
518 intelxl->vsi = le16_to_cpu ( buf->sw.cfg.seid );
519 DBGC ( intelxl, "INTELXL %p VSI %#04x uplink %#04x "
520 "downlink %#04x conn %#02x\n", intelxl,
521 intelxl->vsi, le16_to_cpu ( buf->sw.cfg.uplink ),
522 le16_to_cpu ( buf->sw.cfg.downlink ),
523 buf->sw.cfg.connection );
524 }
525
526 } while ( sw->next );
527
528 /* Check that we found a VSI */
529 if ( ! intelxl->vsi ) {
530 DBGC ( intelxl, "INTELXL %p has no VSI\n", intelxl );
531 return -ENOENT;
532 }
533
534 return 0;
535 }
536
537 /**
538 * Get VSI parameters
539 *
540 * @v intelxl Intel device
541 * @ret rc Return status code
542 */
543 static int intelxl_admin_vsi ( struct intelxl_nic *intelxl ) {
544 struct intelxl_admin_descriptor *cmd;
545 struct intelxl_admin_vsi_params *vsi;
546 union intelxl_admin_buffer *buf;
547 int rc;
548
549 /* Populate descriptor */
550 cmd = intelxl_admin_command_descriptor ( intelxl );
551 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VSI );
552 cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
553 cmd->len = cpu_to_le16 ( sizeof ( buf->vsi ) );
554 vsi = &cmd->params.vsi;
555 vsi->vsi = cpu_to_le16 ( intelxl->vsi );
556 buf = intelxl_admin_command_buffer ( intelxl );
557
558 /* Issue command */
559 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
560 return rc;
561
562 /* Parse response */
563 intelxl->queue = le16_to_cpu ( buf->vsi.queue[0] );
564 intelxl->qset = le16_to_cpu ( buf->vsi.qset[0] );
565 DBGC ( intelxl, "INTELXL %p VSI %#04x queue %#04x qset %#04x\n",
566 intelxl, intelxl->vsi, intelxl->queue, intelxl->qset );
567
568 return 0;
569 }
570
571 /**
572 * Set VSI promiscuous modes
573 *
574 * @v intelxl Intel device
575 * @ret rc Return status code
576 */
577 static int intelxl_admin_promisc ( struct intelxl_nic *intelxl ) {
578 struct intelxl_admin_descriptor *cmd;
579 struct intelxl_admin_promisc_params *promisc;
580 uint16_t flags;
581 int rc;
582
583 /* Populate descriptor */
584 cmd = intelxl_admin_command_descriptor ( intelxl );
585 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_PROMISC );
586 flags = ( INTELXL_ADMIN_PROMISC_FL_UNICAST |
587 INTELXL_ADMIN_PROMISC_FL_MULTICAST |
588 INTELXL_ADMIN_PROMISC_FL_BROADCAST |
589 INTELXL_ADMIN_PROMISC_FL_VLAN );
590 promisc = &cmd->params.promisc;
591 promisc->flags = cpu_to_le16 ( flags );
592 promisc->valid = cpu_to_le16 ( flags );
593 promisc->vsi = cpu_to_le16 ( intelxl->vsi );
594
595 /* Issue command */
596 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
597 return rc;
598
599 return 0;
600 }
601
602 /**
603 * Restart autonegotiation
604 *
605 * @v intelxl Intel device
606 * @ret rc Return status code
607 */
608 static int intelxl_admin_autoneg ( struct intelxl_nic *intelxl ) {
609 struct intelxl_admin_descriptor *cmd;
610 struct intelxl_admin_autoneg_params *autoneg;
611 int rc;
612
613 /* Populate descriptor */
614 cmd = intelxl_admin_command_descriptor ( intelxl );
615 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_AUTONEG );
616 autoneg = &cmd->params.autoneg;
617 autoneg->flags = ( INTELXL_ADMIN_AUTONEG_FL_RESTART |
618 INTELXL_ADMIN_AUTONEG_FL_ENABLE );
619
620 /* Issue command */
621 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
622 return rc;
623
624 return 0;
625 }
626
627 /**
628 * Get link status
629 *
630 * @v netdev Network device
631 * @ret rc Return status code
632 */
633 static int intelxl_admin_link ( struct net_device *netdev ) {
634 struct intelxl_nic *intelxl = netdev->priv;
635 struct intelxl_admin_descriptor *cmd;
636 struct intelxl_admin_link_params *link;
637 int rc;
638
639 /* Populate descriptor */
640 cmd = intelxl_admin_command_descriptor ( intelxl );
641 cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_LINK );
642 link = &cmd->params.link;
643 link->notify = INTELXL_ADMIN_LINK_NOTIFY;
644
645 /* Issue command */
646 if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
647 return rc;
648 DBGC ( intelxl, "INTELXL %p PHY %#02x speed %#02x status %#02x\n",
649 intelxl, link->phy, link->speed, link->status );
650
651 /* Update network device */
652 if ( link->status & INTELXL_ADMIN_LINK_UP ) {
653 netdev_link_up ( netdev );
654 } else {
655 netdev_link_down ( netdev );
656 }
657
658 return 0;
659 }
660
661 /**
662 * Refill admin event queue
663 *
664 * @v intelxl Intel device
665 */
666 static void intelxl_refill_admin ( struct intelxl_nic *intelxl ) {
667 struct intelxl_admin *admin = &intelxl->event;
668 const struct intelxl_admin_offsets *regs = admin->regs;
669 void *admin_regs = ( intelxl->regs + admin->base );
670 unsigned int tail;
671
672 /* Update tail pointer */
673 tail = ( ( admin->index + INTELXL_ADMIN_NUM_DESC - 1 ) %
674 INTELXL_ADMIN_NUM_DESC );
675 wmb();
676 writel ( tail, admin_regs + regs->tail );
677 }
678
679 /**
680 * Poll admin event queue
681 *
682 * @v netdev Network device
683 */
684 static void intelxl_poll_admin ( struct net_device *netdev ) {
685 struct intelxl_nic *intelxl = netdev->priv;
686 struct intelxl_admin *admin = &intelxl->event;
687 struct intelxl_admin_descriptor *evt;
688 union intelxl_admin_buffer *buf;
689
690 /* Check for events */
691 while ( 1 ) {
692
693 /* Get next event descriptor and data buffer */
694 evt = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
695 buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
696
697 /* Stop if descriptor is not yet completed */
698 if ( ! ( evt->flags & INTELXL_ADMIN_FL_DD ) )
699 return;
700 DBGC2 ( intelxl, "INTELXL %p admin event %#x:\n",
701 intelxl, admin->index );
702 DBGC2_HDA ( intelxl, virt_to_phys ( evt ), evt,
703 sizeof ( *evt ) );
704 if ( evt->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
705 DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
706 le16_to_cpu ( evt->len ) );
707 }
708
709 /* Handle event */
710 switch ( evt->opcode ) {
711 case cpu_to_le16 ( INTELXL_ADMIN_LINK ):
712 intelxl_admin_link ( netdev );
713 break;
714 default:
715 DBGC ( intelxl, "INTELXL %p admin event %#x "
716 "unrecognised opcode %#04x\n", intelxl,
717 admin->index, le16_to_cpu ( evt->opcode ) );
718 break;
719 }
720
721 /* Reset descriptor and refill queue */
722 intelxl_admin_event_init ( intelxl, admin->index );
723 admin->index++;
724 intelxl_refill_admin ( intelxl );
725 }
726 }
727
/**
 * Open admin queues
 *
 * Allocates and enables both the event and command admin queues,
 * then performs the initial firmware handshake (version check and
 * driver version report).  On failure, unwinds in strict reverse
 * order of construction.
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_open_admin ( struct intelxl_nic *intelxl ) {
	int rc;

	/* Allocate admin event queue */
	if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->event ) ) != 0 )
		goto err_alloc_event;

	/* Allocate admin command queue */
	if ( ( rc = intelxl_alloc_admin ( intelxl, &intelxl->command ) ) != 0 )
		goto err_alloc_command;

	/* (Re)open admin queues */
	intelxl_reopen_admin ( intelxl );

	/* Get firmware version (also verifies API compatibility) */
	if ( ( rc = intelxl_admin_version ( intelxl ) ) != 0 )
		goto err_version;

	/* Report driver version */
	if ( ( rc = intelxl_admin_driver ( intelxl ) ) != 0 )
		goto err_driver;

	return 0;

 err_driver:
 err_version:
	/* Both queues were enabled by intelxl_reopen_admin(); disable
	 * both before freeing.
	 */
	intelxl_disable_admin ( intelxl, &intelxl->command );
	intelxl_disable_admin ( intelxl, &intelxl->event );
	intelxl_free_admin ( intelxl, &intelxl->command );
 err_alloc_command:
	intelxl_free_admin ( intelxl, &intelxl->event );
 err_alloc_event:
	return rc;
}
768
769 /**
770 * Reopen admin queues (after virtual function reset)
771 *
772 * @v intelxl Intel device
773 */
774 static void intelxl_reopen_admin ( struct intelxl_nic *intelxl ) {
775 unsigned int i;
776
777 /* Enable admin event queue */
778 intelxl_enable_admin ( intelxl, &intelxl->event );
779
780 /* Enable admin command queue */
781 intelxl_enable_admin ( intelxl, &intelxl->command );
782
783 /* Initialise all admin event queue descriptors */
784 for ( i = 0 ; i < INTELXL_ADMIN_NUM_DESC ; i++ )
785 intelxl_admin_event_init ( intelxl, i );
786
787 /* Post all descriptors to event queue */
788 intelxl_refill_admin ( intelxl );
789 }
790
791 /**
792 * Close admin queues
793 *
794 * @v intelxl Intel device
795 */
796 static void intelxl_close_admin ( struct intelxl_nic *intelxl ) {
797
798 /* Shut down admin queues */
799 intelxl_admin_shutdown ( intelxl );
800
801 /* Disable admin queues */
802 intelxl_disable_admin ( intelxl, &intelxl->command );
803 intelxl_disable_admin ( intelxl, &intelxl->event );
804
805 /* Free admin queues */
806 intelxl_free_admin ( intelxl, &intelxl->command );
807 intelxl_free_admin ( intelxl, &intelxl->event );
808 }
809
810 /******************************************************************************
811 *
812 * Descriptor rings
813 *
814 ******************************************************************************
815 */
816
/**
 * Dump queue context (for debugging)
 *
 * Reads the queue context back from the hardware, one context line at
 * a time, and hex-dumps it.  This is a debug aid only; it is a no-op
 * unless extra debug output is enabled.
 *
 * @v intelxl		Intel device
 * @v op		Context operation
 * @v len		Size of context
 */
static __attribute__ (( unused )) void
intelxl_context_dump ( struct intelxl_nic *intelxl, uint32_t op, size_t len ) {
	struct intelxl_context_line line;
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int index;
	unsigned int i;

	/* Do nothing unless debug output is enabled */
	if ( ! DBG_EXTRA )
		return;

	/* Dump context */
	DBGC2 ( intelxl, "INTELXL %p context %#08x:\n", intelxl, op );
	for ( index = 0 ; ( sizeof ( line ) * index ) < len ; index++ ) {

		/* Start context read operation for this sub-line */
		queue = ( intelxl->base + intelxl->queue );
		pfcm_lanctxctl =
			( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			  INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			  INTELXL_PFCM_LANCTXCTL_OP_CODE_READ | op );
		writel ( pfcm_lanctxctl,
			 intelxl->regs + INTELXL_PFCM_LANCTXCTL );

		/* Wait for operation to complete.  NOTE(review): a
		 * timeout falls through silently and dumps whatever is
		 * in the data registers; acceptable for a debug-only
		 * helper.
		 */
		for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

			/* Check if operation is complete */
			pfcm_lanctxstat = readl ( intelxl->regs +
						  INTELXL_PFCM_LANCTXSTAT );
			if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
				break;

			/* Delay */
			mdelay ( 1 );
		}

		/* Read context data from the data registers */
		for ( i = 0 ; i < ( sizeof ( line ) /
				    sizeof ( line.raw[0] ) ) ; i++ ) {
			line.raw[i] = readl ( intelxl->regs +
					      INTELXL_PFCM_LANCTXDATA ( i ) );
		}
		DBGC2_HDA ( intelxl, ( sizeof ( line ) * index ),
			    &line, sizeof ( line ) );
	}
}
873
/**
 * Program queue context line
 *
 * Writes one line of queue context into the PFCM_LANCTXDATA registers
 * and then triggers a context write operation, polling until the
 * hardware reports completion.
 *
 * @v intelxl		Intel device
 * @v line		Queue context line
 * @v index		Line number
 * @v op		Context operation
 * @ret rc		Return status code
 */
static int intelxl_context_line ( struct intelxl_nic *intelxl,
				  struct intelxl_context_line *line,
				  unsigned int index, uint32_t op ) {
	uint32_t pfcm_lanctxctl;
	uint32_t pfcm_lanctxstat;
	unsigned int queue;
	unsigned int i;

	/* Write context data.  The context structures are built in
	 * little-endian byte order (see intelxl_context_tx/rx), so
	 * each word is swapped back to host order for the register
	 * write.
	 */
	for ( i = 0; i < ( sizeof ( *line ) / sizeof ( line->raw[0] ) ); i++ ) {
		writel ( le32_to_cpu ( line->raw[i] ),
			 intelxl->regs + INTELXL_PFCM_LANCTXDATA ( i ) );
	}

	/* Start context write operation (data registers must be
	 * populated first).
	 */
	queue = ( intelxl->base + intelxl->queue );
	pfcm_lanctxctl = ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
			   INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
			   INTELXL_PFCM_LANCTXCTL_OP_CODE_WRITE | op );
	writel ( pfcm_lanctxctl, intelxl->regs + INTELXL_PFCM_LANCTXCTL );

	/* Wait for operation to complete (1ms polling granularity) */
	for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

		/* Check if operation is complete */
		pfcm_lanctxstat = readl ( intelxl->regs +
					  INTELXL_PFCM_LANCTXSTAT );
		if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
			return 0;

		/* Delay */
		mdelay ( 1 );
	}

	DBGC ( intelxl, "INTELXL %p timed out waiting for context: %#08x\n",
	       intelxl, pfcm_lanctxctl );
	return -ETIMEDOUT;
}
921
922 /**
923 * Program queue context
924 *
925 * @v intelxl Intel device
926 * @v line Queue context lines
927 * @v len Size of context
928 * @v op Context operation
929 * @ret rc Return status code
930 */
931 static int intelxl_context ( struct intelxl_nic *intelxl,
932 struct intelxl_context_line *line,
933 size_t len, uint32_t op ) {
934 unsigned int index;
935 int rc;
936
937 DBGC2 ( intelxl, "INTELXL %p context %#08x len %#zx:\n",
938 intelxl, op, len );
939 DBGC2_HDA ( intelxl, 0, line, len );
940
941 /* Program one line at a time */
942 for ( index = 0 ; ( sizeof ( *line ) * index ) < len ; index++ ) {
943 if ( ( rc = intelxl_context_line ( intelxl, line++, index,
944 op ) ) != 0 )
945 return rc;
946 }
947
948 return 0;
949 }
950
951 /**
952 * Program transmit queue context
953 *
954 * @v intelxl Intel device
955 * @v address Descriptor ring base address
956 * @ret rc Return status code
957 */
958 static int intelxl_context_tx ( struct intelxl_nic *intelxl,
959 physaddr_t address ) {
960 union {
961 struct intelxl_context_tx tx;
962 struct intelxl_context_line line;
963 } ctx;
964 int rc;
965
966 /* Initialise context */
967 memset ( &ctx, 0, sizeof ( ctx ) );
968 ctx.tx.flags = cpu_to_le16 ( INTELXL_CTX_TX_FL_NEW );
969 ctx.tx.base = cpu_to_le64 ( INTELXL_CTX_TX_BASE ( address ) );
970 ctx.tx.count =
971 cpu_to_le16 ( INTELXL_CTX_TX_COUNT ( INTELXL_TX_NUM_DESC ) );
972 ctx.tx.qset = INTELXL_CTX_TX_QSET ( intelxl->qset );
973
974 /* Program context */
975 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
976 INTELXL_PFCM_LANCTXCTL_TYPE_TX ) ) != 0 )
977 return rc;
978
979 return 0;
980 }
981
982 /**
983 * Program receive queue context
984 *
985 * @v intelxl Intel device
986 * @v address Descriptor ring base address
987 * @ret rc Return status code
988 */
989 static int intelxl_context_rx ( struct intelxl_nic *intelxl,
990 physaddr_t address ) {
991 union {
992 struct intelxl_context_rx rx;
993 struct intelxl_context_line line;
994 } ctx;
995 uint64_t base_count;
996 int rc;
997
998 /* Initialise context */
999 memset ( &ctx, 0, sizeof ( ctx ) );
1000 base_count = INTELXL_CTX_RX_BASE_COUNT ( address, INTELXL_RX_NUM_DESC );
1001 ctx.rx.base_count = cpu_to_le64 ( base_count );
1002 ctx.rx.len = cpu_to_le16 ( INTELXL_CTX_RX_LEN ( intelxl->mfs ) );
1003 ctx.rx.flags = INTELXL_CTX_RX_FL_CRCSTRIP;
1004 ctx.rx.mfs = cpu_to_le16 ( INTELXL_CTX_RX_MFS ( intelxl->mfs ) );
1005
1006 /* Program context */
1007 if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
1008 INTELXL_PFCM_LANCTXCTL_TYPE_RX ) ) != 0 )
1009 return rc;
1010
1011 return 0;
1012 }
1013
1014 /**
1015 * Enable descriptor ring
1016 *
1017 * @v intelxl Intel device
1018 * @v ring Descriptor ring
1019 * @ret rc Return status code
1020 */
1021 static int intelxl_enable_ring ( struct intelxl_nic *intelxl,
1022 struct intelxl_ring *ring ) {
1023 void *ring_regs = ( intelxl->regs + ring->reg );
1024 uint32_t qxx_ena;
1025
1026 /* Enable ring */
1027 writel ( INTELXL_QXX_ENA_REQ, ( ring_regs + INTELXL_QXX_ENA ) );
1028 udelay ( INTELXL_QUEUE_ENABLE_DELAY_US );
1029 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
1030 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) ) {
1031 DBGC ( intelxl, "INTELXL %p ring %06x failed to enable: "
1032 "%#08x\n", intelxl, ring->reg, qxx_ena );
1033 return -EIO;
1034 }
1035
1036 return 0;
1037 }
1038
1039 /**
1040 * Disable descriptor ring
1041 *
1042 * @v intelxl Intel device
1043 * @v ring Descriptor ring
1044 * @ret rc Return status code
1045 */
1046 static int intelxl_disable_ring ( struct intelxl_nic *intelxl,
1047 struct intelxl_ring *ring ) {
1048 void *ring_regs = ( intelxl->regs + ring->reg );
1049 uint32_t qxx_ena;
1050 unsigned int i;
1051
1052 /* Disable ring */
1053 writel ( 0, ( ring_regs + INTELXL_QXX_ENA ) );
1054
1055 /* Wait for ring to be disabled */
1056 for ( i = 0 ; i < INTELXL_QUEUE_DISABLE_MAX_WAIT_MS ; i++ ) {
1057
1058 /* Check if ring is disabled */
1059 qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
1060 if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) )
1061 return 0;
1062
1063 /* Delay */
1064 mdelay ( 1 );
1065 }
1066
1067 DBGC ( intelxl, "INTELXL %p ring %06x timed out waiting for disable: "
1068 "%#08x\n", intelxl, ring->reg, qxx_ena );
1069 return -ETIMEDOUT;
1070 }
1071
/**
 * Create descriptor ring
 *
 * Allocates and zeroes DMA-coherent descriptor memory, resets the
 * ring tail pointer, programs the queue context via the ring's
 * context method, and enables the ring.
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_create_ring ( struct intelxl_nic *intelxl,
				 struct intelxl_ring *ring ) {
	void *ring_regs = ( intelxl->regs + ring->reg );
	physaddr_t address;
	int rc;

	/* Allocate descriptor ring */
	ring->desc = malloc_dma ( ring->len, INTELXL_ALIGN );
	if ( ! ring->desc ) {
		rc = -ENOMEM;
		goto err_alloc;
	}

	/* Initialise descriptor ring */
	memset ( ring->desc, 0, ring->len );

	/* Reset tail pointer */
	writel ( 0, ( ring_regs + INTELXL_QXX_TAIL ) );

	/* Program queue context */
	address = virt_to_bus ( ring->desc );
	if ( ( rc = ring->context ( intelxl, address ) ) != 0 )
		goto err_context;

	/* Enable ring */
	if ( ( rc = intelxl_enable_ring ( intelxl, ring ) ) != 0 )
		goto err_enable;

	/* Reset counters */
	ring->prod = 0;
	ring->cons = 0;

	DBGC ( intelxl, "INTELXL %p ring %06x is at [%08llx,%08llx)\n",
	       intelxl, ring->reg, ( ( unsigned long long ) address ),
	       ( ( unsigned long long ) address + ring->len ) );

	return 0;

	/* Unreachable unwind step (no goto targets this line; it sits
	 * after "return 0"), retained so that the error path stays
	 * complete if a failable step is ever added after
	 * intelxl_enable_ring().
	 */
	intelxl_disable_ring ( intelxl, ring );
 err_enable:
 err_context:
	free_dma ( ring->desc, ring->len );
 err_alloc:
	return rc;
}
1124
1125 /**
1126 * Destroy descriptor ring
1127 *
1128 * @v intelxl Intel device
1129 * @v ring Descriptor ring
1130 */
1131 static void intelxl_destroy_ring ( struct intelxl_nic *intelxl,
1132 struct intelxl_ring *ring ) {
1133 int rc;
1134
1135 /* Disable ring */
1136 if ( ( rc = intelxl_disable_ring ( intelxl, ring ) ) != 0 ) {
1137 /* Leak memory; there's nothing else we can do */
1138 return;
1139 }
1140
1141 /* Free descriptor ring */
1142 free_dma ( ring->desc, ring->len );
1143 ring->desc = NULL;
1144 }
1145
1146 /**
1147 * Refill receive descriptor ring
1148 *
1149 * @v intelxl Intel device
1150 */
1151 static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
1152 struct intelxl_rx_data_descriptor *rx;
1153 struct io_buffer *iobuf;
1154 unsigned int rx_idx;
1155 unsigned int rx_tail;
1156 physaddr_t address;
1157 unsigned int refilled = 0;
1158
1159 /* Refill ring */
1160 while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) {
1161
1162 /* Allocate I/O buffer */
1163 iobuf = alloc_iob ( intelxl->mfs );
1164 if ( ! iobuf ) {
1165 /* Wait for next refill */
1166 break;
1167 }
1168
1169 /* Get next receive descriptor */
1170 rx_idx = ( intelxl->rx.prod++ % INTELXL_RX_NUM_DESC );
1171 rx = &intelxl->rx.desc[rx_idx].rx;
1172
1173 /* Populate receive descriptor */
1174 address = virt_to_bus ( iobuf->data );
1175 rx->address = cpu_to_le64 ( address );
1176 rx->flags = 0;
1177
1178 /* Record I/O buffer */
1179 assert ( intelxl->rx_iobuf[rx_idx] == NULL );
1180 intelxl->rx_iobuf[rx_idx] = iobuf;
1181
1182 DBGC2 ( intelxl, "INTELXL %p RX %d is [%llx,%llx)\n", intelxl,
1183 rx_idx, ( ( unsigned long long ) address ),
1184 ( ( unsigned long long ) address + intelxl->mfs ) );
1185 refilled++;
1186 }
1187
1188 /* Push descriptors to card, if applicable */
1189 if ( refilled ) {
1190 wmb();
1191 rx_tail = ( intelxl->rx.prod % INTELXL_RX_NUM_DESC );
1192 writel ( rx_tail,
1193 ( intelxl->regs + intelxl->rx.reg + INTELXL_QXX_TAIL));
1194 }
1195 }
1196
1197 /******************************************************************************
1198 *
1199 * Network device interface
1200 *
1201 ******************************************************************************
1202 */
1203
/**
 * Open network device
 *
 * Programs MAC address and frame size, attaches the transmit queue
 * to this PF, creates both descriptor rings, fills the receive ring,
 * and kicks off autonegotiation.  Register writes are ordered as
 * required by the hardware bring-up sequence.
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int intelxl_open ( struct net_device *netdev ) {
	struct intelxl_nic *intelxl = netdev->priv;
	union intelxl_receive_address mac;
	unsigned int queue;
	uint32_t prtgl_sal;
	uint32_t prtgl_sah;
	int rc;

	/* Calculate maximum frame size: Ethernet header + MTU + CRC,
	 * rounded up to the device's alignment granularity.
	 */
	intelxl->mfs = ( ( ETH_HLEN + netdev->mtu + 4 /* CRC */ +
			   INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) );

	/* Program MAC address and maximum frame size */
	memset ( &mac, 0, sizeof ( mac ) );
	memcpy ( mac.raw, netdev->ll_addr, sizeof ( mac.raw ) );
	prtgl_sal = le32_to_cpu ( mac.reg.low );
	prtgl_sah = ( le32_to_cpu ( mac.reg.high ) |
		      INTELXL_PRTGL_SAH_MFS_SET ( intelxl->mfs ) );
	writel ( prtgl_sal, intelxl->regs + INTELXL_PRTGL_SAL );
	writel ( prtgl_sah, intelxl->regs + INTELXL_PRTGL_SAH );

	/* Associate transmit queue to PF */
	writel ( ( INTELXL_QXX_CTL_PFVF_Q_PF |
		   INTELXL_QXX_CTL_PFVF_PF_INDX ( intelxl->pf ) ),
		 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_CTL ) );

	/* Clear transmit pre queue disable (queue index is absolute,
	 * i.e. offset by the PF's base queue number).
	 */
	queue = ( intelxl->base + intelxl->queue );
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_CLEAR_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );

	/* Reset transmit queue head */
	writel ( 0, ( intelxl->regs + INTELXL_QTX_HEAD ( intelxl->queue ) ) );

	/* Create receive descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx ) ) != 0 )
		goto err_create_rx;

	/* Create transmit descriptor ring */
	if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx ) ) != 0 )
		goto err_create_tx;

	/* Fill receive ring */
	intelxl_refill_rx ( intelxl );

	/* Restart autonegotiation */
	intelxl_admin_autoneg ( intelxl );

	/* Update link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* Unreachable unwind steps (no goto targets these lines),
	 * retained so that the error path stays complete if a
	 * failable step is ever added after ring creation.
	 */
	writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
		   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
		 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
	udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
	intelxl_destroy_ring ( intelxl, &intelxl->tx );
 err_create_tx:
	intelxl_destroy_ring ( intelxl, &intelxl->rx );
 err_create_rx:
	return rc;
}
1274
1275 /**
1276 * Close network device
1277 *
1278 * @v netdev Network device
1279 */
1280 static void intelxl_close ( struct net_device *netdev ) {
1281 struct intelxl_nic *intelxl = netdev->priv;
1282 unsigned int queue;
1283 unsigned int i;
1284
1285 /* Dump contexts (for debugging) */
1286 intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_TX,
1287 sizeof ( struct intelxl_context_tx ) );
1288 intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_RX,
1289 sizeof ( struct intelxl_context_rx ) );
1290
1291 /* Pre-disable transmit queue */
1292 queue = ( intelxl->base + intelxl->queue );
1293 writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
1294 INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
1295 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
1296 udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
1297
1298 /* Destroy transmit descriptor ring */
1299 intelxl_destroy_ring ( intelxl, &intelxl->tx );
1300
1301 /* Destroy receive descriptor ring */
1302 intelxl_destroy_ring ( intelxl, &intelxl->rx );
1303
1304 /* Discard any unused receive buffers */
1305 for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
1306 if ( intelxl->rx_iobuf[i] )
1307 free_iob ( intelxl->rx_iobuf[i] );
1308 intelxl->rx_iobuf[i] = NULL;
1309 }
1310 }
1311
1312 /**
1313 * Transmit packet
1314 *
1315 * @v netdev Network device
1316 * @v iobuf I/O buffer
1317 * @ret rc Return status code
1318 */
1319 static int intelxl_transmit ( struct net_device *netdev,
1320 struct io_buffer *iobuf ) {
1321 struct intelxl_nic *intelxl = netdev->priv;
1322 struct intelxl_tx_data_descriptor *tx;
1323 unsigned int tx_idx;
1324 unsigned int tx_tail;
1325 physaddr_t address;
1326 size_t len;
1327
1328 /* Get next transmit descriptor */
1329 if ( ( intelxl->tx.prod - intelxl->tx.cons ) >= INTELXL_TX_FILL ) {
1330 DBGC ( intelxl, "INTELXL %p out of transmit descriptors\n",
1331 intelxl );
1332 return -ENOBUFS;
1333 }
1334 tx_idx = ( intelxl->tx.prod++ % INTELXL_TX_NUM_DESC );
1335 tx_tail = ( intelxl->tx.prod % INTELXL_TX_NUM_DESC );
1336 tx = &intelxl->tx.desc[tx_idx].tx;
1337
1338 /* Populate transmit descriptor */
1339 address = virt_to_bus ( iobuf->data );
1340 len = iob_len ( iobuf );
1341 tx->address = cpu_to_le64 ( address );
1342 tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
1343 tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
1344 INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );
1345 wmb();
1346
1347 /* Notify card that there are packets ready to transmit */
1348 writel ( tx_tail,
1349 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_TAIL ) );
1350
1351 DBGC2 ( intelxl, "INTELXL %p TX %d is [%llx,%llx)\n", intelxl, tx_idx,
1352 ( ( unsigned long long ) address ),
1353 ( ( unsigned long long ) address + len ) );
1354 return 0;
1355 }
1356
1357 /**
1358 * Poll for completed packets
1359 *
1360 * @v netdev Network device
1361 */
1362 static void intelxl_poll_tx ( struct net_device *netdev ) {
1363 struct intelxl_nic *intelxl = netdev->priv;
1364 struct intelxl_tx_writeback_descriptor *tx_wb;
1365 unsigned int tx_idx;
1366
1367 /* Check for completed packets */
1368 while ( intelxl->tx.cons != intelxl->tx.prod ) {
1369
1370 /* Get next transmit descriptor */
1371 tx_idx = ( intelxl->tx.cons % INTELXL_TX_NUM_DESC );
1372 tx_wb = &intelxl->tx.desc[tx_idx].tx_wb;
1373
1374 /* Stop if descriptor is still in use */
1375 if ( ! ( tx_wb->flags & INTELXL_TX_WB_FL_DD ) )
1376 return;
1377 DBGC2 ( intelxl, "INTELXL %p TX %d complete\n",
1378 intelxl, tx_idx );
1379
1380 /* Complete TX descriptor */
1381 netdev_tx_complete_next ( netdev );
1382 intelxl->tx.cons++;
1383 }
1384 }
1385
1386 /**
1387 * Poll for received packets
1388 *
1389 * @v netdev Network device
1390 */
1391 static void intelxl_poll_rx ( struct net_device *netdev ) {
1392 struct intelxl_nic *intelxl = netdev->priv;
1393 struct intelxl_rx_writeback_descriptor *rx_wb;
1394 struct io_buffer *iobuf;
1395 unsigned int rx_idx;
1396 unsigned int tag;
1397 size_t len;
1398
1399 /* Check for received packets */
1400 while ( intelxl->rx.cons != intelxl->rx.prod ) {
1401
1402 /* Get next receive descriptor */
1403 rx_idx = ( intelxl->rx.cons % INTELXL_RX_NUM_DESC );
1404 rx_wb = &intelxl->rx.desc[rx_idx].rx_wb;
1405
1406 /* Stop if descriptor is still in use */
1407 if ( ! ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_DD ) ) )
1408 return;
1409
1410 /* Populate I/O buffer */
1411 iobuf = intelxl->rx_iobuf[rx_idx];
1412 intelxl->rx_iobuf[rx_idx] = NULL;
1413 len = INTELXL_RX_WB_LEN ( le32_to_cpu ( rx_wb->len ) );
1414 iob_put ( iobuf, len );
1415
1416 /* Find VLAN device, if applicable */
1417 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_VLAN ) ) {
1418 tag = VLAN_TAG ( le16_to_cpu ( rx_wb->vlan ) );
1419 } else {
1420 tag = 0;
1421 }
1422
1423 /* Hand off to network stack */
1424 if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_RXE ) ) {
1425 DBGC ( intelxl, "INTELXL %p RX %d error (length %zd, "
1426 "flags %08x)\n", intelxl, rx_idx, len,
1427 le32_to_cpu ( rx_wb->flags ) );
1428 vlan_netdev_rx_err ( netdev, tag, iobuf, -EIO );
1429 } else {
1430 DBGC2 ( intelxl, "INTELXL %p RX %d complete (length "
1431 "%zd)\n", intelxl, rx_idx, len );
1432 vlan_netdev_rx ( netdev, tag, iobuf );
1433 }
1434 intelxl->rx.cons++;
1435 }
1436 }
1437
1438 /**
1439 * Poll for completed and received packets
1440 *
1441 * @v netdev Network device
1442 */
1443 static void intelxl_poll ( struct net_device *netdev ) {
1444 struct intelxl_nic *intelxl = netdev->priv;
1445
1446 /* Acknowledge interrupts, if applicable */
1447 if ( netdev_irq_enabled ( netdev ) ) {
1448 writel ( ( INTELXL_PFINT_DYN_CTL0_CLEARPBA |
1449 INTELXL_PFINT_DYN_CTL0_INTENA_MASK ),
1450 intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1451 }
1452
1453 /* Poll for completed packets */
1454 intelxl_poll_tx ( netdev );
1455
1456 /* Poll for received packets */
1457 intelxl_poll_rx ( netdev );
1458
1459 /* Poll for admin events */
1460 intelxl_poll_admin ( netdev );
1461
1462 /* Refill RX ring */
1463 intelxl_refill_rx ( intelxl );
1464 }
1465
1466 /**
1467 * Enable or disable interrupts
1468 *
1469 * @v netdev Network device
1470 * @v enable Interrupts should be enabled
1471 */
1472 static void intelxl_irq ( struct net_device *netdev, int enable ) {
1473 struct intelxl_nic *intelxl = netdev->priv;
1474
1475 if ( enable ) {
1476 writel ( INTELXL_PFINT_DYN_CTL0_INTENA,
1477 intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1478 } else {
1479 writel ( 0, intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
1480 }
1481 }
1482
/** Network device operations */
static struct net_device_operations intelxl_operations = {
	.open		= intelxl_open,		/* bring up rings and link */
	.close		= intelxl_close,	/* tear down rings */
	.transmit	= intelxl_transmit,	/* post a packet for TX */
	.poll		= intelxl_poll,		/* service TX/RX/admin */
	.irq		= intelxl_irq,		/* enable/disable interrupts */
};
1491
1492 /******************************************************************************
1493 *
1494 * PCI interface
1495 *
1496 ******************************************************************************
1497 */
1498
/**
 * Probe PCI device
 *
 * Allocates the network device, maps BAR registers, resets the NIC,
 * discovers port/queue assignment, fetches the MAC address, opens the
 * admin queues, queries switch/VSI configuration, programs interrupt
 * causes, and registers the network device.
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int intelxl_probe ( struct pci_device *pci ) {
	struct net_device *netdev;
	struct intelxl_nic *intelxl;
	uint32_t pfgen_portnum;
	uint32_t pflan_qalloc;
	int rc;

	/* Allocate and initialise net device */
	netdev = alloc_etherdev ( sizeof ( *intelxl ) );
	if ( ! netdev ) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	netdev_init ( netdev, &intelxl_operations );
	intelxl = netdev->priv;
	pci_set_drvdata ( pci, netdev );
	netdev->dev = &pci->dev;
	memset ( intelxl, 0, sizeof ( *intelxl ) );
	/* PF number is taken from the PCI function number */
	intelxl->pf = PCI_FUNC ( pci->busdevfn );
	/* Set up admin command/event queues and TX/RX descriptor rings */
	intelxl_init_admin ( &intelxl->command, INTELXL_ADMIN_CMD,
			     &intelxl_admin_offsets );
	intelxl_init_admin ( &intelxl->event, INTELXL_ADMIN_EVT,
			     &intelxl_admin_offsets );
	intelxl_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
			    intelxl_context_tx );
	intelxl_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
			    intelxl_context_rx );

	/* Fix up PCI device */
	adjust_pci_device ( pci );

	/* Map registers */
	intelxl->regs = ioremap ( pci->membase, INTELXL_BAR_SIZE );
	if ( ! intelxl->regs ) {
		rc = -ENODEV;
		goto err_ioremap;
	}

	/* Reset the NIC */
	if ( ( rc = intelxl_reset ( intelxl ) ) != 0 )
		goto err_reset;

	/* Get port number and base queue number */
	pfgen_portnum = readl ( intelxl->regs + INTELXL_PFGEN_PORTNUM );
	intelxl->port = INTELXL_PFGEN_PORTNUM_PORT_NUM ( pfgen_portnum );
	pflan_qalloc = readl ( intelxl->regs + INTELXL_PFLAN_QALLOC );
	intelxl->base = INTELXL_PFLAN_QALLOC_FIRSTQ ( pflan_qalloc );
	DBGC ( intelxl, "INTELXL %p PF %d using port %d queues [%#04x-%#04x]\n",
	       intelxl, intelxl->pf, intelxl->port, intelxl->base,
	       INTELXL_PFLAN_QALLOC_LASTQ ( pflan_qalloc ) );

	/* Fetch MAC address and maximum frame size */
	if ( ( rc = intelxl_fetch_mac ( intelxl, netdev ) ) != 0 )
		goto err_fetch_mac;

	/* Open admin queues */
	if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 )
		goto err_open_admin;

	/* Get switch configuration */
	if ( ( rc = intelxl_admin_switch ( intelxl ) ) != 0 )
		goto err_admin_switch;

	/* Get VSI configuration */
	if ( ( rc = intelxl_admin_vsi ( intelxl ) ) != 0 )
		goto err_admin_vsi;

	/* Configure switch for promiscuous mode */
	if ( ( rc = intelxl_admin_promisc ( intelxl ) ) != 0 )
		goto err_admin_promisc;

	/* Configure queue register addresses */
	intelxl->tx.reg = INTELXL_QTX ( intelxl->queue );
	intelxl->rx.reg = INTELXL_QRX ( intelxl->queue );

	/* Configure interrupt causes: chain TX queue, RX queue, and
	 * admin queue events into the PF interrupt.
	 */
	writel ( ( INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE |
		   INTELXL_QINT_TQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_TQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_QINT_RQCTL_NEXTQ_INDX ( intelxl->queue ) |
		   INTELXL_QINT_RQCTL_NEXTQ_TYPE_TX |
		   INTELXL_QINT_RQCTL_CAUSE_ENA ),
		 intelxl->regs + INTELXL_QINT_RQCTL ( intelxl->queue ) );
	writel ( ( INTELXL_PFINT_LNKLST0_FIRSTQ_INDX ( intelxl->queue ) |
		   INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_RX ),
		 intelxl->regs + INTELXL_PFINT_LNKLST0 );
	writel ( INTELXL_PFINT_ICR0_ENA_ADMINQ,
		 intelxl->regs + INTELXL_PFINT_ICR0_ENA );

	/* Register network device */
	if ( ( rc = register_netdev ( netdev ) ) != 0 )
		goto err_register_netdev;

	/* Set initial link state */
	intelxl_admin_link ( netdev );

	return 0;

	/* Unreachable unwind step (no goto targets this line),
	 * retained so that the error path stays complete if a
	 * failable step is ever added after register_netdev().
	 */
	unregister_netdev ( netdev );
 err_register_netdev:
 err_admin_promisc:
 err_admin_vsi:
 err_admin_switch:
	intelxl_close_admin ( intelxl );
 err_open_admin:
 err_fetch_mac:
	intelxl_reset ( intelxl );
 err_reset:
	iounmap ( intelxl->regs );
 err_ioremap:
	netdev_nullify ( netdev );
	netdev_put ( netdev );
 err_alloc:
	return rc;
}
1620
/**
 * Remove PCI device
 *
 * Tears down in reverse order of intelxl_probe(): unregister the
 * network device, close the admin queues, reset the NIC, then
 * release register mapping and device memory.
 *
 * @v pci		PCI device
 */
static void intelxl_remove ( struct pci_device *pci ) {
	struct net_device *netdev = pci_get_drvdata ( pci );
	struct intelxl_nic *intelxl = netdev->priv;

	/* Unregister network device */
	unregister_netdev ( netdev );

	/* Close admin queues */
	intelxl_close_admin ( intelxl );

	/* Reset the NIC */
	intelxl_reset ( intelxl );

	/* Free network device */
	iounmap ( intelxl->regs );
	netdev_nullify ( netdev );
	netdev_put ( netdev );
}
1644
/** PCI device IDs (XL710/X710/XXV710/X722 family devices) */
static struct pci_device_id intelxl_nics[] = {
	PCI_ROM ( 0x8086, 0x1572, "x710-sfp", "X710 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1574, "xl710-qemu", "Virtual XL710", 0 ),
	PCI_ROM ( 0x8086, 0x1580, "xl710-kx-b", "XL710 40GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1581, "xl710-kx-c", "XL710 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1583, "xl710-qda2", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1584, "xl710-qda1", "XL710 40GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1585, "x710-qsfp", "X710 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x1586, "x710-10gt", "X710 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x1587, "x710-kr2", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1588, "x710-kr2-a", "XL710 20GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x1589, "x710-10gt4", "X710 10GBASE-T4", 0 ),
	PCI_ROM ( 0x8086, 0x158a, "xxv710", "XXV710 25GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x158b, "xxv710-sfp28", "XXV710 25GbE SFP28", 0 ),
	PCI_ROM ( 0x8086, 0x37ce, "x722-kx", "X722 10GbE backplane", 0 ),
	PCI_ROM ( 0x8086, 0x37cf, "x722-qsfp", "X722 10GbE QSFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d0, "x722-sfp", "X722 10GbE SFP+", 0 ),
	PCI_ROM ( 0x8086, 0x37d1, "x722-1gt", "X722 1GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d2, "x722-10gt", "X722 10GBASE-T", 0 ),
	PCI_ROM ( 0x8086, 0x37d3, "x722-sfp-i", "X722 10GbE SFP+", 0 ),
};
1667
/** PCI driver for Intel 40GbE family network cards */
struct pci_driver intelxl_driver __pci_driver = {
	.ids = intelxl_nics,
	.id_count = ( sizeof ( intelxl_nics ) / sizeof ( intelxl_nics[0] ) ),
	.probe = intelxl_probe,
	.remove = intelxl_remove,
};