[intelxl] Use one admin queue buffer per admin queue descriptor
[ipxe.git] src/drivers/net/intelxl.c
/*
 * Copyright (C) 2018 Michael Brown <mbrown@fensystems.co.uk>.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 * You can also choose to distribute this program under the terms of
 * the Unmodified Binary Distribution Licence (as given in the file
 * COPYING.UBDL), provided that you have satisfied its requirements.
 */

FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );

#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <byteswap.h>
#include <ipxe/netdevice.h>
#include <ipxe/ethernet.h>
#include <ipxe/if_ether.h>
#include <ipxe/vlan.h>
#include <ipxe/iobuf.h>
#include <ipxe/malloc.h>
#include <ipxe/pci.h>
#include <ipxe/version.h>
#include "intelxl.h"

/** @file
 *
 * Intel 40 Gigabit Ethernet network card driver
 *
 */

/******************************************************************************
 *
 * Device reset
 *
 ******************************************************************************
 */

/**
 * Reset hardware
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_reset ( struct intelxl_nic *intelxl ) {
        uint32_t pfgen_ctrl;

        /* Perform a global software reset */
        pfgen_ctrl = readl ( intelxl->regs + INTELXL_PFGEN_CTRL );
        writel ( ( pfgen_ctrl | INTELXL_PFGEN_CTRL_PFSWR ),
                 intelxl->regs + INTELXL_PFGEN_CTRL );
        mdelay ( INTELXL_RESET_DELAY_MS );

        return 0;
}
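
/* The PF software reset is not polled for completion: the driver
 * relies on the fixed INTELXL_RESET_DELAY_MS delay above being long
 * enough for the reset to finish before any further register access.
 */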

/******************************************************************************
 *
 * MAC address
 *
 ******************************************************************************
 */

/**
 * Fetch initial MAC address and maximum frame size
 *
 * @v intelxl		Intel device
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int intelxl_fetch_mac ( struct intelxl_nic *intelxl,
                               struct net_device *netdev ) {
        union intelxl_receive_address mac;
        uint32_t prtgl_sal;
        uint32_t prtgl_sah;
        size_t mfs;

        /* Read NVM-loaded address */
        prtgl_sal = readl ( intelxl->regs + INTELXL_PRTGL_SAL );
        prtgl_sah = readl ( intelxl->regs + INTELXL_PRTGL_SAH );
        mac.reg.low = cpu_to_le32 ( prtgl_sal );
        mac.reg.high = cpu_to_le32 ( prtgl_sah );

        /* Check that address is valid */
        if ( ! is_valid_ether_addr ( mac.raw ) ) {
                DBGC ( intelxl, "INTELXL %p has invalid MAC address (%s)\n",
                       intelxl, eth_ntoa ( mac.raw ) );
                return -ENOENT;
        }

        /* Copy MAC address */
        DBGC ( intelxl, "INTELXL %p has autoloaded MAC address %s\n",
               intelxl, eth_ntoa ( mac.raw ) );
        memcpy ( netdev->hw_addr, mac.raw, ETH_ALEN );

        /* Get maximum frame size */
        mfs = INTELXL_PRTGL_SAH_MFS_GET ( prtgl_sah );
        netdev->max_pkt_len = ( mfs - 4 /* CRC */ );

        return 0;
}
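
/* The MFS value read from PRTGL_SAH counts the frame including the
 * 4-byte Ethernet CRC, whereas netdev->max_pkt_len excludes it; for
 * example, a port reporting a 1522-byte MFS yields a 1518-byte
 * max_pkt_len.
 */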

/******************************************************************************
 *
 * Admin queue
 *
 ******************************************************************************
 */

/** Admin queue register offsets */
static const struct intelxl_admin_offsets intelxl_admin_offsets = {
        .bal = INTELXL_ADMIN_BAL,
        .bah = INTELXL_ADMIN_BAH,
        .len = INTELXL_ADMIN_LEN,
        .head = INTELXL_ADMIN_HEAD,
        .tail = INTELXL_ADMIN_TAIL,
};
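
/* The device provides two admin queues with identical register
 * layouts: a command queue on which the driver issues requests (the
 * "ATQ" in Intel's documentation) and an event queue on which the
 * firmware delivers asynchronous events such as link changes (the
 * "ARQ").  Both queues therefore share this offsets table and differ
 * only in their register base, set via intelxl_init_admin() in
 * intelxl_probe().
 */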

/**
 * Create admin queue
 *
 * @v intelxl		Intel device
 * @v admin		Admin queue
 * @ret rc		Return status code
 */
static int intelxl_create_admin ( struct intelxl_nic *intelxl,
                                  struct intelxl_admin *admin ) {
        size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
        size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
        const struct intelxl_admin_offsets *regs = admin->regs;
        void *admin_regs = ( intelxl->regs + admin->base );
        physaddr_t address;

        /* Allocate admin queue */
        admin->buf = malloc_dma ( ( buf_len + len ), INTELXL_ALIGN );
        if ( ! admin->buf )
                return -ENOMEM;
        admin->desc = ( ( ( void * ) admin->buf ) + buf_len );

        /* Initialise admin queue */
        memset ( admin->desc, 0, len );

        /* Reset head and tail registers */
        writel ( 0, admin_regs + regs->head );
        writel ( 0, admin_regs + regs->tail );

        /* Reset queue index */
        admin->index = 0;

        /* Program queue address */
        address = virt_to_bus ( admin->desc );
        writel ( ( address & 0xffffffffUL ), admin_regs + regs->bal );
        if ( sizeof ( physaddr_t ) > sizeof ( uint32_t ) ) {
                writel ( ( ( ( uint64_t ) address ) >> 32 ),
                         admin_regs + regs->bah );
        } else {
                writel ( 0, admin_regs + regs->bah );
        }

        /* Program queue length and enable queue */
        writel ( ( INTELXL_ADMIN_LEN_LEN ( INTELXL_ADMIN_NUM_DESC ) |
                   INTELXL_ADMIN_LEN_ENABLE ),
                 admin_regs + regs->len );

        DBGC ( intelxl, "INTELXL %p A%cQ is at [%08llx,%08llx) buf "
               "[%08llx,%08llx)\n", intelxl,
               ( ( admin == &intelxl->command ) ? 'T' : 'R' ),
               ( ( unsigned long long ) address ),
               ( ( unsigned long long ) address + len ),
               ( ( unsigned long long ) virt_to_bus ( admin->buf ) ),
               ( ( unsigned long long ) ( virt_to_bus ( admin->buf ) +
                                          buf_len ) ) );
        return 0;
}
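
/* The single DMA allocation above therefore holds one data buffer per
 * descriptor, followed by the descriptor ring itself:
 *
 *    admin->buf:  [ buf[0]  | buf[1]  | ... | buf[N-1]  ]
 *    admin->desc: [ desc[0] | desc[1] | ... | desc[N-1] ]
 *
 * where N is INTELXL_ADMIN_NUM_DESC.  Descriptor i always uses data
 * buffer i (see intelxl_admin_command_buffer() and
 * intelxl_admin_event_init()), so buffers never need to be allocated
 * or tracked individually.
 */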

/**
 * Destroy admin queue
 *
 * @v intelxl		Intel device
 * @v admin		Admin queue
 */
static void intelxl_destroy_admin ( struct intelxl_nic *intelxl,
                                    struct intelxl_admin *admin ) {
        size_t buf_len = ( sizeof ( admin->buf[0] ) * INTELXL_ADMIN_NUM_DESC );
        size_t len = ( sizeof ( admin->desc[0] ) * INTELXL_ADMIN_NUM_DESC );
        const struct intelxl_admin_offsets *regs = admin->regs;
        void *admin_regs = ( intelxl->regs + admin->base );

        /* Disable queue */
        writel ( 0, admin_regs + regs->len );

        /* Free queue */
        free_dma ( admin->buf, ( buf_len + len ) );
}

/**
 * Get next admin command queue descriptor
 *
 * @v intelxl		Intel device
 * @ret cmd		Command descriptor
 */
static struct intelxl_admin_descriptor *
intelxl_admin_command_descriptor ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin *admin = &intelxl->command;
        struct intelxl_admin_descriptor *cmd;

        /* Get and initialise next descriptor */
        cmd = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
        memset ( cmd, 0, sizeof ( *cmd ) );
        return cmd;
}

/**
 * Get next admin command queue data buffer
 *
 * @v intelxl		Intel device
 * @ret buf		Data buffer
 */
static union intelxl_admin_buffer *
intelxl_admin_command_buffer ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin *admin = &intelxl->command;
        union intelxl_admin_buffer *buf;

        /* Get next data buffer */
        buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];
        memset ( buf, 0, sizeof ( *buf ) );
        return buf;
}

/**
 * Initialise admin event queue descriptor
 *
 * @v intelxl		Intel device
 * @v index		Event queue index
 */
static void intelxl_admin_event_init ( struct intelxl_nic *intelxl,
                                       unsigned int index ) {
        struct intelxl_admin *admin = &intelxl->event;
        struct intelxl_admin_descriptor *evt;
        union intelxl_admin_buffer *buf;
        uint64_t address;

        /* Initialise descriptor */
        evt = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
        buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
        address = virt_to_bus ( buf );
        evt->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
        evt->len = cpu_to_le16 ( sizeof ( *buf ) );
        evt->params.buffer.high = cpu_to_le32 ( address >> 32 );
        evt->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
}
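
/* Event queue descriptors are handed to the firmware pre-initialised
 * with the address and length of their data buffer; the firmware
 * overwrites the descriptor (and buffer, where applicable) when it
 * posts an event, and intelxl_poll_admin() re-runs this
 * initialisation before returning the descriptor to the firmware.
 */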

/**
 * Issue admin queue command
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_admin_command ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin *admin = &intelxl->command;
        const struct intelxl_admin_offsets *regs = admin->regs;
        void *admin_regs = ( intelxl->regs + admin->base );
        struct intelxl_admin_descriptor *cmd;
        union intelxl_admin_buffer *buf;
        uint64_t address;
        uint32_t cookie;
        unsigned int index;
        unsigned int tail;
        unsigned int i;
        int rc;

        /* Get next queue entry */
        index = admin->index++;
        tail = ( admin->index % INTELXL_ADMIN_NUM_DESC );
        cmd = &admin->desc[ index % INTELXL_ADMIN_NUM_DESC ];
        buf = &admin->buf[ index % INTELXL_ADMIN_NUM_DESC ];
        DBGC2 ( intelxl, "INTELXL %p admin command %#x opcode %#04x:\n",
                intelxl, index, le16_to_cpu ( cmd->opcode ) );

        /* Sanity checks */
        assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_DD ) ) );
        assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_CMP ) ) );
        assert ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_ERR ) ) );
        assert ( cmd->ret == 0 );

        /* Populate data buffer address if applicable */
        if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
                address = virt_to_bus ( buf );
                cmd->params.buffer.high = cpu_to_le32 ( address >> 32 );
                cmd->params.buffer.low = cpu_to_le32 ( address & 0xffffffffUL );
        }

        /* Populate cookie */
        cmd->cookie = cpu_to_le32 ( index );

        /* Record cookie */
        cookie = cmd->cookie;

        /* Post command descriptor */
        DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
        if ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
                DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
                            le16_to_cpu ( cmd->len ) );
        }
        wmb();
        writel ( tail, admin_regs + regs->tail );

        /* Wait for completion */
        for ( i = 0 ; i < INTELXL_ADMIN_MAX_WAIT_MS ; i++ ) {

                /* If response is not complete, delay 1ms and retry */
                if ( ! ( cmd->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_DD ) ) ) {
                        mdelay ( 1 );
                        continue;
                }
                DBGC2 ( intelxl, "INTELXL %p admin command %#x response:\n",
                        intelxl, index );
                DBGC2_HDA ( intelxl, virt_to_phys ( cmd ), cmd,
                            sizeof ( *cmd ) );

                /* Check for cookie mismatch */
                if ( cmd->cookie != cookie ) {
                        DBGC ( intelxl, "INTELXL %p admin command %#x bad "
                               "cookie %#x\n", intelxl, index,
                               le32_to_cpu ( cmd->cookie ) );
                        rc = -EPROTO;
                        goto err;
                }

                /* Check for errors */
                if ( cmd->ret != 0 ) {
                        DBGC ( intelxl, "INTELXL %p admin command %#x error "
                               "%d\n", intelxl, index,
                               le16_to_cpu ( cmd->ret ) );
                        rc = -EIO;
                        goto err;
                }

                /* Success */
                return 0;
        }

        rc = -ETIMEDOUT;
        DBGC ( intelxl, "INTELXL %p timed out waiting for admin command %#x:\n",
               intelxl, index );
 err:
        DBGC_HDA ( intelxl, virt_to_phys ( cmd ), cmd, sizeof ( *cmd ) );
        return rc;
}
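
/* Each admin command wrapper below follows the same pattern (shown
 * here with placeholder names XXX and xxx rather than real
 * identifiers): obtain and populate the next descriptor, obtain the
 * matching data buffer if the command carries one, then issue it:
 *
 *    cmd = intelxl_admin_command_descriptor ( intelxl );
 *    cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_XXX );
 *    cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
 *    cmd->len = cpu_to_le16 ( sizeof ( buf->xxx ) );
 *    buf = intelxl_admin_command_buffer ( intelxl );
 *    rc = intelxl_admin_command ( intelxl );
 *
 * Any response is read back from the same descriptor and data buffer
 * after intelxl_admin_command() returns successfully.
 */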

/**
 * Get firmware version
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_admin_version ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin_descriptor *cmd;
        struct intelxl_admin_version_params *version;
        unsigned int api;
        int rc;

        /* Populate descriptor */
        cmd = intelxl_admin_command_descriptor ( intelxl );
        cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VERSION );
        version = &cmd->params.version;

        /* Issue command */
        if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
                return rc;
        api = le16_to_cpu ( version->api.major );
        DBGC ( intelxl, "INTELXL %p firmware v%d.%d API v%d.%d\n",
               intelxl, le16_to_cpu ( version->firmware.major ),
               le16_to_cpu ( version->firmware.minor ),
               api, le16_to_cpu ( version->api.minor ) );

        /* Check for API compatibility */
        if ( api > INTELXL_ADMIN_API_MAJOR ) {
                DBGC ( intelxl, "INTELXL %p unsupported API v%d\n",
                       intelxl, api );
                return -ENOTSUP;
        }

        return 0;
}

/**
 * Report driver version
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_admin_driver ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin_descriptor *cmd;
        struct intelxl_admin_driver_params *driver;
        union intelxl_admin_buffer *buf;
        int rc;

        /* Populate descriptor */
        cmd = intelxl_admin_command_descriptor ( intelxl );
        cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_DRIVER );
        cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_RD | INTELXL_ADMIN_FL_BUF );
        cmd->len = cpu_to_le16 ( sizeof ( buf->driver ) );
        driver = &cmd->params.driver;
        driver->major = product_major_version;
        driver->minor = product_minor_version;
        buf = intelxl_admin_command_buffer ( intelxl );
        snprintf ( buf->driver.name, sizeof ( buf->driver.name ), "%s",
                   ( product_name[0] ? product_name : product_short_name ) );

        /* Issue command */
        if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
                return rc;

        return 0;
}

/**
 * Shutdown admin queues
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_admin_shutdown ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin_descriptor *cmd;
        struct intelxl_admin_shutdown_params *shutdown;
        int rc;

        /* Populate descriptor */
        cmd = intelxl_admin_command_descriptor ( intelxl );
        cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SHUTDOWN );
        shutdown = &cmd->params.shutdown;
        shutdown->unloading = INTELXL_ADMIN_SHUTDOWN_UNLOADING;

        /* Issue command */
        if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
                return rc;

        return 0;
}

/**
 * Get switch configuration
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_admin_switch ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin_descriptor *cmd;
        struct intelxl_admin_switch_params *sw;
        union intelxl_admin_buffer *buf;
        int rc;

        /* Populate descriptor */
        cmd = intelxl_admin_command_descriptor ( intelxl );
        cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_SWITCH );
        cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
        cmd->len = cpu_to_le16 ( sizeof ( buf->sw ) );
        sw = &cmd->params.sw;
        buf = intelxl_admin_command_buffer ( intelxl );

        /* Get each configuration in turn */
        do {
                /* Issue command */
                if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
                        return rc;

                /* Dump raw configuration */
                DBGC2 ( intelxl, "INTELXL %p SEID %#04x:\n",
                        intelxl, le16_to_cpu ( buf->sw.cfg.seid ) );
                DBGC2_HDA ( intelxl, 0, &buf->sw.cfg, sizeof ( buf->sw.cfg ) );

                /* Parse response */
                if ( buf->sw.cfg.type == INTELXL_ADMIN_SWITCH_TYPE_VSI ) {
                        intelxl->vsi = le16_to_cpu ( buf->sw.cfg.seid );
                        DBGC ( intelxl, "INTELXL %p VSI %#04x uplink %#04x "
                               "downlink %#04x conn %#02x\n", intelxl,
                               intelxl->vsi, le16_to_cpu ( buf->sw.cfg.uplink ),
                               le16_to_cpu ( buf->sw.cfg.downlink ),
                               buf->sw.cfg.connection );
                }

        } while ( sw->next );

        /* Check that we found a VSI */
        if ( ! intelxl->vsi ) {
                DBGC ( intelxl, "INTELXL %p has no VSI\n", intelxl );
                return -ENOENT;
        }

        return 0;
}

/**
 * Get VSI parameters
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_admin_vsi ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin_descriptor *cmd;
        struct intelxl_admin_vsi_params *vsi;
        union intelxl_admin_buffer *buf;
        int rc;

        /* Populate descriptor */
        cmd = intelxl_admin_command_descriptor ( intelxl );
        cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_VSI );
        cmd->flags = cpu_to_le16 ( INTELXL_ADMIN_FL_BUF );
        cmd->len = cpu_to_le16 ( sizeof ( buf->vsi ) );
        vsi = &cmd->params.vsi;
        vsi->vsi = cpu_to_le16 ( intelxl->vsi );
        buf = intelxl_admin_command_buffer ( intelxl );

        /* Issue command */
        if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
                return rc;

        /* Parse response */
        intelxl->queue = le16_to_cpu ( buf->vsi.queue[0] );
        intelxl->qset = le16_to_cpu ( buf->vsi.qset[0] );
        DBGC ( intelxl, "INTELXL %p VSI %#04x queue %#04x qset %#04x\n",
               intelxl, intelxl->vsi, intelxl->queue, intelxl->qset );

        return 0;
}

/**
 * Set VSI promiscuous modes
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_admin_promisc ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin_descriptor *cmd;
        struct intelxl_admin_promisc_params *promisc;
        uint16_t flags;
        int rc;

        /* Populate descriptor */
        cmd = intelxl_admin_command_descriptor ( intelxl );
        cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_PROMISC );
        flags = ( INTELXL_ADMIN_PROMISC_FL_UNICAST |
                  INTELXL_ADMIN_PROMISC_FL_MULTICAST |
                  INTELXL_ADMIN_PROMISC_FL_BROADCAST |
                  INTELXL_ADMIN_PROMISC_FL_VLAN );
        promisc = &cmd->params.promisc;
        promisc->flags = cpu_to_le16 ( flags );
        promisc->valid = cpu_to_le16 ( flags );
        promisc->vsi = cpu_to_le16 ( intelxl->vsi );

        /* Issue command */
        if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
                return rc;

        return 0;
}

/**
 * Restart autonegotiation
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_admin_autoneg ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin_descriptor *cmd;
        struct intelxl_admin_autoneg_params *autoneg;
        int rc;

        /* Populate descriptor */
        cmd = intelxl_admin_command_descriptor ( intelxl );
        cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_AUTONEG );
        autoneg = &cmd->params.autoneg;
        autoneg->flags = ( INTELXL_ADMIN_AUTONEG_FL_RESTART |
                           INTELXL_ADMIN_AUTONEG_FL_ENABLE );

        /* Issue command */
        if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
                return rc;

        return 0;
}

/**
 * Get link status
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int intelxl_admin_link ( struct net_device *netdev ) {
        struct intelxl_nic *intelxl = netdev->priv;
        struct intelxl_admin_descriptor *cmd;
        struct intelxl_admin_link_params *link;
        int rc;

        /* Populate descriptor */
        cmd = intelxl_admin_command_descriptor ( intelxl );
        cmd->opcode = cpu_to_le16 ( INTELXL_ADMIN_LINK );
        link = &cmd->params.link;
        link->notify = INTELXL_ADMIN_LINK_NOTIFY;

        /* Issue command */
        if ( ( rc = intelxl_admin_command ( intelxl ) ) != 0 )
                return rc;
        DBGC ( intelxl, "INTELXL %p PHY %#02x speed %#02x status %#02x\n",
               intelxl, link->phy, link->speed, link->status );

        /* Update network device */
        if ( link->status & INTELXL_ADMIN_LINK_UP ) {
                netdev_link_up ( netdev );
        } else {
                netdev_link_down ( netdev );
        }

        return 0;
}

/**
 * Refill admin event queue
 *
 * @v intelxl		Intel device
 */
static void intelxl_refill_admin ( struct intelxl_nic *intelxl ) {
        struct intelxl_admin *admin = &intelxl->event;
        const struct intelxl_admin_offsets *regs = admin->regs;
        void *admin_regs = ( intelxl->regs + admin->base );
        unsigned int tail;

        /* Update tail pointer */
        tail = ( ( admin->index + INTELXL_ADMIN_NUM_DESC - 1 ) %
                 INTELXL_ADMIN_NUM_DESC );
        wmb();
        writel ( tail, admin_regs + regs->tail );
}
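
/* Note that tail is written as ( index - 1 ) modulo the ring size:
 * one descriptor is always held back from the firmware, which is the
 * usual way of preventing a completely full ring from becoming
 * indistinguishable from a completely empty one (head == tail).
 */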

/**
 * Poll admin event queue
 *
 * @v netdev		Network device
 */
static void intelxl_poll_admin ( struct net_device *netdev ) {
        struct intelxl_nic *intelxl = netdev->priv;
        struct intelxl_admin *admin = &intelxl->event;
        struct intelxl_admin_descriptor *evt;
        union intelxl_admin_buffer *buf;

        /* Check for events */
        while ( 1 ) {

                /* Get next event descriptor and data buffer */
                evt = &admin->desc[ admin->index % INTELXL_ADMIN_NUM_DESC ];
                buf = &admin->buf[ admin->index % INTELXL_ADMIN_NUM_DESC ];

                /* Stop if descriptor is not yet completed */
                if ( ! ( evt->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_DD ) ) )
                        return;
                DBGC2 ( intelxl, "INTELXL %p admin event %#x:\n",
                        intelxl, admin->index );
                DBGC2_HDA ( intelxl, virt_to_phys ( evt ), evt,
                            sizeof ( *evt ) );
                if ( evt->flags & cpu_to_le16 ( INTELXL_ADMIN_FL_BUF ) ) {
                        DBGC2_HDA ( intelxl, virt_to_phys ( buf ), buf,
                                    le16_to_cpu ( evt->len ) );
                }

                /* Handle event */
                switch ( evt->opcode ) {
                case cpu_to_le16 ( INTELXL_ADMIN_LINK ):
                        intelxl_admin_link ( netdev );
                        break;
                default:
                        DBGC ( intelxl, "INTELXL %p admin event %#x "
                               "unrecognised opcode %#04x\n", intelxl,
                               admin->index, le16_to_cpu ( evt->opcode ) );
                        break;
                }

                /* Reset descriptor and refill queue */
                intelxl_admin_event_init ( intelxl, admin->index );
                admin->index++;
                intelxl_refill_admin ( intelxl );
        }
}

/**
 * Open admin queues
 *
 * @v intelxl		Intel device
 * @ret rc		Return status code
 */
static int intelxl_open_admin ( struct intelxl_nic *intelxl ) {
        unsigned int i;
        int rc;

        /* Create admin event queue */
        if ( ( rc = intelxl_create_admin ( intelxl, &intelxl->event ) ) != 0 )
                goto err_create_event;

        /* Create admin command queue */
        if ( ( rc = intelxl_create_admin ( intelxl, &intelxl->command ) ) != 0 )
                goto err_create_command;

        /* Initialise all admin event queue descriptors */
        for ( i = 0 ; i < INTELXL_ADMIN_NUM_DESC ; i++ )
                intelxl_admin_event_init ( intelxl, i );

        /* Post all descriptors to event queue */
        intelxl_refill_admin ( intelxl );

        /* Get firmware version */
        if ( ( rc = intelxl_admin_version ( intelxl ) ) != 0 )
                goto err_version;

        /* Report driver version */
        if ( ( rc = intelxl_admin_driver ( intelxl ) ) != 0 )
                goto err_driver;

        return 0;

 err_driver:
 err_version:
        intelxl_destroy_admin ( intelxl, &intelxl->command );
 err_create_command:
        intelxl_destroy_admin ( intelxl, &intelxl->event );
 err_create_event:
        return rc;
}

/**
 * Close admin queues
 *
 * @v intelxl		Intel device
 */
static void intelxl_close_admin ( struct intelxl_nic *intelxl ) {

        /* Shut down admin queues */
        intelxl_admin_shutdown ( intelxl );

        /* Destroy admin command queue */
        intelxl_destroy_admin ( intelxl, &intelxl->command );

        /* Destroy admin event queue */
        intelxl_destroy_admin ( intelxl, &intelxl->event );
}

/******************************************************************************
 *
 * Descriptor rings
 *
 ******************************************************************************
 */

/**
 * Dump queue context (for debugging)
 *
 * @v intelxl		Intel device
 * @v op		Context operation
 * @v len		Size of context
 */
static __attribute__ (( unused )) void
intelxl_context_dump ( struct intelxl_nic *intelxl, uint32_t op, size_t len ) {
        struct intelxl_context_line line;
        uint32_t pfcm_lanctxctl;
        uint32_t pfcm_lanctxstat;
        unsigned int queue;
        unsigned int index;
        unsigned int i;

        /* Do nothing unless debug output is enabled */
        if ( ! DBG_EXTRA )
                return;

        /* Dump context */
        DBGC2 ( intelxl, "INTELXL %p context %#08x:\n", intelxl, op );
        for ( index = 0 ; ( sizeof ( line ) * index ) < len ; index++ ) {

                /* Start context operation */
                queue = ( intelxl->base + intelxl->queue );
                pfcm_lanctxctl =
                        ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
                          INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
                          INTELXL_PFCM_LANCTXCTL_OP_CODE_READ | op );
                writel ( pfcm_lanctxctl,
                         intelxl->regs + INTELXL_PFCM_LANCTXCTL );

                /* Wait for operation to complete */
                for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

                        /* Check if operation is complete */
                        pfcm_lanctxstat = readl ( intelxl->regs +
                                                  INTELXL_PFCM_LANCTXSTAT );
                        if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
                                break;

                        /* Delay */
                        mdelay ( 1 );
                }

                /* Read context data */
                for ( i = 0 ; i < ( sizeof ( line ) /
                                    sizeof ( line.raw[0] ) ) ; i++ ) {
                        line.raw[i] = readl ( intelxl->regs +
                                              INTELXL_PFCM_LANCTXDATA ( i ) );
                }
                DBGC2_HDA ( intelxl, ( sizeof ( line ) * index ),
                            &line, sizeof ( line ) );
        }
}

/**
 * Program queue context line
 *
 * @v intelxl		Intel device
 * @v line		Queue context line
 * @v index		Line number
 * @v op		Context operation
 * @ret rc		Return status code
 */
static int intelxl_context_line ( struct intelxl_nic *intelxl,
                                  struct intelxl_context_line *line,
                                  unsigned int index, uint32_t op ) {
        uint32_t pfcm_lanctxctl;
        uint32_t pfcm_lanctxstat;
        unsigned int queue;
        unsigned int i;

        /* Write context data */
        for ( i = 0; i < ( sizeof ( *line ) / sizeof ( line->raw[0] ) ); i++ ) {
                writel ( le32_to_cpu ( line->raw[i] ),
                         intelxl->regs + INTELXL_PFCM_LANCTXDATA ( i ) );
        }

        /* Start context operation */
        queue = ( intelxl->base + intelxl->queue );
        pfcm_lanctxctl = ( INTELXL_PFCM_LANCTXCTL_QUEUE_NUM ( queue ) |
                           INTELXL_PFCM_LANCTXCTL_SUB_LINE ( index ) |
                           INTELXL_PFCM_LANCTXCTL_OP_CODE_WRITE | op );
        writel ( pfcm_lanctxctl, intelxl->regs + INTELXL_PFCM_LANCTXCTL );

        /* Wait for operation to complete */
        for ( i = 0 ; i < INTELXL_CTX_MAX_WAIT_MS ; i++ ) {

                /* Check if operation is complete */
                pfcm_lanctxstat = readl ( intelxl->regs +
                                          INTELXL_PFCM_LANCTXSTAT );
                if ( pfcm_lanctxstat & INTELXL_PFCM_LANCTXSTAT_DONE )
                        return 0;

                /* Delay */
                mdelay ( 1 );
        }

        DBGC ( intelxl, "INTELXL %p timed out waiting for context: %#08x\n",
               intelxl, pfcm_lanctxctl );
        return -ETIMEDOUT;
}

/**
 * Program queue context
 *
 * @v intelxl		Intel device
 * @v line		Queue context lines
 * @v len		Size of context
 * @v op		Context operation
 * @ret rc		Return status code
 */
static int intelxl_context ( struct intelxl_nic *intelxl,
                             struct intelxl_context_line *line,
                             size_t len, uint32_t op ) {
        unsigned int index;
        int rc;

        DBGC2 ( intelxl, "INTELXL %p context %#08x len %#zx:\n",
                intelxl, op, len );
        DBGC2_HDA ( intelxl, 0, line, len );

        /* Program one line at a time */
        for ( index = 0 ; ( sizeof ( *line ) * index ) < len ; index++ ) {
                if ( ( rc = intelxl_context_line ( intelxl, line++, index,
                                                   op ) ) != 0 )
                        return rc;
        }

        return 0;
}
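
/* Queue contexts are programmed indirectly: each context line is
 * staged in the PFCM_LANCTXDATA registers and then committed with a
 * single PFCM_LANCTXCTL write, so a context spanning several lines
 * (such as the transmit and receive queue contexts below) requires
 * one such operation per line.
 */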

/**
 * Program transmit queue context
 *
 * @v intelxl		Intel device
 * @v address		Descriptor ring base address
 * @ret rc		Return status code
 */
static int intelxl_context_tx ( struct intelxl_nic *intelxl,
                                physaddr_t address ) {
        union {
                struct intelxl_context_tx tx;
                struct intelxl_context_line line;
        } ctx;
        int rc;

        /* Initialise context */
        memset ( &ctx, 0, sizeof ( ctx ) );
        ctx.tx.flags = cpu_to_le16 ( INTELXL_CTX_TX_FL_NEW );
        ctx.tx.base = cpu_to_le64 ( INTELXL_CTX_TX_BASE ( address ) );
        ctx.tx.count =
                cpu_to_le16 ( INTELXL_CTX_TX_COUNT ( INTELXL_TX_NUM_DESC ) );
        ctx.tx.qset = INTELXL_CTX_TX_QSET ( intelxl->qset );

        /* Program context */
        if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
                                      INTELXL_PFCM_LANCTXCTL_TYPE_TX ) ) != 0 )
                return rc;

        return 0;
}

/**
 * Program receive queue context
 *
 * @v intelxl		Intel device
 * @v address		Descriptor ring base address
 * @ret rc		Return status code
 */
static int intelxl_context_rx ( struct intelxl_nic *intelxl,
                                physaddr_t address ) {
        union {
                struct intelxl_context_rx rx;
                struct intelxl_context_line line;
        } ctx;
        uint64_t base_count;
        int rc;

        /* Initialise context */
        memset ( &ctx, 0, sizeof ( ctx ) );
        base_count = INTELXL_CTX_RX_BASE_COUNT ( address, INTELXL_RX_NUM_DESC );
        ctx.rx.base_count = cpu_to_le64 ( base_count );
        ctx.rx.len = cpu_to_le16 ( INTELXL_CTX_RX_LEN ( intelxl->mfs ) );
        ctx.rx.flags = INTELXL_CTX_RX_FL_CRCSTRIP;
        ctx.rx.mfs = cpu_to_le16 ( INTELXL_CTX_RX_MFS ( intelxl->mfs ) );

        /* Program context */
        if ( ( rc = intelxl_context ( intelxl, &ctx.line, sizeof ( ctx ),
                                      INTELXL_PFCM_LANCTXCTL_TYPE_RX ) ) != 0 )
                return rc;

        return 0;
}
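
/* The receive context uses the maximum frame size twice: once as the
 * data buffer length (LEN), since this driver allocates one
 * full-sized I/O buffer per receive descriptor, and once as the
 * permitted maximum frame size (MFS).  CRC stripping is enabled to
 * match the CRC-less max_pkt_len calculated in intelxl_fetch_mac().
 */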

/**
 * Enable descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_enable_ring ( struct intelxl_nic *intelxl,
                                 struct intelxl_ring *ring ) {
        void *ring_regs = ( intelxl->regs + ring->reg );
        uint32_t qxx_ena;

        /* Enable ring */
        writel ( INTELXL_QXX_ENA_REQ, ( ring_regs + INTELXL_QXX_ENA ) );
        udelay ( INTELXL_QUEUE_ENABLE_DELAY_US );
        qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
        if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) ) {
                DBGC ( intelxl, "INTELXL %p ring %06x failed to enable: "
                       "%#08x\n", intelxl, ring->reg, qxx_ena );
                return -EIO;
        }

        return 0;
}

/**
 * Disable descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_disable_ring ( struct intelxl_nic *intelxl,
                                  struct intelxl_ring *ring ) {
        void *ring_regs = ( intelxl->regs + ring->reg );
        uint32_t qxx_ena;
        unsigned int i;

        /* Disable ring */
        writel ( 0, ( ring_regs + INTELXL_QXX_ENA ) );

        /* Wait for ring to be disabled */
        for ( i = 0 ; i < INTELXL_QUEUE_DISABLE_MAX_WAIT_MS ; i++ ) {

                /* Check if ring is disabled */
                qxx_ena = readl ( ring_regs + INTELXL_QXX_ENA );
                if ( ! ( qxx_ena & INTELXL_QXX_ENA_STAT ) )
                        return 0;

                /* Delay */
                mdelay ( 1 );
        }

        DBGC ( intelxl, "INTELXL %p ring %06x timed out waiting for disable: "
               "%#08x\n", intelxl, ring->reg, qxx_ena );
        return -ETIMEDOUT;
}

/**
 * Create descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 * @ret rc		Return status code
 */
static int intelxl_create_ring ( struct intelxl_nic *intelxl,
                                 struct intelxl_ring *ring ) {
        void *ring_regs = ( intelxl->regs + ring->reg );
        physaddr_t address;
        int rc;

        /* Allocate descriptor ring */
        ring->desc = malloc_dma ( ring->len, INTELXL_ALIGN );
        if ( ! ring->desc ) {
                rc = -ENOMEM;
                goto err_alloc;
        }

        /* Initialise descriptor ring */
        memset ( ring->desc, 0, ring->len );

        /* Reset tail pointer */
        writel ( 0, ( ring_regs + INTELXL_QXX_TAIL ) );

        /* Program queue context */
        address = virt_to_bus ( ring->desc );
        if ( ( rc = ring->context ( intelxl, address ) ) != 0 )
                goto err_context;

        /* Enable ring */
        if ( ( rc = intelxl_enable_ring ( intelxl, ring ) ) != 0 )
                goto err_enable;

        /* Reset counters */
        ring->prod = 0;
        ring->cons = 0;

        DBGC ( intelxl, "INTELXL %p ring %06x is at [%08llx,%08llx)\n",
               intelxl, ring->reg, ( ( unsigned long long ) address ),
               ( ( unsigned long long ) address + ring->len ) );

        return 0;

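        /* Note that this intelxl_disable_ring() call is currently
         * unreachable: it precedes the first error label and the
         * success path has already returned.  It is kept, following
         * the error-path convention used throughout this driver (see
         * also intelxl_open() and intelxl_probe()), so that the
         * teardown sequence remains complete if a further setup step
         * (with its own error label above this line) is ever added
         * after intelxl_enable_ring().
         */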
        intelxl_disable_ring ( intelxl, ring );
 err_enable:
 err_context:
        free_dma ( ring->desc, ring->len );
 err_alloc:
        return rc;
}

/**
 * Destroy descriptor ring
 *
 * @v intelxl		Intel device
 * @v ring		Descriptor ring
 */
static void intelxl_destroy_ring ( struct intelxl_nic *intelxl,
                                   struct intelxl_ring *ring ) {
        int rc;

        /* Disable ring */
        if ( ( rc = intelxl_disable_ring ( intelxl, ring ) ) != 0 ) {
                /* Leak memory; there's nothing else we can do */
                return;
        }

        /* Free descriptor ring */
        free_dma ( ring->desc, ring->len );
        ring->desc = NULL;
}

/**
 * Refill receive descriptor ring
 *
 * @v intelxl		Intel device
 */
static void intelxl_refill_rx ( struct intelxl_nic *intelxl ) {
        struct intelxl_rx_data_descriptor *rx;
        struct io_buffer *iobuf;
        unsigned int rx_idx;
        unsigned int rx_tail;
        physaddr_t address;
        unsigned int refilled = 0;

        /* Refill ring */
        while ( ( intelxl->rx.prod - intelxl->rx.cons ) < INTELXL_RX_FILL ) {

                /* Allocate I/O buffer */
                iobuf = alloc_iob ( intelxl->mfs );
                if ( ! iobuf ) {
                        /* Wait for next refill */
                        break;
                }

                /* Get next receive descriptor */
                rx_idx = ( intelxl->rx.prod++ % INTELXL_RX_NUM_DESC );
                rx = &intelxl->rx.desc[rx_idx].rx;

                /* Populate receive descriptor */
                address = virt_to_bus ( iobuf->data );
                rx->address = cpu_to_le64 ( address );
                rx->flags = 0;

                /* Record I/O buffer */
                assert ( intelxl->rx_iobuf[rx_idx] == NULL );
                intelxl->rx_iobuf[rx_idx] = iobuf;

                DBGC2 ( intelxl, "INTELXL %p RX %d is [%llx,%llx)\n", intelxl,
                        rx_idx, ( ( unsigned long long ) address ),
                        ( ( unsigned long long ) address + intelxl->mfs ) );
                refilled++;
        }

        /* Push descriptors to card, if applicable */
        if ( refilled ) {
                wmb();
                rx_tail = ( intelxl->rx.prod % INTELXL_RX_NUM_DESC );
                writel ( rx_tail,
                         ( intelxl->regs + intelxl->rx.reg + INTELXL_QXX_TAIL));
        }
}
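
/* Refilling stops once INTELXL_RX_FILL buffers are outstanding (or as
 * soon as an allocation fails), and the tail register is written only
 * once per batch, after the write memory barrier, so the card never
 * sees a descriptor before its buffer address is visible.
 */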

/******************************************************************************
 *
 * Network device interface
 *
 ******************************************************************************
 */

/**
 * Open network device
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int intelxl_open ( struct net_device *netdev ) {
        struct intelxl_nic *intelxl = netdev->priv;
        union intelxl_receive_address mac;
        unsigned int queue;
        uint32_t prtgl_sal;
        uint32_t prtgl_sah;
        int rc;

        /* Calculate maximum frame size */
        intelxl->mfs = ( ( ETH_HLEN + netdev->mtu + 4 /* CRC */ +
                           INTELXL_ALIGN - 1 ) & ~( INTELXL_ALIGN - 1 ) );
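        /* For the standard 1500-byte MTU this gives 14 + 1500 + 4 =
         * 1518 bytes, rounded up to the next multiple of
         * INTELXL_ALIGN (e.g. 1536 bytes with a 128-byte alignment).
         */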

        /* Program MAC address and maximum frame size */
        memset ( &mac, 0, sizeof ( mac ) );
        memcpy ( mac.raw, netdev->ll_addr, sizeof ( mac.raw ) );
        prtgl_sal = le32_to_cpu ( mac.reg.low );
        prtgl_sah = ( le32_to_cpu ( mac.reg.high ) |
                      INTELXL_PRTGL_SAH_MFS_SET ( intelxl->mfs ) );
        writel ( prtgl_sal, intelxl->regs + INTELXL_PRTGL_SAL );
        writel ( prtgl_sah, intelxl->regs + INTELXL_PRTGL_SAH );

        /* Associate transmit queue to PF */
        writel ( ( INTELXL_QXX_CTL_PFVF_Q_PF |
                   INTELXL_QXX_CTL_PFVF_PF_INDX ( intelxl->pf ) ),
                 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_CTL ) );

        /* Clear transmit pre queue disable */
        queue = ( intelxl->base + intelxl->queue );
        writel ( ( INTELXL_GLLAN_TXPRE_QDIS_CLEAR_QDIS |
                   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
                 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );

        /* Reset transmit queue head */
        writel ( 0, ( intelxl->regs + INTELXL_QTX_HEAD ( intelxl->queue ) ) );

        /* Create receive descriptor ring */
        if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->rx ) ) != 0 )
                goto err_create_rx;

        /* Create transmit descriptor ring */
        if ( ( rc = intelxl_create_ring ( intelxl, &intelxl->tx ) ) != 0 )
                goto err_create_tx;

        /* Fill receive ring */
        intelxl_refill_rx ( intelxl );

        /* Restart autonegotiation */
        intelxl_admin_autoneg ( intelxl );

        /* Update link state */
        intelxl_admin_link ( netdev );

        return 0;

        writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
                   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
                 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
        udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );
        intelxl_destroy_ring ( intelxl, &intelxl->tx );
 err_create_tx:
        intelxl_destroy_ring ( intelxl, &intelxl->rx );
 err_create_rx:
        return rc;
}

/**
 * Close network device
 *
 * @v netdev		Network device
 */
static void intelxl_close ( struct net_device *netdev ) {
        struct intelxl_nic *intelxl = netdev->priv;
        unsigned int queue;
        unsigned int i;

        /* Dump contexts (for debugging) */
        intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_TX,
                               sizeof ( struct intelxl_context_tx ) );
        intelxl_context_dump ( intelxl, INTELXL_PFCM_LANCTXCTL_TYPE_RX,
                               sizeof ( struct intelxl_context_rx ) );

        /* Pre-disable transmit queue */
        queue = ( intelxl->base + intelxl->queue );
        writel ( ( INTELXL_GLLAN_TXPRE_QDIS_SET_QDIS |
                   INTELXL_GLLAN_TXPRE_QDIS_QINDX ( queue ) ),
                 ( intelxl->regs + INTELXL_GLLAN_TXPRE_QDIS ( queue ) ) );
        udelay ( INTELXL_QUEUE_PRE_DISABLE_DELAY_US );

        /* Destroy transmit descriptor ring */
        intelxl_destroy_ring ( intelxl, &intelxl->tx );

        /* Destroy receive descriptor ring */
        intelxl_destroy_ring ( intelxl, &intelxl->rx );

        /* Discard any unused receive buffers */
        for ( i = 0 ; i < INTELXL_RX_NUM_DESC ; i++ ) {
                if ( intelxl->rx_iobuf[i] )
                        free_iob ( intelxl->rx_iobuf[i] );
                intelxl->rx_iobuf[i] = NULL;
        }
}

/**
 * Transmit packet
 *
 * @v netdev		Network device
 * @v iobuf		I/O buffer
 * @ret rc		Return status code
 */
static int intelxl_transmit ( struct net_device *netdev,
                              struct io_buffer *iobuf ) {
        struct intelxl_nic *intelxl = netdev->priv;
        struct intelxl_tx_data_descriptor *tx;
        unsigned int tx_idx;
        unsigned int tx_tail;
        physaddr_t address;
        size_t len;

        /* Get next transmit descriptor */
        if ( ( intelxl->tx.prod - intelxl->tx.cons ) >= INTELXL_TX_FILL ) {
                DBGC ( intelxl, "INTELXL %p out of transmit descriptors\n",
                       intelxl );
                return -ENOBUFS;
        }
        tx_idx = ( intelxl->tx.prod++ % INTELXL_TX_NUM_DESC );
        tx_tail = ( intelxl->tx.prod % INTELXL_TX_NUM_DESC );
        tx = &intelxl->tx.desc[tx_idx].tx;

        /* Populate transmit descriptor */
        address = virt_to_bus ( iobuf->data );
        len = iob_len ( iobuf );
        tx->address = cpu_to_le64 ( address );
        tx->len = cpu_to_le32 ( INTELXL_TX_DATA_LEN ( len ) );
        tx->flags = cpu_to_le32 ( INTELXL_TX_DATA_DTYP | INTELXL_TX_DATA_EOP |
                                  INTELXL_TX_DATA_RS | INTELXL_TX_DATA_JFDI );
        wmb();

        /* Notify card that there are packets ready to transmit */
        writel ( tx_tail,
                 ( intelxl->regs + intelxl->tx.reg + INTELXL_QXX_TAIL ) );

        DBGC2 ( intelxl, "INTELXL %p TX %d is [%llx,%llx)\n", intelxl, tx_idx,
                ( ( unsigned long long ) address ),
                ( ( unsigned long long ) address + len ) );
        return 0;
}
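
/* Each transmitted packet occupies exactly one data descriptor;
 * INTELXL_TX_DATA_RS requests a descriptor writeback on completion,
 * which intelxl_poll_tx() uses to hand completed buffers back to the
 * network stack in order.
 */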

/**
 * Poll for completed packets
 *
 * @v netdev		Network device
 */
static void intelxl_poll_tx ( struct net_device *netdev ) {
        struct intelxl_nic *intelxl = netdev->priv;
        struct intelxl_tx_writeback_descriptor *tx_wb;
        unsigned int tx_idx;

        /* Check for completed packets */
        while ( intelxl->tx.cons != intelxl->tx.prod ) {

                /* Get next transmit descriptor */
                tx_idx = ( intelxl->tx.cons % INTELXL_TX_NUM_DESC );
                tx_wb = &intelxl->tx.desc[tx_idx].tx_wb;

                /* Stop if descriptor is still in use */
                if ( ! ( tx_wb->flags & INTELXL_TX_WB_FL_DD ) )
                        return;
                DBGC2 ( intelxl, "INTELXL %p TX %d complete\n",
                        intelxl, tx_idx );

                /* Complete TX descriptor */
                netdev_tx_complete_next ( netdev );
                intelxl->tx.cons++;
        }
}

/**
 * Poll for received packets
 *
 * @v netdev		Network device
 */
static void intelxl_poll_rx ( struct net_device *netdev ) {
        struct intelxl_nic *intelxl = netdev->priv;
        struct intelxl_rx_writeback_descriptor *rx_wb;
        struct io_buffer *iobuf;
        unsigned int rx_idx;
        unsigned int tag;
        size_t len;

        /* Check for received packets */
        while ( intelxl->rx.cons != intelxl->rx.prod ) {

                /* Get next receive descriptor */
                rx_idx = ( intelxl->rx.cons % INTELXL_RX_NUM_DESC );
                rx_wb = &intelxl->rx.desc[rx_idx].rx_wb;

                /* Stop if descriptor is still in use */
                if ( ! ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_DD ) ) )
                        return;

                /* Populate I/O buffer */
                iobuf = intelxl->rx_iobuf[rx_idx];
                intelxl->rx_iobuf[rx_idx] = NULL;
                len = INTELXL_RX_WB_LEN ( le32_to_cpu ( rx_wb->len ) );
                iob_put ( iobuf, len );

                /* Find VLAN device, if applicable */
                if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_VLAN ) ) {
                        tag = VLAN_TAG ( le16_to_cpu ( rx_wb->vlan ) );
                } else {
                        tag = 0;
                }

                /* Hand off to network stack */
                if ( rx_wb->flags & cpu_to_le32 ( INTELXL_RX_WB_FL_RXE ) ) {
                        DBGC ( intelxl, "INTELXL %p RX %d error (length %zd, "
                               "flags %08x)\n", intelxl, rx_idx, len,
                               le32_to_cpu ( rx_wb->flags ) );
                        vlan_netdev_rx_err ( netdev, tag, iobuf, -EIO );
                } else {
                        DBGC2 ( intelxl, "INTELXL %p RX %d complete (length "
                                "%zd)\n", intelxl, rx_idx, len );
                        vlan_netdev_rx ( netdev, tag, iobuf );
                }
                intelxl->rx.cons++;
        }
}

/**
 * Poll for completed and received packets
 *
 * @v netdev		Network device
 */
static void intelxl_poll ( struct net_device *netdev ) {
        struct intelxl_nic *intelxl = netdev->priv;

        /* Acknowledge interrupts, if applicable */
        if ( netdev_irq_enabled ( netdev ) ) {
                writel ( ( INTELXL_PFINT_DYN_CTL0_CLEARPBA |
                           INTELXL_PFINT_DYN_CTL0_INTENA_MASK ),
                         intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
        }

        /* Poll for completed packets */
        intelxl_poll_tx ( netdev );

        /* Poll for received packets */
        intelxl_poll_rx ( netdev );

        /* Poll for admin events */
        intelxl_poll_admin ( netdev );

        /* Refill RX ring */
        intelxl_refill_rx ( intelxl );
}

/**
 * Enable or disable interrupts
 *
 * @v netdev		Network device
 * @v enable		Interrupts should be enabled
 */
static void intelxl_irq ( struct net_device *netdev, int enable ) {
        struct intelxl_nic *intelxl = netdev->priv;

        if ( enable ) {
                writel ( INTELXL_PFINT_DYN_CTL0_INTENA,
                         intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
        } else {
                writel ( 0, intelxl->regs + INTELXL_PFINT_DYN_CTL0 );
        }
}

/** Network device operations */
static struct net_device_operations intelxl_operations = {
        .open = intelxl_open,
        .close = intelxl_close,
        .transmit = intelxl_transmit,
        .poll = intelxl_poll,
        .irq = intelxl_irq,
};

/******************************************************************************
 *
 * PCI interface
 *
 ******************************************************************************
 */

/**
 * Probe PCI device
 *
 * @v pci		PCI device
 * @ret rc		Return status code
 */
static int intelxl_probe ( struct pci_device *pci ) {
        struct net_device *netdev;
        struct intelxl_nic *intelxl;
        uint32_t pfgen_portnum;
        uint32_t pflan_qalloc;
        int rc;

        /* Allocate and initialise net device */
        netdev = alloc_etherdev ( sizeof ( *intelxl ) );
        if ( ! netdev ) {
                rc = -ENOMEM;
                goto err_alloc;
        }
        netdev_init ( netdev, &intelxl_operations );
        intelxl = netdev->priv;
        pci_set_drvdata ( pci, netdev );
        netdev->dev = &pci->dev;
        memset ( intelxl, 0, sizeof ( *intelxl ) );
        intelxl->pf = PCI_FUNC ( pci->busdevfn );
        intelxl_init_admin ( &intelxl->command, INTELXL_ADMIN_CMD,
                             &intelxl_admin_offsets );
        intelxl_init_admin ( &intelxl->event, INTELXL_ADMIN_EVT,
                             &intelxl_admin_offsets );
        intelxl_init_ring ( &intelxl->tx, INTELXL_TX_NUM_DESC,
                            intelxl_context_tx );
        intelxl_init_ring ( &intelxl->rx, INTELXL_RX_NUM_DESC,
                            intelxl_context_rx );

        /* Fix up PCI device */
        adjust_pci_device ( pci );

        /* Map registers */
        intelxl->regs = ioremap ( pci->membase, INTELXL_BAR_SIZE );
        if ( ! intelxl->regs ) {
                rc = -ENODEV;
                goto err_ioremap;
        }

        /* Reset the NIC */
        if ( ( rc = intelxl_reset ( intelxl ) ) != 0 )
                goto err_reset;

        /* Get port number and base queue number */
        pfgen_portnum = readl ( intelxl->regs + INTELXL_PFGEN_PORTNUM );
        intelxl->port = INTELXL_PFGEN_PORTNUM_PORT_NUM ( pfgen_portnum );
        pflan_qalloc = readl ( intelxl->regs + INTELXL_PFLAN_QALLOC );
        intelxl->base = INTELXL_PFLAN_QALLOC_FIRSTQ ( pflan_qalloc );
        DBGC ( intelxl, "INTELXL %p PF %d using port %d queues [%#04x-%#04x]\n",
               intelxl, intelxl->pf, intelxl->port, intelxl->base,
               INTELXL_PFLAN_QALLOC_LASTQ ( pflan_qalloc ) );

        /* Fetch MAC address and maximum frame size */
        if ( ( rc = intelxl_fetch_mac ( intelxl, netdev ) ) != 0 )
                goto err_fetch_mac;

        /* Open admin queues */
        if ( ( rc = intelxl_open_admin ( intelxl ) ) != 0 )
                goto err_open_admin;

        /* Get switch configuration */
        if ( ( rc = intelxl_admin_switch ( intelxl ) ) != 0 )
                goto err_admin_switch;

        /* Get VSI configuration */
        if ( ( rc = intelxl_admin_vsi ( intelxl ) ) != 0 )
                goto err_admin_vsi;

        /* Configure switch for promiscuous mode */
        if ( ( rc = intelxl_admin_promisc ( intelxl ) ) != 0 )
                goto err_admin_promisc;

        /* Configure queue register addresses */
        intelxl->tx.reg = INTELXL_QTX ( intelxl->queue );
        intelxl->rx.reg = INTELXL_QRX ( intelxl->queue );

        /* Configure interrupt causes */
        writel ( ( INTELXL_QINT_TQCTL_NEXTQ_INDX_NONE |
                   INTELXL_QINT_TQCTL_CAUSE_ENA ),
                 intelxl->regs + INTELXL_QINT_TQCTL ( intelxl->queue ) );
        writel ( ( INTELXL_QINT_RQCTL_NEXTQ_INDX ( intelxl->queue ) |
                   INTELXL_QINT_RQCTL_NEXTQ_TYPE_TX |
                   INTELXL_QINT_RQCTL_CAUSE_ENA ),
                 intelxl->regs + INTELXL_QINT_RQCTL ( intelxl->queue ) );
        writel ( ( INTELXL_PFINT_LNKLST0_FIRSTQ_INDX ( intelxl->queue ) |
                   INTELXL_PFINT_LNKLST0_FIRSTQ_TYPE_RX ),
                 intelxl->regs + INTELXL_PFINT_LNKLST0 );
        writel ( INTELXL_PFINT_ICR0_ENA_ADMINQ,
                 intelxl->regs + INTELXL_PFINT_ICR0_ENA );
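        /* The single PF interrupt is thus wired so that admin queue
         * events are reported via ICR0, while the receive and
         * transmit queues are chained from PFINT_LNKLST0 (receive
         * queue first, then transmit queue, then end of list);
         * intelxl_poll() acknowledges the interrupt via
         * PFINT_DYN_CTL0 on each poll.
         */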

        /* Register network device */
        if ( ( rc = register_netdev ( netdev ) ) != 0 )
                goto err_register_netdev;

        /* Set initial link state */
        intelxl_admin_link ( netdev );

        return 0;

        unregister_netdev ( netdev );
 err_register_netdev:
 err_admin_promisc:
 err_admin_vsi:
 err_admin_switch:
        intelxl_close_admin ( intelxl );
 err_open_admin:
 err_fetch_mac:
        intelxl_reset ( intelxl );
 err_reset:
        iounmap ( intelxl->regs );
 err_ioremap:
        netdev_nullify ( netdev );
        netdev_put ( netdev );
 err_alloc:
        return rc;
}

/**
 * Remove PCI device
 *
 * @v pci		PCI device
 */
static void intelxl_remove ( struct pci_device *pci ) {
        struct net_device *netdev = pci_get_drvdata ( pci );
        struct intelxl_nic *intelxl = netdev->priv;

        /* Unregister network device */
        unregister_netdev ( netdev );

        /* Close admin queues */
        intelxl_close_admin ( intelxl );

        /* Reset the NIC */
        intelxl_reset ( intelxl );

        /* Free network device */
        iounmap ( intelxl->regs );
        netdev_nullify ( netdev );
        netdev_put ( netdev );
}

/** PCI device IDs */
static struct pci_device_id intelxl_nics[] = {
        PCI_ROM ( 0x8086, 0x1572, "x710-sfp", "X710 10GbE SFP+", 0 ),
        PCI_ROM ( 0x8086, 0x1574, "xl710-qemu", "Virtual XL710", 0 ),
        PCI_ROM ( 0x8086, 0x1580, "xl710-kx-b", "XL710 40GbE backplane", 0 ),
        PCI_ROM ( 0x8086, 0x1581, "xl710-kx-c", "XL710 10GbE backplane", 0 ),
        PCI_ROM ( 0x8086, 0x1583, "xl710-qda2", "XL710 40GbE QSFP+", 0 ),
        PCI_ROM ( 0x8086, 0x1584, "xl710-qda1", "XL710 40GbE QSFP+", 0 ),
        PCI_ROM ( 0x8086, 0x1585, "x710-qsfp", "X710 10GbE QSFP+", 0 ),
        PCI_ROM ( 0x8086, 0x1586, "x710-10gt", "X710 10GBASE-T", 0 ),
        PCI_ROM ( 0x8086, 0x1587, "x710-kr2", "XL710 20GbE backplane", 0 ),
        PCI_ROM ( 0x8086, 0x1588, "x710-kr2-a", "XL710 20GbE backplane", 0 ),
        PCI_ROM ( 0x8086, 0x1589, "x710-10gt4", "X710 10GBASE-T4", 0 ),
        PCI_ROM ( 0x8086, 0x158a, "xxv710", "XXV710 25GbE backplane", 0 ),
        PCI_ROM ( 0x8086, 0x158b, "xxv710-sfp28", "XXV710 25GbE SFP28", 0 ),
        PCI_ROM ( 0x8086, 0x37ce, "x722-kx", "X722 10GbE backplane", 0 ),
        PCI_ROM ( 0x8086, 0x37cf, "x722-qsfp", "X722 10GbE QSFP+", 0 ),
        PCI_ROM ( 0x8086, 0x37d0, "x722-sfp", "X722 10GbE SFP+", 0 ),
        PCI_ROM ( 0x8086, 0x37d1, "x722-1gt", "X722 1GBASE-T", 0 ),
        PCI_ROM ( 0x8086, 0x37d2, "x722-10gt", "X722 10GBASE-T", 0 ),
        PCI_ROM ( 0x8086, 0x37d3, "x722-sfp-i", "X722 10GbE SFP+", 0 ),
};

/** PCI driver */
struct pci_driver intelxl_driver __pci_driver = {
        .ids = intelxl_nics,
        .id_count = ( sizeof ( intelxl_nics ) / sizeof ( intelxl_nics[0] ) ),
        .probe = intelxl_probe,
        .remove = intelxl_remove,
};