[hyperv] Fix resource leaks on error path
[ipxe.git] / src / interface / hyperv / vmbus.c
1 /*
2 * Copyright (C) 2014 Michael Brown <mbrown@fensystems.co.uk>.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation; either version 2 of the
7 * License, or (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA.
18 *
19 * You can also choose to distribute this program under the terms of
20 * the Unmodified Binary Distribution Licence (as given in the file
21 * COPYING.UBDL), provided that you have satisfied its requirements.
22 */
23
24 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
25
26 /** @file
27 *
28 * Hyper-V virtual machine bus
29 *
30 */
31
32 #include <stdint.h>
33 #include <stdlib.h>
34 #include <stdio.h>
35 #include <string.h>
36 #include <errno.h>
37 #include <assert.h>
38 #include <byteswap.h>
39 #include <ipxe/nap.h>
40 #include <ipxe/malloc.h>
41 #include <ipxe/iobuf.h>
42 #include <ipxe/bitops.h>
43 #include <ipxe/hyperv.h>
44 #include <ipxe/vmbus.h>
45
46 /** VMBus initial GPADL ID
47 *
48 * This is an opaque value with no meaning. The Linux kernel uses
49 * 0xe1e10.
50 */
51 #define VMBUS_GPADL_MAGIC 0x18ae0000
52
53 /**
54 * Post message
55 *
56 * @v hv Hyper-V hypervisor
57 * @v header Message header
58 * @v len Length of message (including header)
59 * @ret rc Return status code
60 */
61 static int vmbus_post_message ( struct hv_hypervisor *hv,
62 const struct vmbus_message_header *header,
63 size_t len ) {
64 struct vmbus *vmbus = hv->vmbus;
65 int rc;
66
67 /* Post message */
68 if ( ( rc = hv_post_message ( hv, VMBUS_MESSAGE_ID, VMBUS_MESSAGE_TYPE,
69 header, len ) ) != 0 ) {
70 DBGC ( vmbus, "VMBUS %p could not post message: %s\n",
71 vmbus, strerror ( rc ) );
72 return rc;
73 }
74
75 return 0;
76 }
77
78 /**
79 * Post empty message
80 *
81 * @v hv Hyper-V hypervisor
82 * @v type Message type
83 * @ret rc Return status code
84 */
85 static int vmbus_post_empty_message ( struct hv_hypervisor *hv,
86 unsigned int type ) {
87 struct vmbus_message_header header = { .type = cpu_to_le32 ( type ) };
88
89 return vmbus_post_message ( hv, &header, sizeof ( header ) );
90 }
91
92 /**
93 * Wait for received message of any type
94 *
95 * @v hv Hyper-V hypervisor
96 * @ret rc Return status code
97 */
98 static int vmbus_wait_for_any_message ( struct hv_hypervisor *hv ) {
99 struct vmbus *vmbus = hv->vmbus;
100 int rc;
101
102 /* Wait for message */
103 if ( ( rc = hv_wait_for_message ( hv, VMBUS_MESSAGE_SINT ) ) != 0 ) {
104 DBGC ( vmbus, "VMBUS %p failed waiting for message: %s\n",
105 vmbus, strerror ( rc ) );
106 return rc;
107 }
108
109 /* Sanity check */
110 if ( hv->message->received.type != cpu_to_le32 ( VMBUS_MESSAGE_TYPE ) ){
111 DBGC ( vmbus, "VMBUS %p invalid message type %d\n",
112 vmbus, le32_to_cpu ( hv->message->received.type ) );
113 return -EINVAL;
114 }
115
116 return 0;
117 }
118
119 /**
120 * Wait for received message of a specified type, ignoring any others
121 *
122 * @v hv Hyper-V hypervisor
123 * @v type Message type
124 * @ret rc Return status code
125 */
126 static int vmbus_wait_for_message ( struct hv_hypervisor *hv,
127 unsigned int type ) {
128 struct vmbus *vmbus = hv->vmbus;
129 const struct vmbus_message_header *header = &vmbus->message->header;
130 int rc;
131
132 /* Loop until specified message arrives, or until an error occurs */
133 while ( 1 ) {
134
135 /* Wait for message */
136 if ( ( rc = vmbus_wait_for_any_message ( hv ) ) != 0 )
137 return rc;
138
139 /* Check for requested message type */
140 if ( header->type == cpu_to_le32 ( type ) )
141 return 0;
142
143 /* Ignore any other messages (e.g. due to additional
144 * channels being offered at runtime).
145 */
146 DBGC ( vmbus, "VMBUS %p ignoring message type %d (expecting "
147 "%d)\n", vmbus, le32_to_cpu ( header->type ), type );
148 }
149 }
150
151 /**
152 * Initiate contact
153 *
154 * @v hv Hyper-V hypervisor
155 * @v raw VMBus protocol (raw) version
156 * @ret rc Return status code
157 */
158 static int vmbus_initiate_contact ( struct hv_hypervisor *hv,
159 unsigned int raw ) {
160 struct vmbus *vmbus = hv->vmbus;
161 const struct vmbus_version_response *version = &vmbus->message->version;
162 struct vmbus_initiate_contact initiate;
163 int rc;
164
165 /* Construct message */
166 memset ( &initiate, 0, sizeof ( initiate ) );
167 initiate.header.type = cpu_to_le32 ( VMBUS_INITIATE_CONTACT );
168 initiate.version.raw = cpu_to_le32 ( raw );
169 initiate.intr = virt_to_phys ( vmbus->intr );
170 initiate.monitor_in = virt_to_phys ( vmbus->monitor_in );
171 initiate.monitor_out = virt_to_phys ( vmbus->monitor_out );
172
173 /* Post message */
174 if ( ( rc = vmbus_post_message ( hv, &initiate.header,
175 sizeof ( initiate ) ) ) != 0 )
176 return rc;
177
178 /* Wait for response */
179 if ( ( rc = vmbus_wait_for_message ( hv, VMBUS_VERSION_RESPONSE ) ) !=0)
180 return rc;
181
182 /* Check response */
183 if ( ! version->supported ) {
184 DBGC ( vmbus, "VMBUS %p requested version not supported\n",
185 vmbus );
186 return -ENOTSUP;
187 }
188 if ( version->version.raw != cpu_to_le32 ( raw ) ) {
189 DBGC ( vmbus, "VMBUS %p unexpected version %d.%d\n",
190 vmbus, le16_to_cpu ( version->version.major ),
191 le16_to_cpu ( version->version.minor ) );
192 return -EPROTO;
193 }
194
195 DBGC ( vmbus, "VMBUS %p initiated contact using version %d.%d\n",
196 vmbus, le16_to_cpu ( version->version.major ),
197 le16_to_cpu ( version->version.minor ) );
198 return 0;
199 }
200
201 /**
202 * Terminate contact
203 *
204 * @v hv Hyper-V hypervisor
205 * @ret rc Return status code
206 */
207 static int vmbus_unload ( struct hv_hypervisor *hv ) {
208 int rc;
209
210 /* Post message */
211 if ( ( rc = vmbus_post_empty_message ( hv, VMBUS_UNLOAD ) ) != 0 )
212 return rc;
213
214 /* Wait for response */
215 if ( ( rc = vmbus_wait_for_message ( hv, VMBUS_UNLOAD_RESPONSE ) ) != 0)
216 return rc;
217
218 return 0;
219 }
220
221 /**
222 * Negotiate protocol version
223 *
224 * @v hv Hyper-V hypervisor
225 * @ret rc Return status code
226 */
227 static int vmbus_negotiate_version ( struct hv_hypervisor *hv ) {
228 int rc;
229
230 /* We require the ability to disconnect from and reconnect to
231 * VMBus; if we don't have this then there is no (viable) way
232 * for a loaded operating system to continue to use any VMBus
233 * devices. (There is also a small but non-zero risk that the
234 * host will continue to write to our interrupt and monitor
235 * pages, since the VMBUS_UNLOAD message in earlier versions
236 * is essentially a no-op.)
237 *
238 * This requires us to ensure that the host supports protocol
239 * version 3.0 (VMBUS_VERSION_WIN8_1). However, we can't
240 * actually _use_ protocol version 3.0, since doing so causes
241 * an iSCSI-booted Windows Server 2012 R2 VM to crash due to a
242 * NULL pointer dereference in vmbus.sys.
243 *
244 * To work around this problem, we first ensure that we can
245 * connect using protocol v3.0, then disconnect and reconnect
246 * using the oldest known protocol.
247 */
248
249 /* Initiate contact to check for required protocol support */
250 if ( ( rc = vmbus_initiate_contact ( hv, VMBUS_VERSION_WIN8_1 ) ) != 0 )
251 return rc;
252
253 /* Terminate contact */
254 if ( ( rc = vmbus_unload ( hv ) ) != 0 )
255 return rc;
256
257 /* Reinitiate contact using the oldest known protocol version */
258 if ( ( rc = vmbus_initiate_contact ( hv, VMBUS_VERSION_WS2008 ) ) != 0 )
259 return rc;
260
261 return 0;
262 }
263
264 /**
265 * Establish GPA descriptor list
266 *
267 * @v vmdev VMBus device
268 * @v data Data buffer
269 * @v len Length of data buffer
270 * @ret gpadl GPADL ID, or negative error
271 */
272 int vmbus_establish_gpadl ( struct vmbus_device *vmdev, userptr_t data,
273 size_t len ) {
274 struct hv_hypervisor *hv = vmdev->hv;
275 struct vmbus *vmbus = hv->vmbus;
276 physaddr_t addr = user_to_phys ( data, 0 );
277 unsigned int pfn_count = hv_pfn_count ( addr, len );
278 struct {
279 struct vmbus_gpadl_header gpadlhdr;
280 struct vmbus_gpa_range range;
281 uint64_t pfn[pfn_count];
282 } __attribute__ (( packed )) gpadlhdr;
283 const struct vmbus_gpadl_created *created = &vmbus->message->created;
284 static unsigned int gpadl = VMBUS_GPADL_MAGIC;
285 unsigned int i;
286 int rc;
287
288 /* Allocate GPADL ID */
289 gpadl++;
290
291 /* Construct message */
292 memset ( &gpadlhdr, 0, sizeof ( gpadlhdr ) );
293 gpadlhdr.gpadlhdr.header.type = cpu_to_le32 ( VMBUS_GPADL_HEADER );
294 gpadlhdr.gpadlhdr.channel = cpu_to_le32 ( vmdev->channel );
295 gpadlhdr.gpadlhdr.gpadl = cpu_to_le32 ( gpadl );
296 gpadlhdr.gpadlhdr.range_len =
297 cpu_to_le16 ( ( sizeof ( gpadlhdr.range ) +
298 sizeof ( gpadlhdr.pfn ) ) );
299 gpadlhdr.gpadlhdr.range_count = cpu_to_le16 ( 1 );
300 gpadlhdr.range.len = cpu_to_le32 ( len );
301 gpadlhdr.range.offset = cpu_to_le32 ( addr & ( PAGE_SIZE - 1 ) );
302 for ( i = 0 ; i < pfn_count ; i++ )
303 gpadlhdr.pfn[i] = ( ( addr / PAGE_SIZE ) + i );
304
305 /* Post message */
306 if ( ( rc = vmbus_post_message ( hv, &gpadlhdr.gpadlhdr.header,
307 sizeof ( gpadlhdr ) ) ) != 0 )
308 return rc;
309
310 /* Wait for response */
311 if ( ( rc = vmbus_wait_for_message ( hv, VMBUS_GPADL_CREATED ) ) != 0 )
312 return rc;
313
314 /* Check response */
315 if ( created->channel != cpu_to_le32 ( vmdev->channel ) ) {
316 DBGC ( vmdev, "VMBUS %s unexpected GPADL channel %d\n",
317 vmdev->dev.name, le32_to_cpu ( created->channel ) );
318 return -EPROTO;
319 }
320 if ( created->gpadl != cpu_to_le32 ( gpadl ) ) {
321 DBGC ( vmdev, "VMBUS %s unexpected GPADL ID %#08x\n",
322 vmdev->dev.name, le32_to_cpu ( created->gpadl ) );
323 return -EPROTO;
324 }
325 if ( created->status != 0 ) {
326 DBGC ( vmdev, "VMBUS %s GPADL creation failed: %#08x\n",
327 vmdev->dev.name, le32_to_cpu ( created->status ) );
328 return -EPROTO;
329 }
330
331 DBGC ( vmdev, "VMBUS %s GPADL %#08x is [%08lx,%08lx)\n",
332 vmdev->dev.name, gpadl, addr, ( addr + len ) );
333 return gpadl;
334 }
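
/* As an illustrative sketch: a caller holding a hypothetical
 * page-aligned buffer "buf" of length "len" would pair the calls as
 *
 *	int gpadl;
 *
 *	gpadl = vmbus_establish_gpadl ( vmdev, virt_to_user ( buf ), len );
 *	if ( gpadl < 0 )
 *		return gpadl;
 *	... hand gpadl to the host, e.g. within an open-channel message ...
 *	vmbus_gpadl_teardown ( vmdev, gpadl );
 *
 * keeping the returned GPADL ID for the lifetime of the mapping, as
 * vmbus_open() below does for its ring buffer.
 */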
335
336 /**
337 * Tear down GPA descriptor list
338 *
339 * @v vmdev VMBus device
340 * @v gpadl GPADL ID
341 * @ret rc Return status code
342 */
343 int vmbus_gpadl_teardown ( struct vmbus_device *vmdev, unsigned int gpadl ) {
344 struct hv_hypervisor *hv = vmdev->hv;
345 struct vmbus *vmbus = hv->vmbus;
346 struct vmbus_gpadl_teardown teardown;
347 const struct vmbus_gpadl_torndown *torndown = &vmbus->message->torndown;
348 int rc;
349
350 /* Construct message */
351 memset ( &teardown, 0, sizeof ( teardown ) );
352 teardown.header.type = cpu_to_le32 ( VMBUS_GPADL_TEARDOWN );
353 teardown.channel = cpu_to_le32 ( vmdev->channel );
354 teardown.gpadl = cpu_to_le32 ( gpadl );
355
356 /* Post message */
357 if ( ( rc = vmbus_post_message ( hv, &teardown.header,
358 sizeof ( teardown ) ) ) != 0 )
359 return rc;
360
361 /* Wait for response */
362 if ( ( rc = vmbus_wait_for_message ( hv, VMBUS_GPADL_TORNDOWN ) ) != 0 )
363 return rc;
364
365 /* Check response */
366 if ( torndown->gpadl != cpu_to_le32 ( gpadl ) ) {
367 DBGC ( vmdev, "VMBUS %s unexpected GPADL ID %#08x\n",
368 vmdev->dev.name, le32_to_cpu ( torndown->gpadl ) );
369 return -EPROTO;
370 }
371
372 return 0;
373 }
374
375 /**
376 * Open VMBus channel
377 *
378 * @v vmdev VMBus device
379 * @v op Channel operations
380 * @v out_len Outbound ring buffer length
381 * @v in_len Inbound ring buffer length
382 * @v mtu Maximum expected data packet length (including headers)
383 * @ret rc Return status code
384 *
385 * Both outbound and inbound ring buffer lengths must be a power of
386 * two and a multiple of PAGE_SIZE. The requirement to be a power of
387 * two is a policy decision taken to simplify the ring buffer indexing
388 * logic.
389 */
390 int vmbus_open ( struct vmbus_device *vmdev,
391 struct vmbus_channel_operations *op,
392 size_t out_len, size_t in_len, size_t mtu ) {
393 struct hv_hypervisor *hv = vmdev->hv;
394 struct vmbus *vmbus = hv->vmbus;
395 struct vmbus_open_channel open;
396 const struct vmbus_open_channel_result *opened =
397 &vmbus->message->opened;
398 size_t len;
399 void *ring;
400 void *packet;
401 int gpadl;
402 uint32_t open_id;
403 int rc;
404
405 /* Sanity checks */
406 assert ( ( out_len % PAGE_SIZE ) == 0 );
407 assert ( ( out_len & ( out_len - 1 ) ) == 0 );
408 assert ( ( in_len % PAGE_SIZE ) == 0 );
409 assert ( ( in_len & ( in_len - 1 ) ) == 0 );
410 assert ( mtu >= ( sizeof ( struct vmbus_packet_header ) +
411 sizeof ( struct vmbus_packet_footer ) ) );
412
413 /* Allocate packet buffer */
414 packet = malloc ( mtu );
415 if ( ! packet ) {
416 rc = -ENOMEM;
417 goto err_alloc_packet;
418 }
419
420 /* Allocate ring buffer */
421 len = ( sizeof ( *vmdev->out ) + out_len +
422 sizeof ( *vmdev->in ) + in_len );
423 assert ( ( len % PAGE_SIZE ) == 0 );
424 ring = malloc_dma ( len, PAGE_SIZE );
425 if ( ! ring ) {
426 rc = -ENOMEM;
427 goto err_alloc_ring;
428 }
429 memset ( ring, 0, len );
430
431 /* Establish GPADL for ring buffer */
432 gpadl = vmbus_establish_gpadl ( vmdev, virt_to_user ( ring ), len );
433 if ( gpadl < 0 ) {
434 rc = gpadl;
435 goto err_establish;
436 }
437
438 /* Construct message */
439 memset ( &open, 0, sizeof ( open ) );
440 open.header.type = cpu_to_le32 ( VMBUS_OPEN_CHANNEL );
441 open.channel = cpu_to_le32 ( vmdev->channel );
442 open_id = random();
443 open.id = open_id; /* Opaque random value: endianness irrelevant */
444 open.gpadl = cpu_to_le32 ( gpadl );
445 open.out_pages = ( ( sizeof ( *vmdev->out ) / PAGE_SIZE ) +
446 ( out_len / PAGE_SIZE ) );
447
448 /* Post message */
449 if ( ( rc = vmbus_post_message ( hv, &open.header,
450 sizeof ( open ) ) ) != 0 )
451 goto err_post_message;
452
453 /* Wait for response */
454 if ( ( rc = vmbus_wait_for_message ( hv,
455 VMBUS_OPEN_CHANNEL_RESULT ) ) != 0)
456 goto err_wait_for_message;
457
458 /* Check response */
459 if ( opened->channel != cpu_to_le32 ( vmdev->channel ) ) {
460 DBGC ( vmdev, "VMBUS %s unexpected opened channel %#08x\n",
461 vmdev->dev.name, le32_to_cpu ( opened->channel ) );
462 rc = -EPROTO;
463 goto err_check_response;
464 }
465 if ( opened->id != open_id /* Non-endian */ ) {
466 DBGC ( vmdev, "VMBUS %s unexpected open ID %#08x\n",
467 vmdev->dev.name, le32_to_cpu ( opened->id ) );
468 rc = -EPROTO;
469 goto err_check_response;
470 }
471 if ( opened->status != 0 ) {
472 DBGC ( vmdev, "VMBUS %s open failed: %#08x\n",
473 vmdev->dev.name, le32_to_cpu ( opened->status ) );
474 rc = -EPROTO;
475 goto err_check_response;
476 }
477
478 /* Store channel parameters */
479 vmdev->out_len = out_len;
480 vmdev->in_len = in_len;
481 vmdev->out = ring;
482 vmdev->in = ( ring + sizeof ( *vmdev->out ) + out_len );
483 vmdev->gpadl = gpadl;
484 vmdev->op = op;
485 vmdev->mtu = mtu;
486 vmdev->packet = packet;
487
488 DBGC ( vmdev, "VMBUS %s channel GPADL %#08x ring "
489 "[%#08lx,%#08lx,%#08lx)\n", vmdev->dev.name, vmdev->gpadl,
490 virt_to_phys ( vmdev->out ), virt_to_phys ( vmdev->in ),
491 ( virt_to_phys ( vmdev->out ) + len ) );
492 return 0;
493
494 err_check_response:
495 err_wait_for_message:
496 err_post_message:
497 	vmbus_gpadl_teardown ( vmdev, gpadl );
498 err_establish:
499 free_dma ( ring, len );
500 err_alloc_ring:
501 free ( packet );
502 err_alloc_packet:
503 return rc;
504 }
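
/* A minimal usage sketch, assuming a hypothetical driver providing
 * "example_channel_operations": both ring lengths below are powers of
 * two and multiples of PAGE_SIZE, as required above, and the 512-byte
 * MTU comfortably exceeds the combined header and footer sizes:
 *
 *	if ( ( rc = vmbus_open ( vmdev, &example_channel_operations,
 *				 ( 4 * PAGE_SIZE ), ( 4 * PAGE_SIZE ),
 *				 512 ) ) != 0 )
 *		return rc;
 *	...
 *	vmbus_close ( vmdev );
 */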
505
506 /**
507 * Close VMBus channel
508 *
509 * @v vmdev VMBus device
510 */
511 void vmbus_close ( struct vmbus_device *vmdev ) {
512 struct hv_hypervisor *hv = vmdev->hv;
513 struct vmbus_close_channel close;
514 size_t len;
515 int rc;
516
517 /* Construct message */
518 memset ( &close, 0, sizeof ( close ) );
519 close.header.type = cpu_to_le32 ( VMBUS_CLOSE_CHANNEL );
520 close.channel = cpu_to_le32 ( vmdev->channel );
521
522 /* Post message */
523 if ( ( rc = vmbus_post_message ( hv, &close.header,
524 sizeof ( close ) ) ) != 0 ) {
525 DBGC ( vmdev, "VMBUS %s failed to close: %s\n",
526 vmdev->dev.name, strerror ( rc ) );
527 /* Continue to attempt to tear down GPADL, so that our
528 * memory is no longer accessible by the remote VM.
529 */
530 }
531
532 /* Tear down GPADL */
533 if ( ( rc = vmbus_gpadl_teardown ( vmdev,
534 vmdev->gpadl ) ) != 0 ) {
535 DBGC ( vmdev, "VMBUS %s failed to tear down channel GPADL: "
536 "%s\n", vmdev->dev.name, strerror ( rc ) );
537 /* We can't prevent the remote VM from continuing to
538 * access this memory, so leak it.
539 */
540 return;
541 }
542
543 /* Free ring buffer */
544 len = ( sizeof ( *vmdev->out ) + vmdev->out_len +
545 sizeof ( *vmdev->in ) + vmdev->in_len );
546 free_dma ( vmdev->out, len );
547 vmdev->out = NULL;
548 vmdev->in = NULL;
549
550 /* Free packet buffer */
551 free ( vmdev->packet );
552 vmdev->packet = NULL;
553
554 DBGC ( vmdev, "VMBUS %s closed\n", vmdev->dev.name );
555 }
556
557 /**
558 * Signal channel via monitor page
559 *
560 * @v vmdev VMBus device
561 */
562 static void vmbus_signal_monitor ( struct vmbus_device *vmdev ) {
563 struct hv_hypervisor *hv = vmdev->hv;
564 struct vmbus *vmbus = hv->vmbus;
565 struct hv_monitor_trigger *trigger;
566 unsigned int group;
567 unsigned int bit;
568
569 /* Set bit in monitor trigger group */
570 group = ( vmdev->monitor / ( 8 * sizeof ( trigger->pending ) ));
571 bit = ( vmdev->monitor % ( 8 * sizeof ( trigger->pending ) ) );
572 trigger = &vmbus->monitor_out->trigger[group];
573 set_bit ( bit, trigger );
574 }
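
/* For example, with hypothetical values: a monitor ID of 83 and a
 * 32-bit trigger->pending field give group ( 83 / 32 ) = 2 and
 * bit ( 83 % 32 ) = 19, i.e. bit 19 of monitor_out->trigger[2].
 */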
575
576 /**
577 * Signal channel via hypervisor event
578 *
579 * @v vmdev VMBus device
580 */
581 static void vmbus_signal_event ( struct vmbus_device *vmdev ) {
582 struct hv_hypervisor *hv = vmdev->hv;
583 int rc;
584
585 /* Signal hypervisor event */
586 if ( ( rc = hv_signal_event ( hv, VMBUS_EVENT_ID, 0 ) ) != 0 ) {
587 DBGC ( vmdev, "VMBUS %s could not signal event: %s\n",
588 vmdev->dev.name, strerror ( rc ) );
589 return;
590 }
591 }
592
593 /**
594 * Fill outbound ring buffer
595 *
596 * @v vmdev VMBus device
597 * @v prod Producer index
598 * @v data Data
599 * @v len Length
600 * @ret prod New producer index
601 *
602 * The caller must ensure that there is sufficient space in the ring
603 * buffer.
604 */
605 static size_t vmbus_produce ( struct vmbus_device *vmdev, size_t prod,
606 const void *data, size_t len ) {
607 size_t first;
608 size_t second;
609
610 /* Determine fragment lengths */
611 first = ( vmdev->out_len - prod );
612 if ( first > len )
613 first = len;
614 second = ( len - first );
615
616 /* Copy fragment(s) */
617 memcpy ( &vmdev->out->data[prod], data, first );
618 if ( second )
619 memcpy ( &vmdev->out->data[0], ( data + first ), second );
620
621 return ( ( prod + len ) & ( vmdev->out_len - 1 ) );
622 }
623
624 /**
625 * Consume inbound ring buffer
626 *
627 * @v vmdev VMBus device
628 * @v cons Consumer index
629 * @v data Data buffer
630 * @v len Length to consume
631 * @ret cons New consumer index
632 */
633 static size_t vmbus_consume ( struct vmbus_device *vmdev, size_t cons,
634 void *data, size_t len ) {
635 size_t first;
636 size_t second;
637
638 /* Determine fragment lengths */
639 first = ( vmdev->in_len - cons );
640 if ( first > len )
641 first = len;
642 second = ( len - first );
643
644 /* Copy fragment(s) */
645 memcpy ( data, &vmdev->in->data[cons], first );
646 if ( second )
647 memcpy ( ( data + first ), &vmdev->in->data[0], second );
648
649 return ( ( cons + len ) & ( vmdev->in_len - 1 ) );
650 }
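
/* A worked example of the wrap-around arithmetic shared by
 * vmbus_produce() and vmbus_consume(), using hypothetical numbers:
 * with a 4096-byte inbound ring and a consumer index of 4000,
 * consuming 200 bytes copies 96 bytes from offsets [4000,4096) and
 * 104 bytes from offsets [0,104), then returns the new index
 * ( ( 4000 + 200 ) & ( 4096 - 1 ) ) = 104.
 */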
651
652 /**
653 * Send packet via ring buffer
654 *
655 * @v vmdev VMBus device
656 * @v header Packet header
657 * @v data Data
658 * @v len Length of data
659 * @ret rc Return status code
660 *
661 * Send a packet via the outbound ring buffer. All fields in the
662 * packet header must be filled in, with the exception of the total
663 * packet length.
664 */
665 static int vmbus_send ( struct vmbus_device *vmdev,
666 struct vmbus_packet_header *header,
667 const void *data, size_t len ) {
668 struct hv_hypervisor *hv = vmdev->hv;
669 struct vmbus *vmbus = hv->vmbus;
670 static uint8_t padding[ 8 - 1 ];
671 struct vmbus_packet_footer footer;
672 size_t header_len;
673 size_t pad_len;
674 size_t footer_len;
675 size_t ring_len;
676 size_t cons;
677 size_t prod;
678 size_t old_prod;
679 size_t fill;
680
681 /* Sanity check */
682 assert ( vmdev->out != NULL );
683
684 /* Calculate lengths */
685 header_len = ( le16_to_cpu ( header->hdr_qlen ) * 8 );
686 pad_len = ( ( -len ) & ( 8 - 1 ) );
687 footer_len = sizeof ( footer );
688 ring_len = ( header_len + len + pad_len + footer_len );
689
690 /* Check that we have enough room in the outbound ring buffer */
691 cons = le32_to_cpu ( vmdev->out->cons );
692 prod = le32_to_cpu ( vmdev->out->prod );
693 old_prod = prod;
694 fill = ( ( prod - cons ) & ( vmdev->out_len - 1 ) );
695 if ( ( fill + ring_len ) >= vmdev->out_len ) {
696 DBGC ( vmdev, "VMBUS %s ring buffer full\n", vmdev->dev.name );
697 return -ENOBUFS;
698 }
699
700 /* Complete header */
701 header->qlen = cpu_to_le16 ( ( ring_len - footer_len ) / 8 );
702
703 /* Construct footer */
704 footer.reserved = 0;
705 footer.prod = vmdev->out->prod;
706
707 /* Copy packet to buffer */
708 DBGC2 ( vmdev, "VMBUS %s sending:\n", vmdev->dev.name );
709 DBGC2_HDA ( vmdev, prod, header, header_len );
710 prod = vmbus_produce ( vmdev, prod, header, header_len );
711 DBGC2_HDA ( vmdev, prod, data, len );
712 prod = vmbus_produce ( vmdev, prod, data, len );
713 prod = vmbus_produce ( vmdev, prod, padding, pad_len );
714 DBGC2_HDA ( vmdev, prod, &footer, sizeof ( footer ) );
715 prod = vmbus_produce ( vmdev, prod, &footer, sizeof ( footer ) );
716 assert ( ( ( prod - old_prod ) & ( vmdev->out_len - 1 ) ) == ring_len );
717
718 /* Update producer index */
719 wmb();
720 vmdev->out->prod = cpu_to_le32 ( prod );
721
722 /* Return if we do not need to signal the host. This follows
723 * the logic of hv_need_to_signal() in the Linux driver.
724 */
725 mb();
726 if ( vmdev->out->intr_mask )
727 return 0;
728 rmb();
729 cons = le32_to_cpu ( vmdev->out->cons );
730 if ( cons != old_prod )
731 return 0;
732
733 /* Set channel bit in interrupt page */
734 set_bit ( vmdev->channel, vmbus->intr->out );
735
736 /* Signal the host */
737 vmdev->signal ( vmdev );
738
739 return 0;
740 }
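
/* A worked length calculation with hypothetical numbers: sending 50
 * data bytes after a 16-byte header gives pad_len = ( ( -50 ) & 7 ) = 6
 * and, with the 8-byte footer, ring_len = ( 16 + 50 + 6 + 8 ) = 80;
 * the completed header->qlen is then ( ( 80 - 8 ) / 8 ) = 9 quadwords,
 * covering the header and padded data but not the footer.
 */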
741
742 /**
743 * Send control packet via ring buffer
744 *
745 * @v vmdev VMBus device
746 * @v xid Transaction ID (or zero to not request completion)
747 * @v data Data
748 * @v len Length of data
749 * @ret rc Return status code
750 *
751 * Send data using a VMBUS_DATA_INBAND packet.
752 */
753 int vmbus_send_control ( struct vmbus_device *vmdev, uint64_t xid,
754 const void *data, size_t len ) {
755 struct vmbus_packet_header *header = vmdev->packet;
756
757 /* Construct header in packet buffer */
758 assert ( header != NULL );
759 header->type = cpu_to_le16 ( VMBUS_DATA_INBAND );
760 header->hdr_qlen = cpu_to_le16 ( sizeof ( *header ) / 8 );
761 header->flags = ( xid ?
762 cpu_to_le16 ( VMBUS_COMPLETION_REQUESTED ) : 0 );
763 header->xid = xid; /* Non-endian */
764
765 return vmbus_send ( vmdev, header, data, len );
766 }
767
768 /**
769 * Send data packet via ring buffer
770 *
771 * @v vmdev VMBus device
772 * @v xid Transaction ID
773 * @v data Data
774 * @v len Length of data
775 * @v iobuf I/O buffer
776 * @ret rc Return status code
777 *
778 * Send data using a VMBUS_DATA_GPA_DIRECT packet. The caller is
779 * responsible for ensuring that the I/O buffer remains untouched
780 * until the corresponding completion has been received.
781 */
782 int vmbus_send_data ( struct vmbus_device *vmdev, uint64_t xid,
783 const void *data, size_t len, struct io_buffer *iobuf ) {
784 physaddr_t addr = virt_to_phys ( iobuf->data );
785 unsigned int pfn_count = hv_pfn_count ( addr, iob_len ( iobuf ) );
786 struct {
787 struct vmbus_gpa_direct_header gpa;
788 struct vmbus_gpa_range range;
789 uint64_t pfn[pfn_count];
790 } __attribute__ (( packed )) *header = vmdev->packet;
791 unsigned int i;
792
793 /* Sanity check */
794 assert ( header != NULL );
795 assert ( sizeof ( *header ) <= vmdev->mtu );
796
797 /* Construct header in packet buffer */
798 header->gpa.header.type = cpu_to_le16 ( VMBUS_DATA_GPA_DIRECT );
799 header->gpa.header.hdr_qlen = cpu_to_le16 ( sizeof ( *header ) / 8 );
800 header->gpa.header.flags = cpu_to_le16 ( VMBUS_COMPLETION_REQUESTED );
801 header->gpa.header.xid = xid; /* Non-endian */
802 header->gpa.range_count = cpu_to_le32 ( 1 );
803 header->range.len = cpu_to_le32 ( iob_len ( iobuf ) );
804 header->range.offset = cpu_to_le32 ( addr & ( PAGE_SIZE - 1 ) );
805 for ( i = 0 ; i < pfn_count ; i++ )
806 header->pfn[i] = ( ( addr / PAGE_SIZE ) + i );
807
808 return vmbus_send ( vmdev, &header->gpa.header, data, len );
809 }
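
/* An illustrative sketch of a hypothetical caller: "hdr" and "hdr_len"
 * are protocol-specific in-band data, and "xid" is chosen so that the
 * matching completion (delivered via vmbus_poll()) identifies "iobuf"
 * for release:
 *
 *	if ( ( rc = vmbus_send_data ( vmdev, xid, hdr, hdr_len,
 *				      iobuf ) ) != 0 )
 *		return rc;
 */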
810
811 /**
812 * Send completion packet via ring buffer
813 *
814 * @v vmdev VMBus device
815 * @v xid Transaction ID
816 * @v data Data
817 * @v len Length of data
818 * @ret rc Return status code
819 *
820 * Send data using a VMBUS_COMPLETION packet.
821 */
822 int vmbus_send_completion ( struct vmbus_device *vmdev, uint64_t xid,
823 const void *data, size_t len ) {
824 struct vmbus_packet_header *header = vmdev->packet;
825
826 /* Construct header in packet buffer */
827 assert ( header != NULL );
828 header->type = cpu_to_le16 ( VMBUS_COMPLETION );
829 header->hdr_qlen = cpu_to_le16 ( sizeof ( *header ) / 8 );
830 header->flags = 0;
831 header->xid = xid; /* Non-endian */
832
833 return vmbus_send ( vmdev, header, data, len );
834 }
835
836 /**
837 * Send cancellation packet via ring buffer
838 *
839 * @v vmdev VMBus device
840 * @v xid Transaction ID
841 * @ret rc Return status code
842 *
843 * Send a cancellation using a VMBUS_CANCELLATION packet.
844 */
845 int vmbus_send_cancellation ( struct vmbus_device *vmdev, uint64_t xid ) {
846 struct vmbus_packet_header *header = vmdev->packet;
847
848 /* Construct header in packet buffer */
849 assert ( header != NULL );
850 header->type = cpu_to_le16 ( VMBUS_CANCELLATION );
851 header->hdr_qlen = cpu_to_le16 ( sizeof ( *header ) / 8 );
852 header->flags = 0;
853 header->xid = xid; /* Non-endian */
854
855 return vmbus_send ( vmdev, header, NULL, 0 );
856 }
857
858 /**
859 * Get transfer page set from pageset ID
860 *
861 * @v vmdev VMBus device
862 * @v pageset Page set ID (in protocol byte order)
863 * @ret pages Page set, or NULL if not found
864 */
865 static struct vmbus_xfer_pages * vmbus_xfer_pages ( struct vmbus_device *vmdev,
866 uint16_t pageset ) {
867 struct vmbus_xfer_pages *pages;
868
869 /* Locate page set */
870 list_for_each_entry ( pages, &vmdev->pages, list ) {
871 if ( pages->pageset == pageset )
872 return pages;
873 }
874
875 DBGC ( vmdev, "VMBUS %s unrecognised page set ID %#04x\n",
876 vmdev->dev.name, le16_to_cpu ( pageset ) );
877 return NULL;
878 }
879
880 /**
881 * Construct I/O buffer list from transfer pages
882 *
883 * @v vmdev VMBus device
884 * @v header Transfer page header
885 * @v list I/O buffer list to populate
886 * @ret rc Return status code
887 */
888 static int vmbus_xfer_page_iobufs ( struct vmbus_device *vmdev,
889 struct vmbus_packet_header *header,
890 struct list_head *list ) {
891 struct vmbus_xfer_page_header *page_header =
892 container_of ( header, struct vmbus_xfer_page_header, header );
893 struct vmbus_xfer_pages *pages;
894 struct io_buffer *iobuf;
895 struct io_buffer *tmp;
896 size_t len;
897 size_t offset;
898 unsigned int range_count;
899 unsigned int i;
900 int rc;
901
902 /* Sanity check */
903 assert ( header->type == cpu_to_le16 ( VMBUS_DATA_XFER_PAGES ) );
904
905 /* Locate page set */
906 pages = vmbus_xfer_pages ( vmdev, page_header->pageset );
907 if ( ! pages ) {
908 rc = -ENOENT;
909 goto err_pages;
910 }
911
912 /* Allocate and populate I/O buffers */
913 range_count = le32_to_cpu ( page_header->range_count );
914 for ( i = 0 ; i < range_count ; i++ ) {
915
916 /* Parse header */
917 len = le32_to_cpu ( page_header->range[i].len );
918 offset = le32_to_cpu ( page_header->range[i].offset );
919
920 /* Allocate I/O buffer */
921 iobuf = alloc_iob ( len );
922 if ( ! iobuf ) {
923 DBGC ( vmdev, "VMBUS %s could not allocate %zd-byte "
924 "I/O buffer\n", vmdev->dev.name, len );
925 rc = -ENOMEM;
926 goto err_alloc;
927 }
928
929 /* Add I/O buffer to list */
930 list_add ( &iobuf->list, list );
931
932 /* Populate I/O buffer */
933 if ( ( rc = pages->op->copy ( pages, iob_put ( iobuf, len ),
934 offset, len ) ) != 0 ) {
935 DBGC ( vmdev, "VMBUS %s could not populate I/O buffer "
936 "range [%zd,%zd): %s\n",
937 vmdev->dev.name, offset, len, strerror ( rc ) );
938 goto err_copy;
939 }
940 }
941
942 return 0;
943
944 err_copy:
945 err_alloc:
946 list_for_each_entry_safe ( iobuf, tmp, list, list ) {
947 list_del ( &iobuf->list );
948 free_iob ( iobuf );
949 }
950 err_pages:
951 return rc;
952 }
953
954 /**
955 * Poll ring buffer
956 *
957 * @v vmdev VMBus device
958 * @ret rc Return status code
959 */
960 int vmbus_poll ( struct vmbus_device *vmdev ) {
961 struct vmbus_packet_header *header = vmdev->packet;
962 struct list_head list;
963 void *data;
964 size_t header_len;
965 size_t len;
966 size_t footer_len;
967 size_t ring_len;
968 size_t cons;
969 size_t old_cons;
970 uint64_t xid;
971 int rc;
972
973 /* Sanity checks */
974 assert ( vmdev->packet != NULL );
975 assert ( vmdev->in != NULL );
976
977 /* Return immediately if buffer is empty */
978 if ( ! vmbus_has_data ( vmdev ) )
979 return 0;
980 cons = le32_to_cpu ( vmdev->in->cons );
981 old_cons = cons;
982
983 /* Consume (start of) header */
984 cons = vmbus_consume ( vmdev, cons, header, sizeof ( *header ) );
985
986 /* Parse and sanity check header */
987 header_len = ( le16_to_cpu ( header->hdr_qlen ) * 8 );
988 if ( header_len < sizeof ( *header ) ) {
989 DBGC ( vmdev, "VMBUS %s received underlength header (%zd "
990 "bytes)\n", vmdev->dev.name, header_len );
991 return -EINVAL;
992 }
993 len = ( ( le16_to_cpu ( header->qlen ) * 8 ) - header_len );
994 footer_len = sizeof ( struct vmbus_packet_footer );
995 ring_len = ( header_len + len + footer_len );
996 if ( ring_len > vmdev->mtu ) {
997 DBGC ( vmdev, "VMBUS %s received overlength packet (%zd "
998 "bytes)\n", vmdev->dev.name, ring_len );
999 return -ERANGE;
1000 }
1001 xid = le64_to_cpu ( header->xid );
1002
1003 /* Consume remainder of packet */
1004 cons = vmbus_consume ( vmdev, cons,
1005 ( ( ( void * ) header ) + sizeof ( *header ) ),
1006 ( ring_len - sizeof ( *header ) ) );
1007 DBGC2 ( vmdev, "VMBUS %s received:\n", vmdev->dev.name );
1008 DBGC2_HDA ( vmdev, old_cons, header, ring_len );
1009 assert ( ( ( cons - old_cons ) & ( vmdev->in_len - 1 ) ) == ring_len );
1010
1011 /* Allocate I/O buffers, if applicable */
1012 INIT_LIST_HEAD ( &list );
1013 if ( header->type == cpu_to_le16 ( VMBUS_DATA_XFER_PAGES ) ) {
1014 if ( ( rc = vmbus_xfer_page_iobufs ( vmdev, header,
1015 &list ) ) != 0 )
1016 return rc;
1017 }
1018
1019 /* Update consumer index */
1020 rmb();
1021 vmdev->in->cons = cpu_to_le32 ( cons );
1022
1023 /* Handle packet */
1024 data = ( ( ( void * ) header ) + header_len );
1025 switch ( header->type ) {
1026
1027 case cpu_to_le16 ( VMBUS_DATA_INBAND ) :
1028 if ( ( rc = vmdev->op->recv_control ( vmdev, xid, data,
1029 len ) ) != 0 ) {
1030 DBGC ( vmdev, "VMBUS %s could not handle control "
1031 "packet: %s\n",
1032 vmdev->dev.name, strerror ( rc ) );
1033 return rc;
1034 }
1035 break;
1036
1037 case cpu_to_le16 ( VMBUS_DATA_XFER_PAGES ) :
1038 if ( ( rc = vmdev->op->recv_data ( vmdev, xid, data, len,
1039 &list ) ) != 0 ) {
1040 DBGC ( vmdev, "VMBUS %s could not handle data packet: "
1041 "%s\n", vmdev->dev.name, strerror ( rc ) );
1042 return rc;
1043 }
1044 break;
1045
1046 case cpu_to_le16 ( VMBUS_COMPLETION ) :
1047 if ( ( rc = vmdev->op->recv_completion ( vmdev, xid, data,
1048 len ) ) != 0 ) {
1049 DBGC ( vmdev, "VMBUS %s could not handle completion: "
1050 "%s\n", vmdev->dev.name, strerror ( rc ) );
1051 return rc;
1052 }
1053 break;
1054
1055 case cpu_to_le16 ( VMBUS_CANCELLATION ) :
1056 if ( ( rc = vmdev->op->recv_cancellation ( vmdev, xid ) ) != 0){
1057 DBGC ( vmdev, "VMBUS %s could not handle cancellation: "
1058 "%s\n", vmdev->dev.name, strerror ( rc ) );
1059 return rc;
1060 }
1061 break;
1062
1063 default:
1064 DBGC ( vmdev, "VMBUS %s unknown packet type %d\n",
1065 vmdev->dev.name, le16_to_cpu ( header->type ) );
1066 return -ENOTSUP;
1067 }
1068
1069 return 0;
1070 }
1071
1072 /**
1073 * Dump channel status (for debugging)
1074 *
1075 * @v vmdev VMBus device
1076 */
1077 void vmbus_dump_channel ( struct vmbus_device *vmdev ) {
1078 size_t out_prod = le32_to_cpu ( vmdev->out->prod );
1079 size_t out_cons = le32_to_cpu ( vmdev->out->cons );
1080 size_t in_prod = le32_to_cpu ( vmdev->in->prod );
1081 size_t in_cons = le32_to_cpu ( vmdev->in->cons );
1082 size_t in_len;
1083 size_t first;
1084 size_t second;
1085
1086 /* Dump ring status */
1087 DBGC ( vmdev, "VMBUS %s out %03zx:%03zx%s in %03zx:%03zx%s\n",
1088 vmdev->dev.name, out_prod, out_cons,
1089 ( vmdev->out->intr_mask ? "(m)" : "" ), in_prod, in_cons,
1090 ( vmdev->in->intr_mask ? "(m)" : "" ) );
1091
1092 /* Dump inbound ring contents, if any */
1093 if ( in_prod != in_cons ) {
1094 in_len = ( ( in_prod - in_cons ) &
1095 ( vmdev->in_len - 1 ) );
1096 first = ( vmdev->in_len - in_cons );
1097 if ( first > in_len )
1098 first = in_len;
1099 second = ( in_len - first );
1100 DBGC_HDA ( vmdev, in_cons, &vmdev->in->data[in_cons], first );
1101 DBGC_HDA ( vmdev, 0, &vmdev->in->data[0], second );
1102 }
1103 }
1104
1105 /**
1106 * Find driver for VMBus device
1107 *
1108 * @v type Device type
1109 * @ret driver Driver, or NULL
1110 */
1111 static struct vmbus_driver * vmbus_find_driver ( const union uuid *type ) {
1112 struct vmbus_driver *vmdrv;
1113
1114 for_each_table_entry ( vmdrv, VMBUS_DRIVERS ) {
1115 if ( memcmp ( &vmdrv->type, type, sizeof ( *type ) ) == 0 )
1116 return vmdrv;
1117 }
1118 return NULL;
1119 }
1120
1121 /**
1122 * Probe channels
1123 *
1124 * @v hv Hyper-V hypervisor
1125 * @v parent Parent device
1126 * @ret rc Return status code
1127 */
1128 static int vmbus_probe_channels ( struct hv_hypervisor *hv,
1129 struct device *parent ) {
1130 struct vmbus *vmbus = hv->vmbus;
1131 const struct vmbus_message_header *header = &vmbus->message->header;
1132 const struct vmbus_offer_channel *offer = &vmbus->message->offer;
1133 const union uuid *type;
1134 union uuid instance;
1135 struct vmbus_driver *driver;
1136 struct vmbus_device *vmdev;
1137 struct vmbus_device *tmp;
1138 unsigned int channel;
1139 int rc;
1140
1141 /* Post message */
1142 if ( ( rc = vmbus_post_empty_message ( hv, VMBUS_REQUEST_OFFERS ) ) !=0)
1143 goto err_post_message;
1144
1145 /* Collect responses */
1146 while ( 1 ) {
1147
1148 /* Wait for response */
1149 if ( ( rc = vmbus_wait_for_any_message ( hv ) ) != 0 )
1150 goto err_wait_for_any_message;
1151
1152 /* Handle response */
1153 if ( header->type == cpu_to_le32 ( VMBUS_OFFER_CHANNEL ) ) {
1154
1155 /* Parse offer */
1156 type = &offer->type;
1157 channel = le32_to_cpu ( offer->channel );
1158 DBGC2 ( vmbus, "VMBUS %p offer %d type %s",
1159 vmbus, channel, uuid_ntoa ( type ) );
1160 if ( offer->monitored )
1161 DBGC2 ( vmbus, " monitor %d", offer->monitor );
1162 DBGC2 ( vmbus, "\n" );
1163
1164 /* Look for a driver */
1165 driver = vmbus_find_driver ( type );
1166 if ( ! driver ) {
1167 DBGC2 ( vmbus, "VMBUS %p has no driver for "
1168 "type %s\n", vmbus, uuid_ntoa ( type ));
1169 /* Not a fatal error */
1170 continue;
1171 }
1172
1173 /* Allocate and initialise device */
1174 vmdev = zalloc ( sizeof ( *vmdev ) );
1175 if ( ! vmdev ) {
1176 rc = -ENOMEM;
1177 goto err_alloc_vmdev;
1178 }
1179 memcpy ( &instance, &offer->instance,
1180 sizeof ( instance ) );
1181 uuid_mangle ( &instance );
1182 snprintf ( vmdev->dev.name, sizeof ( vmdev->dev.name ),
1183 "{%s}", uuid_ntoa ( &instance ) );
1184 vmdev->dev.desc.bus_type = BUS_TYPE_HV;
1185 INIT_LIST_HEAD ( &vmdev->dev.children );
1186 list_add_tail ( &vmdev->dev.siblings,
1187 &parent->children );
1188 vmdev->dev.parent = parent;
1189 vmdev->hv = hv;
1190 vmdev->channel = channel;
1191 vmdev->monitor = offer->monitor;
1192 vmdev->signal = ( offer->monitored ?
1193 vmbus_signal_monitor :
1194 vmbus_signal_event );
1195 INIT_LIST_HEAD ( &vmdev->pages );
1196 vmdev->driver = driver;
1197 vmdev->dev.driver_name = driver->name;
1198 DBGC ( vmdev, "VMBUS %s has driver \"%s\"\n",
1199 vmdev->dev.name, vmdev->driver->name );
1200
1201 } else if ( header->type ==
1202 cpu_to_le32 ( VMBUS_ALL_OFFERS_DELIVERED ) ) {
1203
1204 break;
1205
1206 } else {
1207 DBGC ( vmbus, "VMBUS %p unexpected offer response type "
1208 "%d\n", vmbus, le32_to_cpu ( header->type ) );
1209 rc = -EPROTO;
1210 goto err_unexpected_offer;
1211 }
1212 }
1213
1214 /* Probe all devices. We do this only after completing
1215 * enumeration since devices will need to send and receive
1216 * VMBus messages.
1217 */
1218 list_for_each_entry ( vmdev, &parent->children, dev.siblings ) {
1219 if ( ( rc = vmdev->driver->probe ( vmdev ) ) != 0 ) {
1220 DBGC ( vmdev, "VMBUS %s could not probe: %s\n",
1221 vmdev->dev.name, strerror ( rc ) );
1222 goto err_probe;
1223 }
1224 }
1225
1226 return 0;
1227
1228 err_probe:
1229 /* Remove driver from each device that was already probed */
1230 list_for_each_entry_continue_reverse ( vmdev, &parent->children,
1231 dev.siblings ) {
1232 vmdev->driver->remove ( vmdev );
1233 }
1234 err_unexpected_offer:
1235 err_alloc_vmdev:
1236 err_wait_for_any_message:
1237 /* Free any devices allocated (but potentially not yet probed) */
1238 list_for_each_entry_safe ( vmdev, tmp, &parent->children,
1239 dev.siblings ) {
1240 list_del ( &vmdev->dev.siblings );
1241 free ( vmdev );
1242 }
1243 err_post_message:
1244 return rc;
1245 }
1246
1247 /**
1248 * Remove channels
1249 *
1250 * @v hv Hyper-V hypervisor
1251 * @v parent Parent device
1252 */
1253 static void vmbus_remove_channels ( struct hv_hypervisor *hv __unused,
1254 struct device *parent ) {
1255 struct vmbus_device *vmdev;
1256 struct vmbus_device *tmp;
1257
1258 /* Remove devices */
1259 list_for_each_entry_safe ( vmdev, tmp, &parent->children,
1260 dev.siblings ) {
1261 vmdev->driver->remove ( vmdev );
1262 assert ( list_empty ( &vmdev->dev.children ) );
1263 assert ( vmdev->out == NULL );
1264 assert ( vmdev->in == NULL );
1265 assert ( vmdev->packet == NULL );
1266 assert ( list_empty ( &vmdev->pages ) );
1267 list_del ( &vmdev->dev.siblings );
1268 free ( vmdev );
1269 }
1270 }
1271
1272 /**
1273 * Probe Hyper-V virtual machine bus
1274 *
1275 * @v hv Hyper-V hypervisor
1276 * @v parent Parent device
1277 * @ret rc Return status code
1278 */
1279 int vmbus_probe ( struct hv_hypervisor *hv, struct device *parent ) {
1280 struct vmbus *vmbus;
1281 int rc;
1282
1283 /* Allocate and initialise structure */
1284 vmbus = zalloc ( sizeof ( *vmbus ) );
1285 if ( ! vmbus ) {
1286 rc = -ENOMEM;
1287 goto err_alloc;
1288 }
1289 hv->vmbus = vmbus;
1290
1291 /* Initialise message buffer pointer
1292 *
1293 * We use a pointer to the fixed-size Hyper-V received message
1294 * buffer. This allows us to access fields within received
1295 * messages without first checking the message size: any
1296 * fields beyond the end of the message will read as zero.
1297 */
1298 vmbus->message = ( ( void * ) hv->message->received.data );
1299 assert ( sizeof ( *vmbus->message ) <=
1300 sizeof ( hv->message->received.data ) );
1301
1302 /* Allocate interrupt and monitor pages */
1303 if ( ( rc = hv_alloc_pages ( hv, &vmbus->intr, &vmbus->monitor_in,
1304 &vmbus->monitor_out, NULL ) ) != 0 )
1305 goto err_alloc_pages;
1306
1307 /* Enable message interrupt */
1308 hv_enable_sint ( hv, VMBUS_MESSAGE_SINT );
1309
1310 /* Negotiate protocol version */
1311 if ( ( rc = vmbus_negotiate_version ( hv ) ) != 0 )
1312 goto err_negotiate_version;
1313
1314 /* Enumerate channels */
1315 if ( ( rc = vmbus_probe_channels ( hv, parent ) ) != 0 )
1316 goto err_probe_channels;
1317
1318 return 0;
1319
1320 vmbus_remove_channels ( hv, parent );
1321 err_probe_channels:
1322 vmbus_unload ( hv );
1323 err_negotiate_version:
1324 hv_disable_sint ( hv, VMBUS_MESSAGE_SINT );
1325 hv_free_pages ( hv, vmbus->intr, vmbus->monitor_in, vmbus->monitor_out,
1326 NULL );
1327 err_alloc_pages:
1328 free ( vmbus );
1329 err_alloc:
1330 return rc;
1331 }
1332
1333 /**
1334 * Remove Hyper-V virtual machine bus
1335 *
1336 * @v hv Hyper-V hypervisor
1337 * @v parent Parent device
1338 */
1339 void vmbus_remove ( struct hv_hypervisor *hv, struct device *parent ) {
1340 struct vmbus *vmbus = hv->vmbus;
1341
1342 vmbus_remove_channels ( hv, parent );
1343 vmbus_unload ( hv );
1344 hv_disable_sint ( hv, VMBUS_MESSAGE_SINT );
1345 hv_free_pages ( hv, vmbus->intr, vmbus->monitor_in, vmbus->monitor_out,
1346 NULL );
1347 free ( vmbus );
1348 }