vmxnet3: Use common MAC address tracing macros
[qemu.git] / hw / ide / macio.c
1 /*
2 * QEMU IDE Emulation: MacIO support.
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25 #include "qemu/osdep.h"
26 #include "hw/hw.h"
27 #include "hw/ppc/mac.h"
28 #include "hw/ppc/mac_dbdma.h"
29 #include "sysemu/block-backend.h"
30 #include "sysemu/dma.h"
31
32 #include <hw/ide/internal.h>
33
/* debug MACIO */
// #define DEBUG_MACIO

/* Compile-time debug switch: keeps the printf calls type-checked even when
 * disabled, since the dead branch is still parsed by the compiler. */
#ifdef DEBUG_MACIO
static const int debug_macio = 1;
#else
static const int debug_macio = 0;
#endif

/* Debug printf; expands to a no-op (dead if) when DEBUG_MACIO is unset */
#define MACIO_DPRINTF(fmt, ...) do { \
        if (debug_macio) { \
            printf(fmt , ## __VA_ARGS__); \
        } \
    } while (0)
48
49
50 /***********************************************************/
51 /* MacIO based PowerPC IDE */
52
53 #define MACIO_PAGE_SIZE 4096
54
55 /*
56 * Unaligned DMA read/write access functions required for OS X/Darwin which
57 * don't perform DMA transactions on sector boundaries. These functions are
58 * modelled on bdrv_co_preadv()/bdrv_co_pwritev() and so should be easy to
59 * remove if the unaligned block APIs are ever exposed.
60 */
61
/*
 * Start an asynchronous read of @bytes at byte @offset from @blk into the
 * guest memory described by the DBDMA request (@opaque is a DBDMA_io).
 *
 * The request is widened to sector alignment: the surplus bytes at the
 * head and tail of the widened range are steered into the request's
 * head_remainder/tail_remainder scratch buffers and simply discarded,
 * while the guest buffer in the middle of the iovec receives real data.
 * @cb runs with the result when blk_aio_preadv() completes.
 */
static void pmac_dma_read(BlockBackend *blk,
                          int64_t offset, unsigned int bytes,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    dma_addr_t dma_addr, dma_len;
    void *mem;
    int64_t sector_num;
    int nsector;
    uint64_t align = BDRV_SECTOR_SIZE;
    size_t head_bytes, tail_bytes;

    /* Rebuild the iovec for this request; worst case one entry per page */
    qemu_iovec_destroy(&io->iov);
    qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);

    sector_num = (offset >> 9);
    nsector = (io->len >> 9);

    MACIO_DPRINTF("--- DMA read transfer (0x%" HWADDR_PRIx ",0x%x): "
                  "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
                  sector_num, nsector);

    /* Map the guest DMA buffer.
     * NOTE(review): the returned pointer and possibly-shortened dma_len
     * are not checked against io->len — verify mapping cannot fail here. */
    dma_addr = io->addr;
    dma_len = io->len;
    mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
                         DMA_DIRECTION_FROM_DEVICE);

    if (offset & (align - 1)) {
        /* Unaligned start: widen down to the sector boundary; the first
         * head_bytes of the read land in the scratch buffer */
        head_bytes = offset & (align - 1);

        MACIO_DPRINTF("--- DMA unaligned head: sector %" PRId64 ", "
                      "discarding %zu bytes\n", sector_num, head_bytes);

        qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);

        bytes += offset & (align - 1);
        offset = offset & ~(align - 1);
    }

    qemu_iovec_add(&io->iov, mem, io->len);

    if ((offset + bytes) & (align - 1)) {
        /* Unaligned end: widen up to the sector boundary; the surplus
         * after the guest data is captured in the tail scratch buffer */
        tail_bytes = (offset + bytes) & (align - 1);

        MACIO_DPRINTF("--- DMA unaligned tail: sector %" PRId64 ", "
                      "discarding bytes %zu\n", sector_num, tail_bytes);

        qemu_iovec_add(&io->iov, &io->tail_remainder, align - tail_bytes);
        bytes = ROUND_UP(bytes, align);
    }

    /* Account the guest-visible bytes against the current IDE transfer */
    s->io_buffer_size -= io->len;
    s->io_buffer_index += io->len;

    io->len = 0;

    MACIO_DPRINTF("--- Block read transfer - sector_num: %" PRIx64 " "
                  "nsector: %x\n", (offset >> 9), (bytes >> 9));

    s->bus->dma->aiocb = blk_aio_preadv(blk, offset, &io->iov, 0, cb, io);
}
125
126 static void pmac_dma_write(BlockBackend *blk,
127 int64_t offset, int bytes,
128 void (*cb)(void *opaque, int ret), void *opaque)
129 {
130 DBDMA_io *io = opaque;
131 MACIOIDEState *m = io->opaque;
132 IDEState *s = idebus_active_if(&m->bus);
133 dma_addr_t dma_addr, dma_len;
134 void *mem;
135 int64_t sector_num;
136 int nsector;
137 uint64_t align = BDRV_SECTOR_SIZE;
138 size_t head_bytes, tail_bytes;
139 bool unaligned_head = false, unaligned_tail = false;
140
141 qemu_iovec_destroy(&io->iov);
142 qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
143
144 sector_num = (offset >> 9);
145 nsector = (io->len >> 9);
146
147 MACIO_DPRINTF("--- DMA write transfer (0x%" HWADDR_PRIx ",0x%x): "
148 "sector_num: %" PRId64 ", nsector: %d\n", io->addr, io->len,
149 sector_num, nsector);
150
151 dma_addr = io->addr;
152 dma_len = io->len;
153 mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
154 DMA_DIRECTION_TO_DEVICE);
155
156 if (offset & (align - 1)) {
157 head_bytes = offset & (align - 1);
158 sector_num = ((offset & ~(align - 1)) >> 9);
159
160 MACIO_DPRINTF("--- DMA unaligned head: pre-reading head sector %"
161 PRId64 "\n", sector_num);
162
163 blk_pread(s->blk, (sector_num << 9), &io->head_remainder, align);
164
165 qemu_iovec_add(&io->iov, &io->head_remainder, head_bytes);
166 qemu_iovec_add(&io->iov, mem, io->len);
167
168 bytes += offset & (align - 1);
169 offset = offset & ~(align - 1);
170
171 unaligned_head = true;
172 }
173
174 if ((offset + bytes) & (align - 1)) {
175 tail_bytes = (offset + bytes) & (align - 1);
176 sector_num = (((offset + bytes) & ~(align - 1)) >> 9);
177
178 MACIO_DPRINTF("--- DMA unaligned tail: pre-reading tail sector %"
179 PRId64 "\n", sector_num);
180
181 blk_pread(s->blk, (sector_num << 9), &io->tail_remainder, align);
182
183 if (!unaligned_head) {
184 qemu_iovec_add(&io->iov, mem, io->len);
185 }
186
187 qemu_iovec_add(&io->iov, &io->tail_remainder + tail_bytes,
188 align - tail_bytes);
189
190 bytes = ROUND_UP(bytes, align);
191
192 unaligned_tail = true;
193 }
194
195 if (!unaligned_head && !unaligned_tail) {
196 qemu_iovec_add(&io->iov, mem, io->len);
197 }
198
199 s->io_buffer_size -= io->len;
200 s->io_buffer_index += io->len;
201
202 io->len = 0;
203
204 MACIO_DPRINTF("--- Block write transfer - sector_num: %" PRIx64 " "
205 "nsector: %x\n", (offset >> 9), (bytes >> 9));
206
207 s->bus->dma->aiocb = blk_aio_pwritev(blk, offset, &io->iov, 0, cb, io);
208 }
209
210 static void pmac_dma_trim(BlockBackend *blk,
211 int64_t offset, int bytes,
212 void (*cb)(void *opaque, int ret), void *opaque)
213 {
214 DBDMA_io *io = opaque;
215 MACIOIDEState *m = io->opaque;
216 IDEState *s = idebus_active_if(&m->bus);
217 dma_addr_t dma_addr, dma_len;
218 void *mem;
219
220 qemu_iovec_destroy(&io->iov);
221 qemu_iovec_init(&io->iov, io->len / MACIO_PAGE_SIZE + 1);
222
223 dma_addr = io->addr;
224 dma_len = io->len;
225 mem = dma_memory_map(&address_space_memory, dma_addr, &dma_len,
226 DMA_DIRECTION_TO_DEVICE);
227
228 qemu_iovec_add(&io->iov, mem, io->len);
229 s->io_buffer_size -= io->len;
230 s->io_buffer_index += io->len;
231 io->len = 0;
232
233 s->bus->dma->aiocb = ide_issue_trim(blk, offset, &io->iov, 0, cb, io);
234 }
235
/*
 * Completion callback for ATAPI (CD) DMA reads; also used as the entry
 * point, with ret == 0, from pmac_ide_transfer().  Each invocation either
 * queues the next pmac_dma_read() chunk or terminates the command and
 * returns the channel to DBDMA via io->dma_end().
 */
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int64_t offset;

    MACIO_DPRINTF("pmac_ide_atapi_transfer_cb\n");

    if (ret < 0) {
        MACIO_DPRINTF("DMA error: %d\n", ret);
        ide_atapi_io_error(s, ret);
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    if (s->io_buffer_size <= 0) {
        /* All requested bytes transferred: complete the ATAPI command */
        MACIO_DPRINTF("End of IDE transfer\n");
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
        goto done;
    }

    if (io->len == 0) {
        /* DBDMA descriptor exhausted before the IDE transfer finished */
        MACIO_DPRINTF("End of DMA transfer\n");
        goto done;
    }

    if (s->lba == -1) {
        /* Non-block ATAPI transfer - just copy to RAM */
        s->io_buffer_size = MIN(s->io_buffer_size, io->len);
        cpu_physical_memory_write(io->addr, s->io_buffer, s->io_buffer_size);
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
        goto done;
    }

    /* Calculate current offset (CD sectors are 2048 bytes: lba << 11) */
    offset = ((int64_t)s->lba << 11) + s->io_buffer_index;

    pmac_dma_read(s->blk, offset, io->len, pmac_ide_atapi_transfer_cb, io);
    return;

done:
    /* Settle block accounting and hand the channel back to DBDMA */
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
    } else {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }

    ide_set_inactive(s, false);
    io->dma_end(opaque);
}
296
/*
 * Completion callback for ATA (hard disk) DMA; also used as the entry
 * point, with ret == 0, from pmac_ide_transfer().  Each invocation either
 * queues the next read/write/trim chunk or terminates the transfer and
 * returns the channel to DBDMA via io->dma_end().
 */
static void pmac_ide_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int64_t offset;

    MACIO_DPRINTF("pmac_ide_transfer_cb\n");

    if (ret < 0) {
        MACIO_DPRINTF("DMA error: %d\n", ret);
        ide_dma_error(s);
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    if (s->io_buffer_size <= 0) {
        /* All requested bytes transferred: signal completion to the guest */
        MACIO_DPRINTF("End of IDE transfer\n");
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        m->dma_active = false;
        goto done;
    }

    if (io->len == 0) {
        /* DBDMA descriptor exhausted before the IDE transfer finished */
        MACIO_DPRINTF("End of DMA transfer\n");
        goto done;
    }

    /* Calculate number of sectors */
    offset = (ide_get_sector(s) << 9) + s->io_buffer_index;

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        pmac_dma_read(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_WRITE:
        pmac_dma_write(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_TRIM:
        pmac_dma_trim(s->blk, offset, io->len, pmac_ide_transfer_cb, io);
        break;
    default:
        abort();
    }

    return;

done:
    /* Settle block accounting (reads/writes only) and release the channel */
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        if (ret < 0) {
            block_acct_failed(blk_get_stats(s->blk), &s->acct);
        } else {
            block_acct_done(blk_get_stats(s->blk), &s->acct);
        }
    }

    ide_set_inactive(s, false);
    io->dma_end(opaque);
}
364
365 static void pmac_ide_transfer(DBDMA_io *io)
366 {
367 MACIOIDEState *m = io->opaque;
368 IDEState *s = idebus_active_if(&m->bus);
369
370 MACIO_DPRINTF("\n");
371
372 if (s->drive_kind == IDE_CD) {
373 block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
374 BLOCK_ACCT_READ);
375
376 pmac_ide_atapi_transfer_cb(io, 0);
377 return;
378 }
379
380 switch (s->dma_cmd) {
381 case IDE_DMA_READ:
382 block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
383 BLOCK_ACCT_READ);
384 break;
385 case IDE_DMA_WRITE:
386 block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
387 BLOCK_ACCT_WRITE);
388 break;
389 default:
390 break;
391 }
392
393 pmac_ide_transfer_cb(io, 0);
394 }
395
396 static void pmac_ide_flush(DBDMA_io *io)
397 {
398 MACIOIDEState *m = io->opaque;
399 IDEState *s = idebus_active_if(&m->bus);
400
401 if (s->bus->dma->aiocb) {
402 blk_drain_all();
403 }
404 }
405
406 /* PowerMac IDE memory IO */
407 static void pmac_ide_writeb (void *opaque,
408 hwaddr addr, uint32_t val)
409 {
410 MACIOIDEState *d = opaque;
411
412 addr = (addr & 0xFFF) >> 4;
413 switch (addr) {
414 case 1 ... 7:
415 ide_ioport_write(&d->bus, addr, val);
416 break;
417 case 8:
418 case 22:
419 ide_cmd_write(&d->bus, 0, val);
420 break;
421 default:
422 break;
423 }
424 }
425
426 static uint32_t pmac_ide_readb (void *opaque,hwaddr addr)
427 {
428 uint8_t retval;
429 MACIOIDEState *d = opaque;
430
431 addr = (addr & 0xFFF) >> 4;
432 switch (addr) {
433 case 1 ... 7:
434 retval = ide_ioport_read(&d->bus, addr);
435 break;
436 case 8:
437 case 22:
438 retval = ide_status_read(&d->bus, 0);
439 break;
440 default:
441 retval = 0xFF;
442 break;
443 }
444 return retval;
445 }
446
447 static void pmac_ide_writew (void *opaque,
448 hwaddr addr, uint32_t val)
449 {
450 MACIOIDEState *d = opaque;
451
452 addr = (addr & 0xFFF) >> 4;
453 val = bswap16(val);
454 if (addr == 0) {
455 ide_data_writew(&d->bus, 0, val);
456 }
457 }
458
459 static uint32_t pmac_ide_readw (void *opaque,hwaddr addr)
460 {
461 uint16_t retval;
462 MACIOIDEState *d = opaque;
463
464 addr = (addr & 0xFFF) >> 4;
465 if (addr == 0) {
466 retval = ide_data_readw(&d->bus, 0);
467 } else {
468 retval = 0xFFFF;
469 }
470 retval = bswap16(retval);
471 return retval;
472 }
473
474 static void pmac_ide_writel (void *opaque,
475 hwaddr addr, uint32_t val)
476 {
477 MACIOIDEState *d = opaque;
478
479 addr = (addr & 0xFFF) >> 4;
480 val = bswap32(val);
481 if (addr == 0) {
482 ide_data_writel(&d->bus, 0, val);
483 }
484 }
485
486 static uint32_t pmac_ide_readl (void *opaque,hwaddr addr)
487 {
488 uint32_t retval;
489 MACIOIDEState *d = opaque;
490
491 addr = (addr & 0xFFF) >> 4;
492 if (addr == 0) {
493 retval = ide_data_readl(&d->bus, 0);
494 } else {
495 retval = 0xFFFFFFFF;
496 }
497 retval = bswap32(retval);
498 return retval;
499 }
500
/* MMIO ops for the 4 KiB IDE register window; legacy .old_mmio accessors
 * indexed by access size (1/2/4 bytes) */
static const MemoryRegionOps pmac_ide_ops = {
    .old_mmio = {
        .write = {
            pmac_ide_writeb,
            pmac_ide_writew,
            pmac_ide_writel,
        },
        .read = {
            pmac_ide_readb,
            pmac_ide_readw,
            pmac_ide_readl,
        },
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};
516
/* Migration state: the IDE bus, both drives, and whether a DMA transfer
 * was active at save time */
static const VMStateDescription vmstate_pmac = {
    .name = "ide",
    .version_id = 4,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(bus, MACIOIDEState),
        VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
        VMSTATE_BOOL(dma_active, MACIOIDEState),
        VMSTATE_END_OF_LIST()
    }
};
528
529 static void macio_ide_reset(DeviceState *dev)
530 {
531 MACIOIDEState *d = MACIO_IDE(dev);
532
533 ide_bus_reset(&d->bus);
534 }
535
/* No-op IDEDMA hook (rw_buf): DBDMA drives the data movement itself, so
 * the generic IDE core's buffer callback has nothing to do here. */
static int ide_nop_int(IDEDMA *dma, int x)
{
    return 0;
}
540
/* No-op IDEDMA hook (prepare_buf): see ide_nop_int. */
static int32_t ide_nop_int32(IDEDMA *dma, int32_t l)
{
    return 0;
}
545
546 static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
547 BlockCompletionFunc *cb)
548 {
549 MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);
550
551 s->io_buffer_index = 0;
552 if (s->drive_kind == IDE_CD) {
553 s->io_buffer_size = s->packet_transfer_size;
554 } else {
555 s->io_buffer_size = s->nsector * BDRV_SECTOR_SIZE;
556 }
557
558 MACIO_DPRINTF("\n\n------------ IDE transfer\n");
559 MACIO_DPRINTF("buffer_size: %x buffer_index: %x\n",
560 s->io_buffer_size, s->io_buffer_index);
561 MACIO_DPRINTF("lba: %x size: %x\n", s->lba, s->io_buffer_size);
562 MACIO_DPRINTF("-------------------------\n");
563
564 m->dma_active = true;
565 DBDMA_kick(m->dbdma);
566 }
567
/* IDEDMA ops: only start_dma does real work; buffer hooks are no-ops
 * because DBDMA performs the data movement */
static const IDEDMAOps dbdma_ops = {
    .start_dma = ide_dbdma_start,
    .prepare_buf = ide_nop_int32,
    .rw_buf = ide_nop_int,
};
573
/* Realize handler: wire the IDE bus interrupt and install the DBDMA ops
 * as this bus's DMA implementation. */
static void macio_ide_realizefn(DeviceState *dev, Error **errp)
{
    MACIOIDEState *s = MACIO_IDE(dev);

    ide_init2(&s->bus, s->irq);

    /* Register DMA callbacks */
    s->dma.ops = &dbdma_ops;
    s->bus.dma = &s->dma;
}
584
/* Instance init: create a 2-drive IDE bus, the 4 KiB MMIO register
 * window, and the IDE/DMA interrupt lines. */
static void macio_ide_initfn(Object *obj)
{
    SysBusDevice *d = SYS_BUS_DEVICE(obj);
    MACIOIDEState *s = MACIO_IDE(obj);

    ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
    memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
    sysbus_init_mmio(d, &s->mem);
    sysbus_init_irq(d, &s->irq);
    sysbus_init_irq(d, &s->dma_irq);
}
596
597 static void macio_ide_class_init(ObjectClass *oc, void *data)
598 {
599 DeviceClass *dc = DEVICE_CLASS(oc);
600
601 dc->realize = macio_ide_realizefn;
602 dc->reset = macio_ide_reset;
603 dc->vmsd = &vmstate_pmac;
604 set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
605 }
606
/* QOM type registration data for the MacIO IDE sysbus device */
static const TypeInfo macio_ide_type_info = {
    .name = TYPE_MACIO_IDE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MACIOIDEState),
    .instance_init = macio_ide_initfn,
    .class_init = macio_ide_class_init,
};
614
/* Register the MacIO IDE type with QOM (invoked via type_init below). */
static void macio_ide_register_types(void)
{
    type_register_static(&macio_ide_type_info);
}
619
620 /* hd_table must contain 2 block drivers */
621 void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
622 {
623 int i;
624
625 for (i = 0; i < 2; i++) {
626 if (hd_table[i]) {
627 ide_create_drive(&s->bus, i, hd_table[i]);
628 }
629 }
630 }
631
/* Bind this IDE device to DBDMA @channel of controller @dbdma; the channel
 * drives transfers through pmac_ide_transfer/pmac_ide_flush and signals
 * completion on dma_irq. */
void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
{
    s->dbdma = dbdma;
    DBDMA_register_channel(dbdma, channel, s->dma_irq,
                           pmac_ide_transfer, pmac_ide_flush, s);
}
638
639 type_init(macio_ide_register_types)