ide: Correct handling of malformed/short PRDTs
[qemu.git] / hw / ide / macio.c
1 /*
2 * QEMU IDE Emulation: MacIO support.
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25 #include "hw/hw.h"
26 #include "hw/ppc/mac.h"
27 #include "hw/ppc/mac_dbdma.h"
28 #include "sysemu/block-backend.h"
29 #include "sysemu/dma.h"
30
31 #include <hw/ide/internal.h>
32
33 /* debug MACIO */
34 // #define DEBUG_MACIO
35
36 #ifdef DEBUG_MACIO
37 static const int debug_macio = 1;
38 #else
39 static const int debug_macio = 0;
40 #endif
41
42 #define MACIO_DPRINTF(fmt, ...) do { \
43 if (debug_macio) { \
44 printf(fmt , ## __VA_ARGS__); \
45 } \
46 } while (0)
47
48
49 /***********************************************************/
50 /* MacIO based PowerPC IDE */
51
52 #define MACIO_PAGE_SIZE 4096
53
/*
 * Completion callback for ATAPI (CD-ROM) DMA transfers driven by the
 * DBDMA engine.
 *
 * Called by the block layer when an async read completes (ret == 0 on
 * success, negative on error) and also invoked directly/recursively to
 * (re)start the next chunk of a transfer.  The DBDMA_io describes the
 * guest buffer for the current descriptor; leftover sub-sector bytes are
 * parked in io->remainder (one 0x200-byte sector) between iterations.
 */
static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int unaligned;

    if (ret < 0) {
        /* Block-layer error: report to the guest and drop any buffered
           remainder from a previous iteration. */
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        ide_atapi_io_error(s, ret);
        io->remainder_len = 0;
        goto done;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);

    if (s->io_buffer_size > 0) {
        /* A previous chunk completed: account for it.  The LBA advances
           in 2048-byte CD sectors (>> 11); io_buffer_index keeps the
           intra-sector byte offset (& 0x7ff). */
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);

        s->packet_transfer_size -= s->io_buffer_size;

        s->io_buffer_index += s->io_buffer_size;
        s->lba += s->io_buffer_index >> 11;
        s->io_buffer_index &= 0x7ff;
    }

    s->io_buffer_size = MIN(io->len, s->packet_transfer_size);

    MACIO_DPRINTF("remainder: %d io->len: %d size: %d\n", io->remainder_len,
                  io->len, s->packet_transfer_size);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);

        MACIO_DPRINTF("copying remainder %d bytes\n", remainder_len);

        /* The leftover bytes live at the tail of the 0x200-byte
           remainder buffer. */
        cpu_physical_memory_write(io->addr, io->remainder + 0x200 -
                                  remainder_len, remainder_len);

        io->addr += remainder_len;
        io->len -= remainder_len;
        s->io_buffer_size = remainder_len;
        io->remainder_len -= remainder_len;
        /* treat remainder as individual transfer, start again */
        qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                         &address_space_memory);
        pmac_ide_atapi_transfer_cb(opaque, 0);
        return;
    }

    if (!s->packet_transfer_size) {
        MACIO_DPRINTF("end of transfer\n");
        ide_atapi_cmd_ok(s);
        m->dma_active = false;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("end of DMA\n");
        goto done;
    }

    /* launch next transfer */

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;
    if (unaligned) {
        /* (s->lba << 2) converts 2048-byte CD sectors to 512-byte block
           sectors.  NOTE(review): the shift is done on a plain int before
           any widening — looks like it could overflow for very large LBAs;
           confirm against the int64_t cast used for dma_blk_read below. */
        int sector_num = (s->lba << 2) + (s->io_buffer_index >> 9);
        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        /* Synchronously fetch the sector holding the unaligned tail.
           NOTE(review): blk_read()'s return value is ignored here — a
           read failure would silently hand stale remainder data to the
           guest; consider propagating the error. */
        blk_read(s->blk, sector_num + nsector, io->remainder, 1);
        cpu_physical_memory_write(io->addr + io->len - unaligned,
                                  io->remainder, unaligned);

        io->len -= unaligned;
    }

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += s->io_buffer_size;
    /* Bytes of the current sector the guest has not asked for yet; they
       will be served from io->remainder on the next iteration. */
    io->remainder_len = MIN(s->packet_transfer_size - s->io_buffer_size,
                            (0x200 - unaligned) & 0x1ff);
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* We would read no data from the block layer, thus not get a callback.
       Just fake completion manually. */
    if (!io->len) {
        pmac_ide_atapi_transfer_cb(opaque, 0);
        return;
    }

    io->len = 0;

    MACIO_DPRINTF("sector_num=%d size=%d, cmd_cmd=%d\n",
                  (s->lba << 2) + (s->io_buffer_index >> 9),
                  s->packet_transfer_size, s->dma_cmd);

    m->aiocb = dma_blk_read(s->blk, &s->sg,
                            (int64_t)(s->lba << 2) + (s->io_buffer_index >> 9),
                            pmac_ide_atapi_transfer_cb, io);
    return;

done:
    MACIO_DPRINTF("done DMA\n");
    block_acct_done(blk_get_stats(s->blk), &s->acct);
    io->dma_end(opaque);
}
177
/*
 * Completion callback for hard-disk (non-ATAPI) DMA transfers driven by
 * the DBDMA engine.
 *
 * Handles IDE_DMA_READ, IDE_DMA_WRITE and IDE_DMA_TRIM.  Multiple async
 * requests may be in flight at once; io->requests counts them and the
 * last one to finish drives the next iteration.  Sub-sector pieces are
 * staged through the 0x200-byte io->remainder buffer.
 */
static void pmac_ide_transfer_cb(void *opaque, int ret)
{
    DBDMA_io *io = opaque;
    MACIOIDEState *m = io->opaque;
    IDEState *s = idebus_active_if(&m->bus);
    int n = 0;
    int64_t sector_num;
    int unaligned;

    if (ret < 0) {
        /* Block-layer error: signal the guest and drop any buffered
           remainder. */
        MACIO_DPRINTF("DMA error\n");
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        ide_dma_error(s);
        io->remainder_len = 0;
        goto done;
    }

    if (--io->requests) {
        /* More requests still in flight */
        return;
    }

    if (!m->dma_active) {
        MACIO_DPRINTF("waiting for data (%#x - %#x - %x)\n",
                      s->nsector, io->len, s->status);
        /* data not ready yet, wait for the channel to get restarted */
        io->processing = false;
        return;
    }

    sector_num = ide_get_sector(s);
    MACIO_DPRINTF("io_buffer_size = %#x\n", s->io_buffer_size);
    if (s->io_buffer_size > 0) {
        /* Account for the chunk that just completed: advance the IDE
           sector pointer by the number of whole 512-byte sectors
           (rounded up) and decrement the remaining sector count. */
        m->aiocb = NULL;
        qemu_sglist_destroy(&s->sg);
        n = (s->io_buffer_size + 0x1ff) >> 9;
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    if (io->finish_remain_read) {
        /* Finish a stale read from the last iteration */
        io->finish_remain_read = false;
        cpu_physical_memory_write(io->finish_addr, io->remainder,
                                  io->finish_len);
    }

    MACIO_DPRINTF("remainder: %d io->len: %d nsector: %d "
                  "sector_num: %" PRId64 "\n",
                  io->remainder_len, io->len, s->nsector, sector_num);
    if (io->remainder_len && io->len) {
        /* guest wants the rest of its previous transfer */
        int remainder_len = MIN(io->remainder_len, io->len);
        uint8_t *p = &io->remainder[0x200 - remainder_len];

        MACIO_DPRINTF("copying remainder %d bytes at %#" HWADDR_PRIx "\n",
                      remainder_len, io->addr);

        switch (s->dma_cmd) {
        case IDE_DMA_READ:
            /* Deliver the staged tail of the previous sector. */
            cpu_physical_memory_write(io->addr, p, remainder_len);
            break;
        case IDE_DMA_WRITE:
            /* Collect the rest of the partially-filled sector. */
            cpu_physical_memory_read(io->addr, p, remainder_len);
            break;
        case IDE_DMA_TRIM:
            break;
        }
        io->addr += remainder_len;
        io->len -= remainder_len;
        io->remainder_len -= remainder_len;

        if (s->dma_cmd == IDE_DMA_WRITE && !io->remainder_len) {
            /* The staged sector is now complete: flush it.  It belongs
               to the sector just before the current position, hence
               sector_num - 1. */
            io->requests++;
            qemu_iovec_reset(&io->iov);
            qemu_iovec_add(&io->iov, io->remainder, 0x200);

            m->aiocb = blk_aio_writev(s->blk, sector_num - 1, &io->iov, 1,
                                      pmac_ide_transfer_cb, io);
        }
    }

    if (s->nsector == 0 && !io->remainder_len) {
        MACIO_DPRINTF("end of transfer\n");
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        m->dma_active = false;
    }

    if (io->len == 0) {
        MACIO_DPRINTF("end of DMA\n");
        goto done;
    }

    /* launch next transfer */

    s->io_buffer_index = 0;
    s->io_buffer_size = MIN(io->len, s->nsector * 512);

    /* handle unaligned accesses first, get them over with and only do the
       remaining bulk transfer using our async DMA helpers */
    unaligned = io->len & 0x1ff;
    if (unaligned) {
        int nsector = io->len >> 9;

        MACIO_DPRINTF("precopying unaligned %d bytes to %#" HWADDR_PRIx "\n",
                      unaligned, io->addr + io->len - unaligned);

        switch (s->dma_cmd) {
        case IDE_DMA_READ:
            /* Read the whole sector holding the unaligned tail into
               io->remainder; the guest-visible part is copied out when
               this request completes (finish_remain_read above). */
            io->requests++;
            io->finish_addr = io->addr + io->len - unaligned;
            io->finish_len = unaligned;
            io->finish_remain_read = true;
            qemu_iovec_reset(&io->iov);
            qemu_iovec_add(&io->iov, io->remainder, 0x200);

            m->aiocb = blk_aio_readv(s->blk, sector_num + nsector, &io->iov, 1,
                                     pmac_ide_transfer_cb, io);
            break;
        case IDE_DMA_WRITE:
            /* cache the contents in our io struct */
            cpu_physical_memory_read(io->addr + io->len - unaligned,
                                     io->remainder + io->remainder_len,
                                     unaligned);
            break;
        case IDE_DMA_TRIM:
            break;
        }
    }

    MACIO_DPRINTF("io->len = %#x\n", io->len);

    /* NOTE(review): unlike the ATAPI path, io->len is NOT reduced by
       'unaligned' here, yet the sglist covers the full io->len and
       io->addr advances by io->len + unaligned — verify the intended
       accounting for the unaligned tail. */
    qemu_sglist_init(&s->sg, DEVICE(m), io->len / MACIO_PAGE_SIZE + 1,
                     &address_space_memory);
    qemu_sglist_add(&s->sg, io->addr, io->len);
    io->addr += io->len + unaligned;
    io->remainder_len = (0x200 - unaligned) & 0x1ff;
    MACIO_DPRINTF("set remainder to: %d\n", io->remainder_len);

    /* Only subsector reads happening */
    if (!io->len) {
        if (!io->requests) {
            io->requests++;
            pmac_ide_transfer_cb(opaque, ret);
        }
        return;
    }

    io->len = 0;

    MACIO_DPRINTF("sector_num=%" PRId64 " n=%d, nsector=%d, cmd_cmd=%d\n",
                  sector_num, n, s->nsector, s->dma_cmd);

    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        m->aiocb = dma_blk_read(s->blk, &s->sg, sector_num,
                                pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_WRITE:
        m->aiocb = dma_blk_write(s->blk, &s->sg, sector_num,
                                 pmac_ide_transfer_cb, io);
        break;
    case IDE_DMA_TRIM:
        m->aiocb = dma_blk_io(s->blk, &s->sg, sector_num,
                              ide_issue_trim, pmac_ide_transfer_cb, io,
                              DMA_DIRECTION_TO_DEVICE);
        break;
    }

    io->requests++;
    return;

done:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    io->dma_end(io);
}
359
360 static void pmac_ide_transfer(DBDMA_io *io)
361 {
362 MACIOIDEState *m = io->opaque;
363 IDEState *s = idebus_active_if(&m->bus);
364
365 MACIO_DPRINTF("\n");
366
367 s->io_buffer_size = 0;
368 if (s->drive_kind == IDE_CD) {
369
370 /* Handle non-block ATAPI DMA transfers */
371 if (s->lba == -1) {
372 s->io_buffer_size = MIN(io->len, s->packet_transfer_size);
373 block_acct_start(blk_get_stats(s->blk), &s->acct, s->io_buffer_size,
374 BLOCK_ACCT_READ);
375 MACIO_DPRINTF("non-block ATAPI DMA transfer size: %d\n",
376 s->io_buffer_size);
377
378 /* Copy ATAPI buffer directly to RAM and finish */
379 cpu_physical_memory_write(io->addr, s->io_buffer,
380 s->io_buffer_size);
381 ide_atapi_cmd_ok(s);
382 m->dma_active = false;
383
384 MACIO_DPRINTF("end of non-block ATAPI DMA transfer\n");
385 block_acct_done(blk_get_stats(s->blk), &s->acct);
386 io->dma_end(io);
387 return;
388 }
389
390 block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
391 BLOCK_ACCT_READ);
392 pmac_ide_atapi_transfer_cb(io, 0);
393 return;
394 }
395
396 switch (s->dma_cmd) {
397 case IDE_DMA_READ:
398 block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
399 BLOCK_ACCT_READ);
400 break;
401 case IDE_DMA_WRITE:
402 block_acct_start(blk_get_stats(s->blk), &s->acct, io->len,
403 BLOCK_ACCT_WRITE);
404 break;
405 default:
406 break;
407 }
408
409 io->requests++;
410 pmac_ide_transfer_cb(io, 0);
411 }
412
413 static void pmac_ide_flush(DBDMA_io *io)
414 {
415 MACIOIDEState *m = io->opaque;
416
417 if (m->aiocb) {
418 blk_drain_all();
419 }
420 }
421
422 /* PowerMac IDE memory IO */
423 static void pmac_ide_writeb (void *opaque,
424 hwaddr addr, uint32_t val)
425 {
426 MACIOIDEState *d = opaque;
427
428 addr = (addr & 0xFFF) >> 4;
429 switch (addr) {
430 case 1 ... 7:
431 ide_ioport_write(&d->bus, addr, val);
432 break;
433 case 8:
434 case 22:
435 ide_cmd_write(&d->bus, 0, val);
436 break;
437 default:
438 break;
439 }
440 }
441
442 static uint32_t pmac_ide_readb (void *opaque,hwaddr addr)
443 {
444 uint8_t retval;
445 MACIOIDEState *d = opaque;
446
447 addr = (addr & 0xFFF) >> 4;
448 switch (addr) {
449 case 1 ... 7:
450 retval = ide_ioport_read(&d->bus, addr);
451 break;
452 case 8:
453 case 22:
454 retval = ide_status_read(&d->bus, 0);
455 break;
456 default:
457 retval = 0xFF;
458 break;
459 }
460 return retval;
461 }
462
463 static void pmac_ide_writew (void *opaque,
464 hwaddr addr, uint32_t val)
465 {
466 MACIOIDEState *d = opaque;
467
468 addr = (addr & 0xFFF) >> 4;
469 val = bswap16(val);
470 if (addr == 0) {
471 ide_data_writew(&d->bus, 0, val);
472 }
473 }
474
475 static uint32_t pmac_ide_readw (void *opaque,hwaddr addr)
476 {
477 uint16_t retval;
478 MACIOIDEState *d = opaque;
479
480 addr = (addr & 0xFFF) >> 4;
481 if (addr == 0) {
482 retval = ide_data_readw(&d->bus, 0);
483 } else {
484 retval = 0xFFFF;
485 }
486 retval = bswap16(retval);
487 return retval;
488 }
489
490 static void pmac_ide_writel (void *opaque,
491 hwaddr addr, uint32_t val)
492 {
493 MACIOIDEState *d = opaque;
494
495 addr = (addr & 0xFFF) >> 4;
496 val = bswap32(val);
497 if (addr == 0) {
498 ide_data_writel(&d->bus, 0, val);
499 }
500 }
501
502 static uint32_t pmac_ide_readl (void *opaque,hwaddr addr)
503 {
504 uint32_t retval;
505 MACIOIDEState *d = opaque;
506
507 addr = (addr & 0xFFF) >> 4;
508 if (addr == 0) {
509 retval = ide_data_readl(&d->bus, 0);
510 } else {
511 retval = 0xFFFFFFFF;
512 }
513 retval = bswap32(retval);
514 return retval;
515 }
516
/* MMIO dispatch table, legacy per-size style: index 0/1/2 map to
 * 1-, 2- and 4-byte accesses respectively. */
static const MemoryRegionOps pmac_ide_ops = {
    .old_mmio = {
        .write = {
            pmac_ide_writeb,
            pmac_ide_writew,
            pmac_ide_writel,
        },
        .read = {
            pmac_ide_readb,
            pmac_ide_readw,
            pmac_ide_readl,
        },
    },
    /* Handlers do their own bswap; region itself is native-endian. */
    .endianness = DEVICE_NATIVE_ENDIAN,
};
532
/* Migration state: the IDE bus registers plus both drives' state.
 * Accepts streams back to version 0 for backward compatibility. */
static const VMStateDescription vmstate_pmac = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .fields = (VMStateField[]) {
        VMSTATE_IDE_BUS(bus, MACIOIDEState),
        VMSTATE_IDE_DRIVES(bus.ifs, MACIOIDEState),
        VMSTATE_END_OF_LIST()
    }
};
543
544 static void macio_ide_reset(DeviceState *dev)
545 {
546 MACIOIDEState *d = MACIO_IDE(dev);
547
548 ide_bus_reset(&d->bus);
549 }
550
551 static int ide_nop_int(IDEDMA *dma, int x)
552 {
553 return 0;
554 }
555
556 static int32_t ide_nop_int32(IDEDMA *dma, int x)
557 {
558 return 0;
559 }
560
561 static void ide_nop_restart(void *opaque, int x, RunState y)
562 {
563 }
564
565 static void ide_dbdma_start(IDEDMA *dma, IDEState *s,
566 BlockCompletionFunc *cb)
567 {
568 MACIOIDEState *m = container_of(dma, MACIOIDEState, dma);
569
570 MACIO_DPRINTF("\n");
571 m->dma_active = true;
572 DBDMA_kick(m->dbdma);
573 }
574
/* IDEDMA callbacks: only start_dma does real work (kicks the DBDMA
 * channel); the rest are no-ops since the DBDMA engine drives the
 * transfer itself. */
static const IDEDMAOps dbdma_ops = {
    .start_dma      = ide_dbdma_start,
    .prepare_buf    = ide_nop_int32,
    .rw_buf         = ide_nop_int,
    .set_unit       = ide_nop_int,
    .restart_cb     = ide_nop_restart,
};
582
583 static void macio_ide_realizefn(DeviceState *dev, Error **errp)
584 {
585 MACIOIDEState *s = MACIO_IDE(dev);
586
587 ide_init2(&s->bus, s->irq);
588
589 /* Register DMA callbacks */
590 s->dma.ops = &dbdma_ops;
591 s->bus.dma = &s->dma;
592 }
593
594 static void macio_ide_initfn(Object *obj)
595 {
596 SysBusDevice *d = SYS_BUS_DEVICE(obj);
597 MACIOIDEState *s = MACIO_IDE(obj);
598
599 ide_bus_new(&s->bus, sizeof(s->bus), DEVICE(obj), 0, 2);
600 memory_region_init_io(&s->mem, obj, &pmac_ide_ops, s, "pmac-ide", 0x1000);
601 sysbus_init_mmio(d, &s->mem);
602 sysbus_init_irq(d, &s->irq);
603 sysbus_init_irq(d, &s->dma_irq);
604 }
605
606 static void macio_ide_class_init(ObjectClass *oc, void *data)
607 {
608 DeviceClass *dc = DEVICE_CLASS(oc);
609
610 dc->realize = macio_ide_realizefn;
611 dc->reset = macio_ide_reset;
612 dc->vmsd = &vmstate_pmac;
613 }
614
/* QOM type registration record for the MacIO IDE sysbus device. */
static const TypeInfo macio_ide_type_info = {
    .name = TYPE_MACIO_IDE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(MACIOIDEState),
    .instance_init = macio_ide_initfn,
    .class_init = macio_ide_class_init,
};
622
623 static void macio_ide_register_types(void)
624 {
625 type_register_static(&macio_ide_type_info);
626 }
627
628 /* hd_table must contain 2 block drivers */
629 void macio_ide_init_drives(MACIOIDEState *s, DriveInfo **hd_table)
630 {
631 int i;
632
633 for (i = 0; i < 2; i++) {
634 if (hd_table[i]) {
635 ide_create_drive(&s->bus, i, hd_table[i]);
636 }
637 }
638 }
639
640 void macio_ide_register_dma(MACIOIDEState *s, void *dbdma, int channel)
641 {
642 s->dbdma = dbdma;
643 DBDMA_register_channel(dbdma, channel, s->dma_irq,
644 pmac_ide_transfer, pmac_ide_flush, s);
645 }
646
647 type_init(macio_ide_register_types)