Merge tag 'block-pull-request' of https://gitlab.com/stefanha/qemu into staging
[qemu.git] / hw / ide / core.c
1 /*
2 * QEMU IDE disk and CD/DVD-ROM Emulator
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2006 Openedhand Ltd.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
26 #include "qemu/osdep.h"
27 #include "hw/isa/isa.h"
28 #include "migration/vmstate.h"
29 #include "qemu/error-report.h"
30 #include "qemu/main-loop.h"
31 #include "qemu/timer.h"
32 #include "qemu/hw-version.h"
33 #include "qemu/memalign.h"
34 #include "sysemu/sysemu.h"
35 #include "sysemu/blockdev.h"
36 #include "sysemu/dma.h"
37 #include "hw/block/block.h"
38 #include "sysemu/block-backend.h"
39 #include "qapi/error.h"
40 #include "qemu/cutils.h"
41 #include "sysemu/replay.h"
42 #include "sysemu/runstate.h"
43 #include "hw/ide/internal.h"
44 #include "trace.h"
45
/* These values were based on a Seagate ST3500418AS but have been modified
   to make more sense in QEMU */
/*
 * SMART attribute table returned by SMART READ DATA / READ THRESHOLDS.
 * Each row is one attribute: id, flags, hflags, current value, worst
 * value, six raw data bytes, and the failure threshold (last byte).
 */
static const int smart_attributes[][12] = {
    /* id,  flags, hflags, val, wrst, raw (6 bytes), threshold */
    /* raw read error rate*/
    { 0x01, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06},
    /* spin up */
    { 0x03, 0x03, 0x00, 0x64, 0x64, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* start stop count */
    { 0x04, 0x02, 0x00, 0x64, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x14},
    /* remapped sectors */
    { 0x05, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24},
    /* power on hours */
    { 0x09, 0x03, 0x00, 0x64, 0x64, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* power cycle count */
    { 0x0c, 0x03, 0x00, 0x64, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
    /* airflow-temperature-celsius */
    { 190,  0x03, 0x00, 0x45, 0x45, 0x1f, 0x00, 0x1f, 0x1f, 0x00, 0x00, 0x32},
};
65
/* Human-readable names for enum ide_dma_cmd, indexed by the enum value. */
const char *IDE_DMA_CMD_lookup[IDE_DMA__COUNT] = {
    [IDE_DMA_READ] = "DMA READ",
    [IDE_DMA_WRITE] = "DMA WRITE",
    [IDE_DMA_TRIM] = "DMA TRIM",
    [IDE_DMA_ATAPI] = "DMA ATAPI"
};
72
73 static const char *IDE_DMA_CMD_str(enum ide_dma_cmd enval)
74 {
75 if ((unsigned)enval < IDE_DMA__COUNT) {
76 return IDE_DMA_CMD_lookup[enval];
77 }
78 return "DMA UNKNOWN CMD";
79 }
80
81 static void ide_dummy_transfer_stop(IDEState *s);
82
/*
 * Fill str[0..len-1] from src, space-padding past the end of src.
 * Adjacent bytes are swapped (the i^1 index) because ATA IDENTIFY
 * strings are stored big-endian within each 16-bit word.
 */
static void padstr(char *str, const char *src, int len)
{
    int idx;

    for (idx = 0; idx < len; idx++) {
        char c = *src ? *src++ : ' ';
        str[idx ^ 1] = c;
    }
}
94
/* Store v at p as a little-endian 16-bit word (IDENTIFY data layout). */
static void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}
99
/*
 * Fill the capacity words of the IDENTIFY DEVICE data from the current
 * s->nb_sectors: words 60-61 hold the LBA28 capacity (clamped to 2^28-1),
 * words 100-103 hold the full LBA48 capacity.  Called both at first
 * IDENTIFY and whenever the device size changes.
 */
static void ide_identify_size(IDEState *s)
{
    uint16_t *p = (uint16_t *)s->identify_data;
    int64_t nb_sectors_lba28 = s->nb_sectors;
    if (nb_sectors_lba28 >= 1 << 28) {
        nb_sectors_lba28 = (1 << 28) - 1;
    }
    put_le16(p + 60, nb_sectors_lba28);
    put_le16(p + 61, nb_sectors_lba28 >> 16);
    put_le16(p + 100, s->nb_sectors);
    put_le16(p + 101, s->nb_sectors >> 16);
    put_le16(p + 102, s->nb_sectors >> 32);
    put_le16(p + 103, s->nb_sectors >> 48);
}
114
/*
 * Build the 512-byte IDENTIFY DEVICE response for an ATA hard disk and
 * copy it into s->io_buffer.  The identify_data buffer is built once and
 * cached (identify_set); later calls only re-copy it.  Word indices
 * follow the ATA specification's IDENTIFY DEVICE layout.
 */
static void ide_identify(IDEState *s)
{
    uint16_t *p;
    unsigned int oldsize;
    IDEDevice *dev = s->unit ? s->bus->slave : s->bus->master;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    put_le16(p + 0, 0x0040);
    put_le16(p + 1, s->cylinders);
    put_le16(p + 3, s->heads);
    put_le16(p + 4, 512 * s->sectors); /* XXX: retired, remove ? */
    put_le16(p + 5, 512); /* XXX: retired, remove ? */
    put_le16(p + 6, s->sectors);
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* XXX: retired, remove ? */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#endif
    put_le16(p + 48, 1); /* dword I/O */
    put_le16(p + 49, (1 << 11) | (1 << 9) | (1 << 8)); /* DMA and LBA supported */
    put_le16(p + 51, 0x200); /* PIO transfer cycle */
    put_le16(p + 52, 0x200); /* DMA transfer cycle */
    put_le16(p + 53, 1 | (1 << 1) | (1 << 2)); /* words 54-58,64-70,88 are valid */
    put_le16(p + 54, s->cylinders);
    put_le16(p + 55, s->heads);
    put_le16(p + 56, s->sectors);
    oldsize = s->cylinders * s->heads * s->sectors;
    put_le16(p + 57, oldsize);
    put_le16(p + 58, oldsize >> 16);
    if (s->mult_sectors)
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_identify_size */
    put_le16(p + 62, 0x07); /* single word dma0-2 supported */
    put_le16(p + 63, 0x07); /* mdma0-2 supported */
    put_le16(p + 64, 0x03); /* pio3-4 supported */
    put_le16(p + 65, 120);
    put_le16(p + 66, 120);
    put_le16(p + 67, 120);
    put_le16(p + 68, 120);
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 69, (1 << 14)); /* determinate TRIM behavior */
    }

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0xf0); /* ata3 -> ata6 supported */
    put_le16(p + 81, 0x16); /* conforms to ata5 */
    /* 14=NOP supported, 5=WCACHE supported, 0=SMART supported */
    put_le16(p + 82, (1 << 14) | (1 << 5) | 1);
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 83, (1 << 14) | (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 84, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 84, (1 << 14) | 0);
    }
    /* 14 = NOP supported, 5=WCACHE enabled, 0=SMART feature set enabled */
    if (blk_enable_write_cache(s->blk)) {
        put_le16(p + 85, (1 << 14) | (1 << 5) | 1);
    } else {
        put_le16(p + 85, (1 << 14) | 1);
    }
    /* 13=flush_cache_ext,12=flush_cache,10=lba48 */
    put_le16(p + 86, (1 << 13) | (1 <<12) | (1 << 10));
    /* 14=set to 1, 8=has WWN, 1=SMART self test, 0=SMART error logging */
    if (s->wwn) {
        put_le16(p + 87, (1 << 14) | (1 << 8) | 0);
    } else {
        put_le16(p + 87, (1 << 14) | 0);
    }
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
    put_le16(p + 93, 1 | (1 << 14) | 0x2000);
    /* *(p + 100) := nb_sectors       -- see ide_identify_size */
    /* *(p + 101) := nb_sectors >> 16 -- see ide_identify_size */
    /* *(p + 102) := nb_sectors >> 32 -- see ide_identify_size */
    /* *(p + 103) := nb_sectors >> 48 -- see ide_identify_size */

    if (dev && dev->conf.physical_block_size)
        put_le16(p + 106, 0x6000 | get_physical_block_exp(&dev->conf));
    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }
    if (dev && dev->conf.discard_granularity) {
        put_le16(p + 169, 1); /* TRIM support */
    }
    if (dev) {
        put_le16(p + 217, dev->rotation_rate); /* Nominal media rotation rate */
    }

    /* Capacity words depend on nb_sectors, filled by a shared helper */
    ide_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
229
/*
 * Build the IDENTIFY PACKET DEVICE response for an ATAPI CD/DVD-ROM and
 * copy it into s->io_buffer.  Built once and cached like ide_identify().
 * DMA-related words are only advertised when built with USE_DMA_CDROM.
 */
static void ide_atapi_identify(IDEState *s)
{
    uint16_t *p;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    /* Removable CDROM, 50us response, 12 byte packets */
    put_le16(p + 0, (2 << 14) | (5 << 8) | (1 << 7) | (2 << 5) | (0 << 0));
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 20, 3); /* buffer type */
    put_le16(p + 21, 512); /* cache size in sectors */
    put_le16(p + 22, 4); /* ecc bytes */
    padstr((char *)(p + 23), s->version, 8); /* firmware version */
    padstr((char *)(p + 27), s->drive_model_str, 40); /* model */
    put_le16(p + 48, 1); /* dword I/O (XXX: should not be set on CDROM) */
#ifdef USE_DMA_CDROM
    put_le16(p + 49, 1 << 9 | 1 << 8); /* DMA and LBA supported */
    put_le16(p + 53, 7); /* words 64-70, 54-58, 88 valid */
    put_le16(p + 62, 7);  /* single word dma0-2 supported */
    put_le16(p + 63, 7);  /* mdma0-2 supported */
#else
    put_le16(p + 49, 1 << 9); /* LBA supported, no DMA */
    put_le16(p + 53, 3); /* words 64-70, 54-58 valid */
    put_le16(p + 63, 0x103); /* DMA modes XXX: may be incorrect */
#endif
    put_le16(p + 64, 3); /* pio3-4 supported */
    put_le16(p + 65, 0xb4); /* minimum DMA multiword tx cycle time */
    put_le16(p + 66, 0xb4); /* recommended DMA multiword tx cycle time */
    put_le16(p + 67, 0x12c); /* minimum PIO cycle time without flow control */
    put_le16(p + 68, 0xb4); /* minimum PIO cycle time with IORDY flow control */

    put_le16(p + 71, 30); /* in ns */
    put_le16(p + 72, 30); /* in ns */

    if (s->ncq_queues) {
        put_le16(p + 75, s->ncq_queues - 1);
        /* NCQ supported */
        put_le16(p + 76, (1 << 8));
    }

    put_le16(p + 80, 0x1e); /* support up to ATA/ATAPI-4 */
    if (s->wwn) {
        put_le16(p + 84, (1 << 8)); /* supports WWN for words 108-111 */
        put_le16(p + 87, (1 << 8)); /* WWN enabled */
    }

#ifdef USE_DMA_CDROM
    put_le16(p + 88, 0x3f | (1 << 13)); /* udma5 set and supported */
#endif

    if (s->wwn) {
        /* LE 16-bit words 111-108 contain 64-bit World Wide Name */
        put_le16(p + 108, s->wwn >> 48);
        put_le16(p + 109, s->wwn >> 32);
        put_le16(p + 110, s->wwn >> 16);
        put_le16(p + 111, s->wwn);
    }

    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
297
/*
 * Fill the CFATA IDENTIFY capacity words from s->nb_sectors.
 * Note the CF layout differs from ATA: words 7-8 hold the sectors-per-card
 * count (high word first), words 60-61 the total LBA sectors.
 */
static void ide_cfata_identify_size(IDEState *s)
{
    uint16_t *p = (uint16_t *)s->identify_data;
    put_le16(p + 7, s->nb_sectors >> 16);  /* Sectors per card */
    put_le16(p + 8, s->nb_sectors);        /* Sectors per card */
    put_le16(p + 60, s->nb_sectors);       /* Total LBA sectors */
    put_le16(p + 61, s->nb_sectors >> 16); /* Total LBA sectors */
}
306
/*
 * Build the IDENTIFY response for a CFATA (CompactFlash) card and copy it
 * into s->io_buffer.  Built once and cached like ide_identify().  Word
 * meanings follow the CF+ specification rather than plain ATA.
 */
static void ide_cfata_identify(IDEState *s)
{
    uint16_t *p;
    uint32_t cur_sec;

    p = (uint16_t *)s->identify_data;
    if (s->identify_set) {
        goto fill_buffer;
    }
    memset(p, 0, sizeof(s->identify_data));

    cur_sec = s->cylinders * s->heads * s->sectors;

    put_le16(p + 0, 0x848a);			/* CF Storage Card signature */
    put_le16(p + 1, s->cylinders);		/* Default cylinders */
    put_le16(p + 3, s->heads);			/* Default heads */
    put_le16(p + 6, s->sectors);		/* Default sectors per track */
    /* *(p + 7) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    /* *(p + 8) := nb_sectors       -- see ide_cfata_identify_size */
    padstr((char *)(p + 10), s->drive_serial_str, 20); /* serial number */
    put_le16(p + 22, 0x0004);			/* ECC bytes */
    padstr((char *) (p + 23), s->version, 8);	/* Firmware Revision */
    padstr((char *) (p + 27), s->drive_model_str, 40);/* Model number */
#if MAX_MULT_SECTORS > 1
    put_le16(p + 47, 0x8000 | MAX_MULT_SECTORS);
#else
    put_le16(p + 47, 0x0000);
#endif
    put_le16(p + 49, 0x0f00);			/* Capabilities */
    put_le16(p + 51, 0x0002);			/* PIO cycle timing mode */
    put_le16(p + 52, 0x0001);			/* DMA cycle timing mode */
    put_le16(p + 53, 0x0003);			/* Translation params valid */
    put_le16(p + 54, s->cylinders);		/* Current cylinders */
    put_le16(p + 55, s->heads);			/* Current heads */
    put_le16(p + 56, s->sectors);		/* Current sectors */
    put_le16(p + 57, cur_sec);			/* Current capacity */
    put_le16(p + 58, cur_sec >> 16);		/* Current capacity */
    if (s->mult_sectors)			/* Multiple sector setting */
        put_le16(p + 59, 0x100 | s->mult_sectors);
    /* *(p + 60) := nb_sectors       -- see ide_cfata_identify_size */
    /* *(p + 61) := nb_sectors >> 16 -- see ide_cfata_identify_size */
    put_le16(p + 63, 0x0203);			/* Multiword DMA capability */
    put_le16(p + 64, 0x0001);			/* Flow Control PIO support */
    put_le16(p + 65, 0x0096);			/* Min. Multiword DMA cycle */
    put_le16(p + 66, 0x0096);			/* Rec. Multiword DMA cycle */
    put_le16(p + 68, 0x00b4);			/* Min. PIO cycle time */
    put_le16(p + 82, 0x400c);			/* Command Set supported */
    put_le16(p + 83, 0x7068);			/* Command Set supported */
    put_le16(p + 84, 0x4000);			/* Features supported */
    put_le16(p + 85, 0x000c);			/* Command Set enabled */
    put_le16(p + 86, 0x7044);			/* Command Set enabled */
    put_le16(p + 87, 0x4000);			/* Features enabled */
    put_le16(p + 91, 0x4060);			/* Current APM level */
    put_le16(p + 129, 0x0002);			/* Current features option */
    put_le16(p + 130, 0x0005);			/* Reassigned sectors */
    put_le16(p + 131, 0x0001);			/* Initial power mode */
    put_le16(p + 132, 0x0000);			/* User signature */
    put_le16(p + 160, 0x8100);			/* Power requirement */
    put_le16(p + 161, 0x8001);			/* CF command set */

    ide_cfata_identify_size(s);
    s->identify_set = 1;

fill_buffer:
    memcpy(s->io_buffer, p, sizeof(s->identify_data));
}
373
374 static void ide_set_signature(IDEState *s)
375 {
376 s->select &= ~(ATA_DEV_HS); /* clear head */
377 /* put signature */
378 s->nsector = 1;
379 s->sector = 1;
380 if (s->drive_kind == IDE_CD) {
381 s->lcyl = 0x14;
382 s->hcyl = 0xeb;
383 } else if (s->blk) {
384 s->lcyl = 0;
385 s->hcyl = 0;
386 } else {
387 s->lcyl = 0xff;
388 s->hcyl = 0xff;
389 }
390 }
391
392 static bool ide_sect_range_ok(IDEState *s,
393 uint64_t sector, uint64_t nb_sectors)
394 {
395 uint64_t total_sectors;
396
397 blk_get_geometry(s->blk, &total_sectors);
398 if (sector > total_sectors || nb_sectors > total_sectors - sector) {
399 return false;
400 }
401 return true;
402 }
403
/* State for one guest TRIM (DATA SET MANAGEMENT) request. */
typedef struct TrimAIOCB {
    BlockAIOCB common;   /* embedded base AIOCB; must stay first */
    IDEState *s;
    QEMUBH *bh;          /* completion bottom half */
    int ret;             /* final status reported to the guest */
    QEMUIOVector *qiov;  /* guest buffer holding the LBA range entries */
    BlockAIOCB *aiocb;   /* currently in-flight discard, if any */
    int i, j;            /* cursor: 8-byte entry i within iovec element j */
} TrimAIOCB;
413
/*
 * Async-cancel hook for a TRIM request: fast-forward the entry cursor to
 * the last entry so ide_issue_trim_cb() stops iterating, mark the request
 * canceled, and cancel any discard that is already in flight.
 */
static void trim_aio_cancel(BlockAIOCB *acb)
{
    TrimAIOCB *iocb = container_of(acb, TrimAIOCB, common);

    /* Exit the loop so ide_issue_trim_cb will not continue */
    iocb->j = iocb->qiov->niov - 1;
    iocb->i = (iocb->qiov->iov[iocb->j].iov_len / 8) - 1;

    iocb->ret = -ECANCELED;

    if (iocb->aiocb) {
        blk_aio_cancel_async(iocb->aiocb);
        iocb->aiocb = NULL;
    }
}
429
/* AIOCB ops for TRIM; cancellation must stop the per-entry loop above. */
static const AIOCBInfo trim_aiocb_info = {
    .aiocb_size         = sizeof(TrimAIOCB),
    .cancel_async       = trim_aio_cancel,
};
434
/*
 * Bottom half that completes a TRIM request: deliver the final status to
 * the caller's completion callback, then tear down the TrimAIOCB.
 */
static void ide_trim_bh_cb(void *opaque)
{
    TrimAIOCB *iocb = opaque;
    /* Keep a local ref: iocb is freed before the in-flight decrement */
    BlockBackend *blk = iocb->s->blk;

    iocb->common.cb(iocb->common.opaque, iocb->ret);

    qemu_bh_delete(iocb->bh);
    iocb->bh = NULL;
    qemu_aio_unref(iocb);

    /* Paired with an increment in ide_issue_trim() */
    blk_dec_in_flight(blk);
}
449
/*
 * Completion callback driving the TRIM state machine.  Each invocation
 * accounts for the discard that just finished (if any), then scans the
 * guest-provided entry list from the saved (i, j) cursor, submitting the
 * next non-empty LBA range as an async discard.  When all entries are
 * consumed, or on error, the completion bottom half is scheduled.
 */
static void ide_issue_trim_cb(void *opaque, int ret)
{
    TrimAIOCB *iocb = opaque;
    IDEState *s = iocb->s;

    /* i >= 0 means a previously submitted discard just completed */
    if (iocb->i >= 0) {
        if (ret >= 0) {
            block_acct_done(blk_get_stats(s->blk), &s->acct);
        } else {
            block_acct_failed(blk_get_stats(s->blk), &s->acct);
        }
    }

    if (ret >= 0) {
        while (iocb->j < iocb->qiov->niov) {
            int j = iocb->j;
            while (++iocb->i < iocb->qiov->iov[j].iov_len / 8) {
                int i = iocb->i;
                uint64_t *buffer = iocb->qiov->iov[j].iov_base;

                /* 6-byte LBA + 2-byte range per entry */
                uint64_t entry = le64_to_cpu(buffer[i]);
                uint64_t sector = entry & 0x0000ffffffffffffULL;
                uint16_t count = entry >> 48;

                if (count == 0) {
                    /* Zero-length range: skip, per the ATA DSM spec */
                    continue;
                }

                if (!ide_sect_range_ok(s, sector, count)) {
                    block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_UNMAP);
                    iocb->ret = -EINVAL;
                    goto done;
                }

                block_acct_start(blk_get_stats(s->blk), &s->acct,
                                 count << BDRV_SECTOR_BITS, BLOCK_ACCT_UNMAP);

                /* Got an entry! Submit and exit.  */
                iocb->aiocb = blk_aio_pdiscard(s->blk,
                                               sector << BDRV_SECTOR_BITS,
                                               count << BDRV_SECTOR_BITS,
                                               ide_issue_trim_cb, opaque);
                return;
            }

            /* Advance to the next iovec element, resetting the entry index */
            iocb->j++;
            iocb->i = -1;
        }
    } else {
        iocb->ret = ret;
    }

done:
    iocb->aiocb = NULL;
    if (iocb->bh) {
        replay_bh_schedule_event(iocb->bh);
    }
}
509
/*
 * Entry point for a guest TRIM command, with the signature expected by
 * dma_blk_io().  qiov holds the DSM LBA range entries; opaque is the
 * IDEState.  Sets up a TrimAIOCB and kicks the state machine with a
 * synthetic "previous discard succeeded" call (i == -1 so no accounting
 * happens on that first pass).
 */
BlockAIOCB *ide_issue_trim(
        int64_t offset, QEMUIOVector *qiov,
        BlockCompletionFunc *cb, void *cb_opaque, void *opaque)
{
    IDEState *s = opaque;
    TrimAIOCB *iocb;

    /* Paired with a decrement in ide_trim_bh_cb() */
    blk_inc_in_flight(s->blk);

    iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque);
    iocb->s = s;
    iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb);
    iocb->ret = 0;
    iocb->qiov = qiov;
    iocb->i = -1;
    iocb->j = 0;
    ide_issue_trim_cb(iocb, 0);
    return &iocb->common;
}
530
531 void ide_abort_command(IDEState *s)
532 {
533 ide_transfer_stop(s);
534 s->status = READY_STAT | ERR_STAT;
535 s->error = ABRT_ERR;
536 }
537
538 static void ide_set_retry(IDEState *s)
539 {
540 s->bus->retry_unit = s->unit;
541 s->bus->retry_sector_num = ide_get_sector(s);
542 s->bus->retry_nsector = s->nsector;
543 }
544
545 static void ide_clear_retry(IDEState *s)
546 {
547 s->bus->retry_unit = -1;
548 s->bus->retry_sector_num = 0;
549 s->bus->retry_nsector = 0;
550 }
551
/* prepare data transfer and tell what to do after */
/*
 * Set up the PIO data window [buf, buf + size) and record retry state.
 * If the DMA backend supplies a pio_transfer hook (e.g. AHCI), the
 * transfer is performed through it immediately and true is returned so
 * the caller can invoke end_transfer_func without recursing; otherwise
 * the end_transfer_func is latched for later and false is returned.
 * DRQ is only raised when no error is already pending.
 */
bool ide_transfer_start_norecurse(IDEState *s, uint8_t *buf, int size,
                                  EndTransferFunc *end_transfer_func)
{
    s->data_ptr = buf;
    s->data_end = buf + size;
    ide_set_retry(s);
    if (!(s->status & ERR_STAT)) {
        s->status |= DRQ_STAT;
    }
    if (!s->bus->dma->ops->pio_transfer) {
        s->end_transfer_func = end_transfer_func;
        return false;
    }
    s->bus->dma->ops->pio_transfer(s->bus->dma);
    return true;
}
569
570 void ide_transfer_start(IDEState *s, uint8_t *buf, int size,
571 EndTransferFunc *end_transfer_func)
572 {
573 if (ide_transfer_start_norecurse(s, buf, size, end_transfer_func)) {
574 end_transfer_func(s);
575 }
576 }
577
578 static void ide_cmd_done(IDEState *s)
579 {
580 if (s->bus->dma->ops->cmd_done) {
581 s->bus->dma->ops->cmd_done(s->bus->dma);
582 }
583 }
584
585 static void ide_transfer_halt(IDEState *s)
586 {
587 s->end_transfer_func = ide_transfer_stop;
588 s->data_ptr = s->io_buffer;
589 s->data_end = s->io_buffer;
590 s->status &= ~DRQ_STAT;
591 }
592
/* End the current PIO transfer and signal command completion to the
 * DMA backend. */
void ide_transfer_stop(IDEState *s)
{
    ide_transfer_halt(s);
    ide_cmd_done(s);
}
598
599 int64_t ide_get_sector(IDEState *s)
600 {
601 int64_t sector_num;
602 if (s->select & (ATA_DEV_LBA)) {
603 if (s->lba48) {
604 sector_num = ((int64_t)s->hob_hcyl << 40) |
605 ((int64_t) s->hob_lcyl << 32) |
606 ((int64_t) s->hob_sector << 24) |
607 ((int64_t) s->hcyl << 16) |
608 ((int64_t) s->lcyl << 8) | s->sector;
609 } else {
610 /* LBA28 */
611 sector_num = ((s->select & (ATA_DEV_LBA_MSB)) << 24) |
612 (s->hcyl << 16) | (s->lcyl << 8) | s->sector;
613 }
614 } else {
615 /* CHS */
616 sector_num = ((s->hcyl << 8) | s->lcyl) * s->heads * s->sectors +
617 (s->select & (ATA_DEV_HS)) * s->sectors + (s->sector - 1);
618 }
619
620 return sector_num;
621 }
622
623 void ide_set_sector(IDEState *s, int64_t sector_num)
624 {
625 unsigned int cyl, r;
626 if (s->select & (ATA_DEV_LBA)) {
627 if (s->lba48) {
628 s->sector = sector_num;
629 s->lcyl = sector_num >> 8;
630 s->hcyl = sector_num >> 16;
631 s->hob_sector = sector_num >> 24;
632 s->hob_lcyl = sector_num >> 32;
633 s->hob_hcyl = sector_num >> 40;
634 } else {
635 /* LBA28 */
636 s->select = (s->select & ~(ATA_DEV_LBA_MSB)) |
637 ((sector_num >> 24) & (ATA_DEV_LBA_MSB));
638 s->hcyl = (sector_num >> 16);
639 s->lcyl = (sector_num >> 8);
640 s->sector = (sector_num);
641 }
642 } else {
643 /* CHS */
644 cyl = sector_num / (s->heads * s->sectors);
645 r = sector_num % (s->heads * s->sectors);
646 s->hcyl = cyl >> 8;
647 s->lcyl = cyl;
648 s->select = (s->select & ~(ATA_DEV_HS)) |
649 ((r / s->sectors) & (ATA_DEV_HS));
650 s->sector = (r % s->sectors) + 1;
651 }
652 }
653
/* Report a read/write failure: abort the command and raise the IRQ. */
static void ide_rw_error(IDEState *s) {
    ide_abort_command(s);
    ide_set_irq(s->bus);
}
658
/*
 * Completion for a buffered read: copy the bounce buffer back into the
 * caller's iovec and forward the status, unless the request was orphaned
 * by ide_cancel_dma_sync() (then the original callback already ran with
 * -ECANCELED and only the bounce buffer is released here).
 */
static void ide_buffered_readv_cb(void *opaque, int ret)
{
    IDEBufferedRequest *req = opaque;
    if (!req->orphaned) {
        if (!ret) {
            assert(req->qiov.size == req->original_qiov->size);
            qemu_iovec_from_buf(req->original_qiov, 0,
                                req->qiov.local_iov.iov_base,
                                req->original_qiov->size);
        }
        req->original_cb(req->original_opaque, ret);
    }
    QLIST_REMOVE(req, list);
    qemu_vfree(qemu_iovec_buf(&req->qiov));
    g_free(req);
}
675
#define MAX_BUFFERED_REQS 16

/*
 * Issue an async read through a private bounce buffer instead of the
 * caller's iovec, so the request can be "orphaned" (detached from guest
 * memory) if DMA is cancelled while it is in flight.  Fails with -EIO
 * when too many buffered requests are already pending.
 * NOTE(review): `c > MAX_BUFFERED_REQS` admits one request beyond the
 * named maximum — looks like an off-by-one, but matches long-standing
 * behavior; confirm before tightening.
 */
BlockAIOCB *ide_buffered_readv(IDEState *s, int64_t sector_num,
                               QEMUIOVector *iov, int nb_sectors,
                               BlockCompletionFunc *cb, void *opaque)
{
    BlockAIOCB *aioreq;
    IDEBufferedRequest *req;
    int c = 0;

    QLIST_FOREACH(req, &s->buffered_requests, list) {
        c++;
    }
    if (c > MAX_BUFFERED_REQS) {
        return blk_abort_aio_request(s->blk, cb, opaque, -EIO);
    }

    req = g_new0(IDEBufferedRequest, 1);
    req->original_qiov = iov;
    req->original_cb = cb;
    req->original_opaque = opaque;
    qemu_iovec_init_buf(&req->qiov, blk_blockalign(s->blk, iov->size),
                        iov->size);

    aioreq = blk_aio_preadv(s->blk, sector_num << BDRV_SECTOR_BITS,
                            &req->qiov, 0, ide_buffered_readv_cb, req);

    QLIST_INSERT_HEAD(&s->buffered_requests, req, list);
    return aioreq;
}
706
/**
 * Cancel all pending DMA requests.
 * Any buffered DMA requests are instantly canceled,
 * but any pending unbuffered DMA requests must be waited on.
 */
void ide_cancel_dma_sync(IDEState *s)
{
    IDEBufferedRequest *req;

    /* First invoke the callbacks of all buffered requests
     * and flag those requests as orphaned. Ideally there
     * are no unbuffered (Scatter Gather DMA Requests or
     * write requests) pending and we can avoid to drain. */
    QLIST_FOREACH(req, &s->buffered_requests, list) {
        if (!req->orphaned) {
            trace_ide_cancel_dma_sync_buffered(req->original_cb, req);
            /* The request completes with -ECANCELED now; its bounce
             * buffer is freed later by ide_buffered_readv_cb(). */
            req->original_cb(req->original_opaque, -ECANCELED);
        }
        req->orphaned = true;
    }

    /*
     * We can't cancel Scatter Gather DMA in the middle of the
     * operation or a partial (not full) DMA transfer would reach
     * the storage so we wait for completion instead (we behave
     * like if the DMA was completed by the time the guest trying
     * to cancel dma with bmdma_cmd_writeb with BM_CMD_START not
     * set).
     *
     * In the future we'll be able to safely cancel the I/O if the
     * whole DMA operation will be submitted to disk with a single
     * aio operation with preadv/pwritev.
     */
    if (s->bus->dma->aiocb) {
        trace_ide_cancel_dma_sync_remaining();
        blk_drain(s->blk);
        assert(s->bus->dma->aiocb == NULL);
    }
}
746
747 static void ide_sector_read(IDEState *s);
748
/*
 * Completion for one PIO read chunk: on error, let the rerror policy
 * decide (possibly re-queuing the request); on success, advance the
 * sector position, hand the chunk to the guest via a PIO transfer that
 * re-enters ide_sector_read() when drained, and raise the IRQ.
 */
static void ide_sector_read_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO |
                                IDE_RETRY_READ)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    /* Transfer at most req_nb_sectors per PIO chunk */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    ide_set_sector(s, ide_get_sector(s) + n);
    s->nsector -= n;
    /* Allow the guest to read the io_buffer */
    ide_transfer_start(s, s->io_buffer, n * BDRV_SECTOR_SIZE, ide_sector_read);
    ide_set_irq(s->bus);
}
777
/*
 * Start (or continue) a PIO sector read: validate the range, then issue
 * a buffered async read of up to req_nb_sectors sectors into io_buffer.
 * Completion continues in ide_sector_read_cb(); nsector == 0 ends the
 * command.
 */
static void ide_sector_read(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT;
    s->error = 0; /* not needed by IDE spec, but needed by Windows */
    sector_num = ide_get_sector(s);
    n = s->nsector;

    if (n == 0) {
        ide_transfer_stop(s);
        return;
    }

    s->status |= BUSY_STAT;

    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_read(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_READ);
        return;
    }

    qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
    s->pio_aiocb = ide_buffered_readv(s, sector_num, &s->qiov, n,
                                      ide_sector_read_cb, s);
}
814
/*
 * Commit tx_bytes of the current DMA transfer: inform the backend
 * (so e.g. BMDMA can advance its PRD pointer), account the bytes in
 * io_buffer_offset, and tear down the scatter/gather list.
 */
void dma_buf_commit(IDEState *s, uint32_t tx_bytes)
{
    if (s->bus->dma->ops->commit_buf) {
        s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes);
    }
    s->io_buffer_offset += tx_bytes;
    qemu_sglist_destroy(&s->sg);
}
823
/*
 * Mark the DMA engine idle: clear the in-flight aiocb and retry state,
 * tell the backend (more=true keeps its Active bit set when the PRDs
 * described more data than the command consumed), and signal completion.
 */
void ide_set_inactive(IDEState *s, bool more)
{
    s->bus->dma->aiocb = NULL;
    ide_clear_retry(s);
    if (s->bus->dma->ops->set_inactive) {
        s->bus->dma->ops->set_inactive(s->bus->dma, more);
    }
    ide_cmd_done(s);
}
833
/* Fail the current DMA command: drop the sg list without committing any
 * bytes, abort the command, deactivate DMA, and raise the IRQ. */
void ide_dma_error(IDEState *s)
{
    dma_buf_commit(s, 0);
    ide_abort_command(s);
    ide_set_inactive(s, false);
    ide_set_irq(s->bus);
}
841
/*
 * Apply the drive's configured werror/rerror policy to a failed request.
 * op is an IDE_RETRY_* bitmask describing how to replay the request.
 * STOP pauses the VM and leaves error_status set for later retry;
 * REPORT surfaces the error to the guest via the appropriate path.
 * Returns nonzero unless the policy is IGNORE (i.e. nonzero means the
 * caller must not continue normal completion).
 */
int ide_handle_rw_error(IDEState *s, int error, int op)
{
    bool is_read = (op & IDE_RETRY_READ) != 0;
    BlockErrorAction action = blk_get_error_action(s->blk, is_read, error);

    if (action == BLOCK_ERROR_ACTION_STOP) {
        assert(s->bus->retry_unit == s->unit);
        s->bus->error_status = op;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        block_acct_failed(blk_get_stats(s->blk), &s->acct);
        if (IS_IDE_RETRY_DMA(op)) {
            ide_dma_error(s);
        } else if (IS_IDE_RETRY_ATAPI(op)) {
            ide_atapi_io_error(s, -error);
        } else {
            ide_rw_error(s);
        }
    }
    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
863
/*
 * DMA completion callback and main loop: commits the chunk that just
 * finished, advances the sector position, and either completes the
 * command (nsector == 0) or maps the next run of PRDs and submits the
 * next read/write/trim.  Also called once with ret == 0 to start the
 * first chunk.  Handles short/long PRD lists per the BMDMA rules.
 */
static void ide_dma_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;
    int64_t sector_num;
    uint64_t offset;
    bool stay_active = false;
    int32_t prep_size = 0;

    if (ret == -EINVAL) {
        ide_dma_error(s);
        return;
    }

    if (ret < 0) {
        if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) {
            s->bus->dma->aiocb = NULL;
            dma_buf_commit(s, 0);
            return;
        }
    }

    if (s->io_buffer_size > s->nsector * 512) {
        /*
         * The PRDs were longer than needed for this request.
         * The Active bit must remain set after the request completes.
         */
        n = s->nsector;
        stay_active = true;
    } else {
        n = s->io_buffer_size >> 9;
    }

    sector_num = ide_get_sector(s);
    if (n > 0) {
        /* A chunk actually transferred: commit it and advance position */
        assert(n * 512 == s->sg.size);
        dma_buf_commit(s, s->sg.size);
        sector_num += n;
        ide_set_sector(s, sector_num);
        s->nsector -= n;
    }

    /* end of transfer ? */
    if (s->nsector == 0) {
        s->status = READY_STAT | SEEK_STAT;
        ide_set_irq(s->bus);
        goto eot;
    }

    /* launch next transfer */
    n = s->nsector;
    s->io_buffer_index = 0;
    s->io_buffer_size = n * 512;
    prep_size = s->bus->dma->ops->prepare_buf(s->bus->dma, s->io_buffer_size);
    /* prepare_buf() must succeed and respect the limit */
    assert(prep_size >= 0 && prep_size <= n * 512);

    /*
     * Now prep_size stores the number of bytes in the sglist, and
     * s->io_buffer_size stores the number of bytes described by the PRDs.
     */

    if (prep_size < n * 512) {
        /*
         * The PRDs are too short for this request. Error condition!
         * Reset the Active bit and don't raise the interrupt.
         */
        s->status = READY_STAT | SEEK_STAT;
        dma_buf_commit(s, 0);
        goto eot;
    }

    trace_ide_dma_cb(s, sector_num, n, IDE_DMA_CMD_str(s->dma_cmd));

    if ((s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) &&
        !ide_sect_range_ok(s, sector_num, n)) {
        ide_dma_error(s);
        block_acct_invalid(blk_get_stats(s->blk), s->acct.type);
        return;
    }

    offset = sector_num << BDRV_SECTOR_BITS;
    switch (s->dma_cmd) {
    case IDE_DMA_READ:
        s->bus->dma->aiocb = dma_blk_read(s->blk, &s->sg, offset,
                                          BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_WRITE:
        s->bus->dma->aiocb = dma_blk_write(s->blk, &s->sg, offset,
                                           BDRV_SECTOR_SIZE, ide_dma_cb, s);
        break;
    case IDE_DMA_TRIM:
        s->bus->dma->aiocb = dma_blk_io(blk_get_aio_context(s->blk),
                                        &s->sg, offset, BDRV_SECTOR_SIZE,
                                        ide_issue_trim, s, ide_dma_cb, s,
                                        DMA_DIRECTION_TO_DEVICE);
        break;
    default:
        abort();
    }
    return;

eot:
    if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    ide_set_inactive(s, stay_active);
}
972
973 static void ide_sector_start_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
974 {
975 s->status = READY_STAT | SEEK_STAT | DRQ_STAT;
976 s->io_buffer_size = 0;
977 s->dma_cmd = dma_cmd;
978
979 switch (dma_cmd) {
980 case IDE_DMA_READ:
981 block_acct_start(blk_get_stats(s->blk), &s->acct,
982 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_READ);
983 break;
984 case IDE_DMA_WRITE:
985 block_acct_start(blk_get_stats(s->blk), &s->acct,
986 s->nsector * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
987 break;
988 default:
989 break;
990 }
991
992 ide_start_dma(s, ide_dma_cb);
993 }
994
/*
 * Arm the DMA backend with completion callback cb, recording retry state
 * first so a failed transfer can be replayed.
 */
void ide_start_dma(IDEState *s, BlockCompletionFunc *cb)
{
    s->io_buffer_index = 0;
    ide_set_retry(s);
    if (s->bus->dma->ops->start_dma) {
        s->bus->dma->ops->start_dma(s->bus->dma, s, cb);
    }
}
1003
1004 static void ide_sector_write(IDEState *s);
1005
/* Timer callback used by the win2k install hack to delay the write IRQ. */
static void ide_sector_write_timer_cb(void *opaque)
{
    IDEState *s = opaque;
    ide_set_irq(s->bus);
}
1011
/*
 * Completion for one PIO write chunk: on error defer to the werror
 * policy; on success advance the sector position and either stop the
 * transfer (all sectors written) or open the next PIO window that
 * re-enters ide_sector_write().  The IRQ may be delayed by the win2k
 * install hack.
 */
static void ide_sector_write_cb(void *opaque, int ret)
{
    IDEState *s = opaque;
    int n;

    s->pio_aiocb = NULL;
    s->status &= ~BUSY_STAT;

    if (ret != 0) {
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_PIO)) {
            return;
        }
    }

    block_acct_done(blk_get_stats(s->blk), &s->acct);

    /* At most req_nb_sectors were written in this chunk */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }
    s->nsector -= n;

    ide_set_sector(s, ide_get_sector(s) + n);
    if (s->nsector == 0) {
        /* no more sectors to write */
        ide_transfer_stop(s);
    } else {
        int n1 = s->nsector;
        if (n1 > s->req_nb_sectors) {
            n1 = s->req_nb_sectors;
        }
        ide_transfer_start(s, s->io_buffer, n1 * BDRV_SECTOR_SIZE,
                           ide_sector_write);
    }

    if (win2k_install_hack && ((++s->irq_count % 16) == 0)) {
        /* It seems there is a bug in the Windows 2000 installer HDD
           IDE driver which fills the disk with empty logs when the
           IDE write IRQ comes too early. This hack tries to correct
           that at the expense of slower write performances. Use this
           option _only_ to install Windows 2000. You must disable it
           for normal use. */
        timer_mod(s->sector_write_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                  (NANOSECONDS_PER_SECOND / 1000));
    } else {
        ide_set_irq(s->bus);
    }
}
1060
/* Start (or continue) a PIO write: the guest has filled io_buffer with
 * up to req_nb_sectors sectors; submit them asynchronously to the block
 * backend.  Completion is handled by ide_sector_write_cb(). */
static void ide_sector_write(IDEState *s)
{
    int64_t sector_num;
    int n;

    s->status = READY_STAT | SEEK_STAT | BUSY_STAT;
    sector_num = ide_get_sector(s);

    /* Write at most one PIO chunk (req_nb_sectors) per call. */
    n = s->nsector;
    if (n > s->req_nb_sectors) {
        n = s->req_nb_sectors;
    }

    trace_ide_sector_write(sector_num, n);

    if (!ide_sect_range_ok(s, sector_num, n)) {
        /* Out-of-range request: fail the command without touching the
         * block backend. */
        ide_rw_error(s);
        block_acct_invalid(blk_get_stats(s->blk), BLOCK_ACCT_WRITE);
        return;
    }

    qemu_iovec_init_buf(&s->qiov, s->io_buffer, n * BDRV_SECTOR_SIZE);

    block_acct_start(blk_get_stats(s->blk), &s->acct,
                     n * BDRV_SECTOR_SIZE, BLOCK_ACCT_WRITE);
    s->pio_aiocb = blk_aio_pwritev(s->blk, sector_num << BDRV_SECTOR_BITS,
                                   &s->qiov, 0, ide_sector_write_cb, s);
}
1089
/* Completion callback for FLUSH CACHE: finish accounting, mark the
 * device ready, and raise the completion IRQ. */
static void ide_flush_cb(void *opaque, int ret)
{
    IDEState *s = opaque;

    s->pio_aiocb = NULL;

    if (ret < 0) {
        /* XXX: What sector number to set here? */
        if (ide_handle_rw_error(s, -ret, IDE_RETRY_FLUSH)) {
            return;
        }
    }

    /* ide_flush_cache() calls this directly with ret == 0 when no
     * backend is attached, so s->blk may be NULL here. */
    if (s->blk) {
        block_acct_done(blk_get_stats(s->blk), &s->acct);
    }
    s->status = READY_STAT | SEEK_STAT;
    ide_cmd_done(s);
    ide_set_irq(s->bus);
}
1110
/* Issue an asynchronous flush of the backend's write cache.  With no
 * backend attached, complete immediately with success. */
static void ide_flush_cache(IDEState *s)
{
    if (s->blk == NULL) {
        ide_flush_cb(s, 0);
        return;
    }

    s->status |= BUSY_STAT;
    ide_set_retry(s);
    block_acct_start(blk_get_stats(s->blk), &s->acct, 0, BLOCK_ACCT_FLUSH);
    s->pio_aiocb = blk_aio_flush(s->blk, ide_flush_cb, s);
}
1123
1124 static void ide_cfata_metadata_inquiry(IDEState *s)
1125 {
1126 uint16_t *p;
1127 uint32_t spd;
1128
1129 p = (uint16_t *) s->io_buffer;
1130 memset(p, 0, 0x200);
1131 spd = ((s->mdata_size - 1) >> 9) + 1;
1132
1133 put_le16(p + 0, 0x0001); /* Data format revision */
1134 put_le16(p + 1, 0x0000); /* Media property: silicon */
1135 put_le16(p + 2, s->media_changed); /* Media status */
1136 put_le16(p + 3, s->mdata_size & 0xffff); /* Capacity in bytes (low) */
1137 put_le16(p + 4, s->mdata_size >> 16); /* Capacity in bytes (high) */
1138 put_le16(p + 5, spd & 0xffff); /* Sectors per device (low) */
1139 put_le16(p + 6, spd >> 16); /* Sectors per device (high) */
1140 }
1141
/* CFA ACCESS METADATA STORAGE, feature 0x03: copy metadata (starting at
 * the 512-byte-sector offset given by hcyl:lcyl) into the sector
 * buffer, preceded by the media status word. */
static void ide_cfata_metadata_read(IDEState *s)
{
    uint16_t *p;

    /* Reject offsets past the end of the metadata area. */
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    p = (uint16_t *) s->io_buffer;
    memset(p, 0, 0x200);

    put_le16(p + 0, s->media_changed);  /* Media status */
    /* Copy no more than: the remaining metadata, the requested sector
     * count, or what fits in the buffer after the status word. */
    memcpy(p + 1, s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
}
1160
/* CFA ACCESS METADATA STORAGE, feature 0x04: copy the guest-supplied
 * sector buffer (after the 2-byte status word) into the metadata
 * storage area at the 512-byte-sector offset given by hcyl:lcyl. */
static void ide_cfata_metadata_write(IDEState *s)
{
    /* Reject offsets past the end of the metadata area. */
    if (((s->hcyl << 16) | s->lcyl) << 9 > s->mdata_size + 2) {
        s->status = ERR_STAT;
        s->error = ABRT_ERR;
        return;
    }

    s->media_changed = 0;

    /* Copy no more than: the remaining metadata, the requested sector
     * count, or what the buffer holds after the status word. */
    memcpy(s->mdata_storage + (((s->hcyl << 16) | s->lcyl) << 9),
           s->io_buffer + 2,
           MIN(MIN(s->mdata_size - (((s->hcyl << 16) | s->lcyl) << 9),
                   s->nsector << 9), 0x200 - 2));
}
1176
/* called when the inserted state of the media has changed */
static void ide_cd_change_cb(void *opaque, bool load, Error **errp)
{
    IDEState *s = opaque;
    uint64_t nb_sectors;

    /* Track the new tray state and refresh the device's view of the
     * medium size. */
    s->tray_open = !load;
    blk_get_geometry(s->blk, &nb_sectors);
    s->nb_sectors = nb_sectors;

    /*
     * First indicate to the guest that a CD has been removed. That's
     * done on the next command the guest sends us.
     *
     * Then we set UNIT_ATTENTION, by which the guest will
     * detect a new CD in the drive. See ide_atapi_cmd() for details.
     */
    s->cdrom_changed = 1;
    s->events.new_media = true;
    s->events.eject_request = false;
    ide_set_irq(s->bus);
}
1199
/* Host-side eject request: record the event for the guest and, for a
 * forced eject, unlock the tray so it can actually open. */
static void ide_cd_eject_request_cb(void *opaque, bool force)
{
    IDEState *s = opaque;

    s->events.eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
    ide_set_irq(s->bus);
}
1210
1211 static void ide_cmd_lba48_transform(IDEState *s, int lba48)
1212 {
1213 s->lba48 = lba48;
1214
1215 /* handle the 'magic' 0 nsector count conversion here. to avoid
1216 * fiddling with the rest of the read logic, we just store the
1217 * full sector count in ->nsector and ignore ->hob_nsector from now
1218 */
1219 if (!s->lba48) {
1220 if (!s->nsector)
1221 s->nsector = 256;
1222 } else {
1223 if (!s->nsector && !s->hob_nsector)
1224 s->nsector = 65536;
1225 else {
1226 int lo = s->nsector;
1227 int hi = s->hob_nsector;
1228
1229 s->nsector = (hi << 8) | lo;
1230 }
1231 }
1232 }
1233
/* Clear the HOB (high-order byte) selector so subsequent register reads
 * return the low-order bytes again. */
static void ide_clear_hob(IDEBus *bus)
{
    /* any write clears HOB high bit of device control register */
    bus->cmd &= ~(IDE_CTRL_HOB);
}
1239
/* IOport [W]rite [R]egisters */
/* Register offsets (addr & 7) within the command block for writes. */
enum ATA_IOPORT_WR {
    ATA_IOPORT_WR_DATA = 0,
    ATA_IOPORT_WR_FEATURES = 1,
    ATA_IOPORT_WR_SECTOR_COUNT = 2,
    ATA_IOPORT_WR_SECTOR_NUMBER = 3,
    ATA_IOPORT_WR_CYLINDER_LOW = 4,
    ATA_IOPORT_WR_CYLINDER_HIGH = 5,
    ATA_IOPORT_WR_DEVICE_HEAD = 6,
    ATA_IOPORT_WR_COMMAND = 7,
    ATA_IOPORT_WR_NUM_REGISTERS,
};
1252
/* Human-readable register names for trace output, indexed by
 * enum ATA_IOPORT_WR. */
const char *ATA_IOPORT_WR_lookup[ATA_IOPORT_WR_NUM_REGISTERS] = {
    [ATA_IOPORT_WR_DATA] = "Data",
    [ATA_IOPORT_WR_FEATURES] = "Features",
    [ATA_IOPORT_WR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_WR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_WR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_WR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_WR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_WR_COMMAND] = "Command"
};
1263
1264 void ide_ioport_write(void *opaque, uint32_t addr, uint32_t val)
1265 {
1266 IDEBus *bus = opaque;
1267 IDEState *s = idebus_active_if(bus);
1268 int reg_num = addr & 7;
1269
1270 trace_ide_ioport_write(addr, ATA_IOPORT_WR_lookup[reg_num], val, bus, s);
1271
1272 /* ignore writes to command block while busy with previous command */
1273 if (reg_num != 7 && (s->status & (BUSY_STAT|DRQ_STAT))) {
1274 return;
1275 }
1276
1277 /* NOTE: Device0 and Device1 both receive incoming register writes.
1278 * (They're on the same bus! They have to!) */
1279
1280 switch (reg_num) {
1281 case 0:
1282 break;
1283 case ATA_IOPORT_WR_FEATURES:
1284 ide_clear_hob(bus);
1285 bus->ifs[0].hob_feature = bus->ifs[0].feature;
1286 bus->ifs[1].hob_feature = bus->ifs[1].feature;
1287 bus->ifs[0].feature = val;
1288 bus->ifs[1].feature = val;
1289 break;
1290 case ATA_IOPORT_WR_SECTOR_COUNT:
1291 ide_clear_hob(bus);
1292 bus->ifs[0].hob_nsector = bus->ifs[0].nsector;
1293 bus->ifs[1].hob_nsector = bus->ifs[1].nsector;
1294 bus->ifs[0].nsector = val;
1295 bus->ifs[1].nsector = val;
1296 break;
1297 case ATA_IOPORT_WR_SECTOR_NUMBER:
1298 ide_clear_hob(bus);
1299 bus->ifs[0].hob_sector = bus->ifs[0].sector;
1300 bus->ifs[1].hob_sector = bus->ifs[1].sector;
1301 bus->ifs[0].sector = val;
1302 bus->ifs[1].sector = val;
1303 break;
1304 case ATA_IOPORT_WR_CYLINDER_LOW:
1305 ide_clear_hob(bus);
1306 bus->ifs[0].hob_lcyl = bus->ifs[0].lcyl;
1307 bus->ifs[1].hob_lcyl = bus->ifs[1].lcyl;
1308 bus->ifs[0].lcyl = val;
1309 bus->ifs[1].lcyl = val;
1310 break;
1311 case ATA_IOPORT_WR_CYLINDER_HIGH:
1312 ide_clear_hob(bus);
1313 bus->ifs[0].hob_hcyl = bus->ifs[0].hcyl;
1314 bus->ifs[1].hob_hcyl = bus->ifs[1].hcyl;
1315 bus->ifs[0].hcyl = val;
1316 bus->ifs[1].hcyl = val;
1317 break;
1318 case ATA_IOPORT_WR_DEVICE_HEAD:
1319 ide_clear_hob(bus);
1320 bus->ifs[0].select = val | (ATA_DEV_ALWAYS_ON);
1321 bus->ifs[1].select = val | (ATA_DEV_ALWAYS_ON);
1322 /* select drive */
1323 bus->unit = (val & (ATA_DEV_SELECT)) ? 1 : 0;
1324 break;
1325 default:
1326 case ATA_IOPORT_WR_COMMAND:
1327 ide_clear_hob(bus);
1328 qemu_irq_lower(bus->irq);
1329 ide_exec_cmd(bus, val);
1330 break;
1331 }
1332 }
1333
/* Reset a single drive to its power-on register state: cancel any
 * in-flight PIO request, restore geometry if a revert was requested
 * via SET FEATURES 0xcc, and clear taskfile, ATAPI and DMA state. */
static void ide_reset(IDEState *s)
{
    trace_ide_reset(s);

    if (s->pio_aiocb) {
        blk_aio_cancel(s->pio_aiocb);
        s->pio_aiocb = NULL;
    }

    /* Revert geometry to power-on defaults if requested (see the
     * SET FEATURES 0xcc/0x66 handling in cmd_set_features()). */
    if (s->reset_reverts) {
        s->reset_reverts = false;
        s->heads = s->drive_heads;
        s->sectors = s->drive_sectors;
    }
    if (s->drive_kind == IDE_CFATA)
        s->mult_sectors = 0;
    else
        s->mult_sectors = MAX_MULT_SECTORS;
    /* ide regs */
    s->feature = 0;
    s->error = 0;
    s->nsector = 0;
    s->sector = 0;
    s->lcyl = 0;
    s->hcyl = 0;

    /* lba48 */
    s->hob_feature = 0;
    s->hob_sector = 0;
    s->hob_nsector = 0;
    s->hob_lcyl = 0;
    s->hob_hcyl = 0;

    s->select = (ATA_DEV_ALWAYS_ON);
    s->status = READY_STAT | SEEK_STAT;

    s->lba48 = 0;

    /* ATAPI specific */
    s->sense_key = 0;
    s->asc = 0;
    s->cdrom_changed = 0;
    s->packet_transfer_size = 0;
    s->elementary_transfer_size = 0;
    s->io_buffer_index = 0;
    s->cd_sector_size = 0;
    s->atapi_dma = 0;
    s->tray_locked = 0;
    s->tray_open = 0;
    /* ATA DMA state */
    s->io_buffer_size = 0;
    s->req_nb_sectors = 0;

    ide_set_signature(s);
    /* init the transfer handler so that 0xffff is returned on data
       accesses */
    s->end_transfer_func = ide_dummy_transfer_stop;
    ide_dummy_transfer_stop(s);
    s->media_changed = 0;
}
1394
/* Handler for commands with no effect: complete immediately, success. */
static bool cmd_nop(IDEState *s, uint8_t cmd)
{
    return true;
}
1399
/* ATAPI DEVICE RESET: abort any transfer in progress and return the
 * drive to its post-reset signature state. */
static bool cmd_device_reset(IDEState *s, uint8_t cmd)
{
    /* Halt PIO (in the DRQ phase), then DMA */
    ide_transfer_halt(s);
    ide_cancel_dma_sync(s);

    /* Reset any PIO commands, reset signature, etc */
    ide_reset(s);

    /* RESET: ATA8-ACS3 7.10.4 "Normal Outputs";
     * ATA8-ACS3 Table 184 "Device Signatures for Normal Output" */
    s->status = 0x00;

    /* Do not overwrite status register */
    return false;
}
1416
1417 static bool cmd_data_set_management(IDEState *s, uint8_t cmd)
1418 {
1419 switch (s->feature) {
1420 case DSM_TRIM:
1421 if (s->blk) {
1422 ide_sector_start_dma(s, IDE_DMA_TRIM);
1423 return false;
1424 }
1425 break;
1426 }
1427
1428 ide_abort_command(s);
1429 return true;
1430 }
1431
1432 static bool cmd_identify(IDEState *s, uint8_t cmd)
1433 {
1434 if (s->blk && s->drive_kind != IDE_CD) {
1435 if (s->drive_kind != IDE_CFATA) {
1436 ide_identify(s);
1437 } else {
1438 ide_cfata_identify(s);
1439 }
1440 s->status = READY_STAT | SEEK_STAT;
1441 ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
1442 ide_set_irq(s->bus);
1443 return false;
1444 } else {
1445 if (s->drive_kind == IDE_CD) {
1446 ide_set_signature(s);
1447 }
1448 ide_abort_command(s);
1449 }
1450
1451 return true;
1452 }
1453
1454 static bool cmd_verify(IDEState *s, uint8_t cmd)
1455 {
1456 bool lba48 = (cmd == WIN_VERIFY_EXT);
1457
1458 /* do sector number check ? */
1459 ide_cmd_lba48_transform(s, lba48);
1460
1461 return true;
1462 }
1463
1464 static bool cmd_set_multiple_mode(IDEState *s, uint8_t cmd)
1465 {
1466 if (s->drive_kind == IDE_CFATA && s->nsector == 0) {
1467 /* Disable Read and Write Multiple */
1468 s->mult_sectors = 0;
1469 } else if ((s->nsector & 0xff) != 0 &&
1470 ((s->nsector & 0xff) > MAX_MULT_SECTORS ||
1471 (s->nsector & (s->nsector - 1)) != 0)) {
1472 ide_abort_command(s);
1473 } else {
1474 s->mult_sectors = s->nsector & 0xff;
1475 }
1476
1477 return true;
1478 }
1479
1480 static bool cmd_read_multiple(IDEState *s, uint8_t cmd)
1481 {
1482 bool lba48 = (cmd == WIN_MULTREAD_EXT);
1483
1484 if (!s->blk || !s->mult_sectors) {
1485 ide_abort_command(s);
1486 return true;
1487 }
1488
1489 ide_cmd_lba48_transform(s, lba48);
1490 s->req_nb_sectors = s->mult_sectors;
1491 ide_sector_read(s);
1492 return false;
1493 }
1494
1495 static bool cmd_write_multiple(IDEState *s, uint8_t cmd)
1496 {
1497 bool lba48 = (cmd == WIN_MULTWRITE_EXT);
1498 int n;
1499
1500 if (!s->blk || !s->mult_sectors) {
1501 ide_abort_command(s);
1502 return true;
1503 }
1504
1505 ide_cmd_lba48_transform(s, lba48);
1506
1507 s->req_nb_sectors = s->mult_sectors;
1508 n = MIN(s->nsector, s->req_nb_sectors);
1509
1510 s->status = SEEK_STAT | READY_STAT;
1511 ide_transfer_start(s, s->io_buffer, 512 * n, ide_sector_write);
1512
1513 s->media_changed = 1;
1514
1515 return false;
1516 }
1517
1518 static bool cmd_read_pio(IDEState *s, uint8_t cmd)
1519 {
1520 bool lba48 = (cmd == WIN_READ_EXT);
1521
1522 if (s->drive_kind == IDE_CD) {
1523 ide_set_signature(s); /* odd, but ATA4 8.27.5.2 requires it */
1524 ide_abort_command(s);
1525 return true;
1526 }
1527
1528 if (!s->blk) {
1529 ide_abort_command(s);
1530 return true;
1531 }
1532
1533 ide_cmd_lba48_transform(s, lba48);
1534 s->req_nb_sectors = 1;
1535 ide_sector_read(s);
1536
1537 return false;
1538 }
1539
1540 static bool cmd_write_pio(IDEState *s, uint8_t cmd)
1541 {
1542 bool lba48 = (cmd == WIN_WRITE_EXT);
1543
1544 if (!s->blk) {
1545 ide_abort_command(s);
1546 return true;
1547 }
1548
1549 ide_cmd_lba48_transform(s, lba48);
1550
1551 s->req_nb_sectors = 1;
1552 s->status = SEEK_STAT | READY_STAT;
1553 ide_transfer_start(s, s->io_buffer, 512, ide_sector_write);
1554
1555 s->media_changed = 1;
1556
1557 return false;
1558 }
1559
1560 static bool cmd_read_dma(IDEState *s, uint8_t cmd)
1561 {
1562 bool lba48 = (cmd == WIN_READDMA_EXT);
1563
1564 if (!s->blk) {
1565 ide_abort_command(s);
1566 return true;
1567 }
1568
1569 ide_cmd_lba48_transform(s, lba48);
1570 ide_sector_start_dma(s, IDE_DMA_READ);
1571
1572 return false;
1573 }
1574
1575 static bool cmd_write_dma(IDEState *s, uint8_t cmd)
1576 {
1577 bool lba48 = (cmd == WIN_WRITEDMA_EXT);
1578
1579 if (!s->blk) {
1580 ide_abort_command(s);
1581 return true;
1582 }
1583
1584 ide_cmd_lba48_transform(s, lba48);
1585 ide_sector_start_dma(s, IDE_DMA_WRITE);
1586
1587 s->media_changed = 1;
1588
1589 return false;
1590 }
1591
/* FLUSH CACHE (EXT): completion is asynchronous; the status register
 * is updated from ide_flush_cb(). */
static bool cmd_flush_cache(IDEState *s, uint8_t cmd)
{
    ide_flush_cache(s);
    return false;
}
1597
/* SEEK: accepted and completed immediately. */
static bool cmd_seek(IDEState *s, uint8_t cmd)
{
    /* XXX: Check that seek is within bounds */
    return true;
}
1603
1604 static bool cmd_read_native_max(IDEState *s, uint8_t cmd)
1605 {
1606 bool lba48 = (cmd == WIN_READ_NATIVE_MAX_EXT);
1607
1608 /* Refuse if no sectors are addressable (e.g. medium not inserted) */
1609 if (s->nb_sectors == 0) {
1610 ide_abort_command(s);
1611 return true;
1612 }
1613
1614 ide_cmd_lba48_transform(s, lba48);
1615 ide_set_sector(s, s->nb_sectors - 1);
1616
1617 return true;
1618 }
1619
/* CHECK POWER MODE: the emulated drive is always active or idle. */
static bool cmd_check_power_mode(IDEState *s, uint8_t cmd)
{
    s->nsector = 0xff; /* device active or idle */
    return true;
}
1625
1626 /* INITIALIZE DEVICE PARAMETERS */
1627 static bool cmd_specify(IDEState *s, uint8_t cmd)
1628 {
1629 if (s->blk && s->drive_kind != IDE_CD) {
1630 s->heads = (s->select & (ATA_DEV_HS)) + 1;
1631 s->sectors = s->nsector;
1632 ide_set_irq(s->bus);
1633 } else {
1634 ide_abort_command(s);
1635 }
1636
1637 return true;
1638 }
1639
/* SET FEATURES: dispatch on the feature register.  Several subcommands
 * patch the cached IDENTIFY data words so the advertised capabilities
 * stay in sync with the new state. */
static bool cmd_set_features(IDEState *s, uint8_t cmd)
{
    uint16_t *identify_data;

    if (!s->blk) {
        ide_abort_command(s);
        return true;
    }

    /* XXX: valid for CDROM ? */
    switch (s->feature) {
    case 0x01: /* 8-bit I/O enable (CompactFlash) */
    case 0x81: /* 8-bit I/O disable (CompactFlash) */
        if (s->drive_kind != IDE_CFATA) {
            goto abort_cmd;
        }
        s->io8 = !(s->feature & 0x80);
        return true;
    case 0x02: /* write cache enable */
        blk_set_enable_write_cache(s->blk, true);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | (1 << 5) | 1);
        return true;
    case 0x82: /* write cache disable */
        blk_set_enable_write_cache(s->blk, false);
        identify_data = (uint16_t *)s->identify_data;
        put_le16(identify_data + 85, (1 << 14) | 1);
        /* Flush completes asynchronously; status is finalized in
         * ide_flush_cb(). */
        ide_flush_cache(s);
        return false;
    case 0xcc: /* reverting to power-on defaults enable */
        s->reset_reverts = true;
        return true;
    case 0x66: /* reverting to power-on defaults disable */
        s->reset_reverts = false;
        return true;
    case 0xaa: /* read look-ahead enable */
    case 0x55: /* read look-ahead disable */
    case 0x05: /* set advanced power management mode */
    case 0x85: /* disable advanced power management mode */
    case 0x69: /* NOP */
    case 0x67: /* NOP */
    case 0x96: /* NOP */
    case 0x9a: /* NOP */
    case 0x42: /* enable Automatic Acoustic Mode */
    case 0xc2: /* disable Automatic Acoustic Mode */
        return true;
    case 0x03: /* set transfer mode */
        {
            uint8_t val = s->nsector & 0x07;
            identify_data = (uint16_t *)s->identify_data;

            /* The upper bits of nsector select the mode class, the low
             * bits the mode number; IDENTIFY words 62/63/88 advertise
             * the currently selected mode. */
            switch (s->nsector >> 3) {
            case 0x00: /* pio default */
            case 0x01: /* pio mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x02: /* single word dma mode */
                put_le16(identify_data + 62, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x04: /* mdma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07 | (1 << (val + 8)));
                put_le16(identify_data + 88, 0x3f);
                break;
            case 0x08: /* udma mode */
                put_le16(identify_data + 62, 0x07);
                put_le16(identify_data + 63, 0x07);
                put_le16(identify_data + 88, 0x3f | (1 << (val + 8)));
                break;
            default:
                goto abort_cmd;
            }
            return true;
        }
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}
1724
1725
1726 /*** ATAPI commands ***/
1727
/* IDENTIFY PACKET DEVICE: return the ATAPI identify data via PIO. */
static bool cmd_identify_packet(IDEState *s, uint8_t cmd)
{
    ide_atapi_identify(s);
    s->status = READY_STAT | SEEK_STAT;
    ide_transfer_start(s, s->io_buffer, 512, ide_transfer_stop);
    ide_set_irq(s->bus);
    return false;
}
1736
/* EXECUTE DEVICE DIAGNOSTIC */
static bool cmd_exec_dev_diagnostic(IDEState *s, uint8_t cmd)
{
    /*
     * Clear the device register per the ATA (v6) specification,
     * because ide_set_signature does not clear LBA or drive bits.
     */
    s->select = (ATA_DEV_ALWAYS_ON);
    ide_set_signature(s);

    if (s->drive_kind == IDE_CD) {
        s->status = 0; /* ATAPI spec (v6) section 9.10 defines packet
                        * devices to return a clear status register
                        * with READY_STAT *not* set. */
        s->error = 0x01;
    } else {
        s->status = READY_STAT | SEEK_STAT;
        /* The bits of the error register are not as usual for this command!
         * They are part of the regular output (this is why ERR_STAT isn't set)
         * Device 0 passed, Device 1 passed or not present. */
        s->error = 0x01;
        ide_set_irq(s->bus);
    }

    return false;
}
1763
/* PACKET: start an ATAPI command.  The 12-byte command packet is
 * transferred via PIO and then dispatched by ide_atapi_cmd(). */
static bool cmd_packet(IDEState *s, uint8_t cmd)
{
    /* overlapping commands not supported */
    if (s->feature & 0x02) {
        ide_abort_command(s);
        return true;
    }

    s->status = READY_STAT | SEEK_STAT;
    /* Feature bit 0 selects DMA for the data phase of the command. */
    s->atapi_dma = s->feature & 1;
    if (s->atapi_dma) {
        s->dma_cmd = IDE_DMA_ATAPI;
    }
    s->nsector = 1;
    ide_transfer_start(s, s->io_buffer, ATAPI_PACKET_SIZE,
                       ide_atapi_cmd);
    return false;
}
1782
1783
1784 /*** CF-ATA commands ***/
1785
/* CFA REQUEST EXTENDED ERROR CODE: always reports a generic error. */
static bool cmd_cfa_req_ext_error_code(IDEState *s, uint8_t cmd)
{
    s->error = 0x09;    /* miscellaneous error */
    s->status = READY_STAT | SEEK_STAT;
    ide_set_irq(s->bus);

    return false;
}
1794
1795 static bool cmd_cfa_erase_sectors(IDEState *s, uint8_t cmd)
1796 {
1797 /* WIN_SECURITY_FREEZE_LOCK has the same ID as CFA_WEAR_LEVEL and is
1798 * required for Windows 8 to work with AHCI */
1799
1800 if (cmd == CFA_WEAR_LEVEL) {
1801 s->nsector = 0;
1802 }
1803
1804 if (cmd == CFA_ERASE_SECTORS) {
1805 s->media_changed = 1;
1806 }
1807
1808 return true;
1809 }
1810
/* CFA TRANSLATE SECTOR: return per-sector information (CHS/LBA echo,
 * erase flag, hot count) for the addressed sector via PIO. */
static bool cmd_cfa_translate_sector(IDEState *s, uint8_t cmd)
{
    s->status = READY_STAT | SEEK_STAT;

    memset(s->io_buffer, 0, 0x200);
    s->io_buffer[0x00] = s->hcyl;                   /* Cyl MSB */
    s->io_buffer[0x01] = s->lcyl;                   /* Cyl LSB */
    s->io_buffer[0x02] = s->select;                 /* Head */
    s->io_buffer[0x03] = s->sector;                 /* Sector */
    s->io_buffer[0x04] = ide_get_sector(s) >> 16;   /* LBA MSB */
    s->io_buffer[0x05] = ide_get_sector(s) >> 8;    /* LBA */
    s->io_buffer[0x06] = ide_get_sector(s) >> 0;    /* LBA LSB */
    s->io_buffer[0x13] = 0x00;                      /* Erase flag */
    s->io_buffer[0x18] = 0x00;                      /* Hot count */
    s->io_buffer[0x19] = 0x00;                      /* Hot count */
    s->io_buffer[0x1a] = 0x01;                      /* Hot count */

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    ide_set_irq(s->bus);

    return false;
}
1833
/* CFA ACCESS METADATA STORAGE: dispatch on the feature register to the
 * inquiry/read/write helper, then transfer the sector buffer via PIO. */
static bool cmd_cfa_access_metadata_storage(IDEState *s, uint8_t cmd)
{
    switch (s->feature) {
    case 0x02:  /* Inquiry Metadata Storage */
        ide_cfata_metadata_inquiry(s);
        break;
    case 0x03:  /* Read Metadata Storage */
        ide_cfata_metadata_read(s);
        break;
    case 0x04:  /* Write Metadata Storage */
        ide_cfata_metadata_write(s);
        break;
    default:
        ide_abort_command(s);
        return true;
    }

    ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
    s->status = 0x00; /* NOTE: READY is _not_ set */
    ide_set_irq(s->bus);

    return false;
}
1857
1858 static bool cmd_ibm_sense_condition(IDEState *s, uint8_t cmd)
1859 {
1860 switch (s->feature) {
1861 case 0x01: /* sense temperature in device */
1862 s->nsector = 0x50; /* +20 C */
1863 break;
1864 default:
1865 ide_abort_command(s);
1866 return true;
1867 }
1868
1869 return true;
1870 }
1871
1872
1873 /*** SMART commands ***/
1874
/* SMART: dispatch on the feature register.  The command is only valid
 * with the SMART signature (hcyl:lcyl == c2:4f) and, except for
 * SMART ENABLE, only while SMART is enabled. */
static bool cmd_smart(IDEState *s, uint8_t cmd)
{
    int n;

    if (s->hcyl != 0xc2 || s->lcyl != 0x4f) {
        goto abort_cmd;
    }

    if (!s->smart_enabled && s->feature != SMART_ENABLE) {
        goto abort_cmd;
    }

    switch (s->feature) {
    case SMART_DISABLE:
        s->smart_enabled = 0;
        return true;

    case SMART_ENABLE:
        s->smart_enabled = 1;
        return true;

    case SMART_ATTR_AUTOSAVE:
        switch (s->sector) {
        case 0x00:
            s->smart_autosave = 0;
            break;
        case 0xf1:
            s->smart_autosave = 1;
            break;
        default:
            goto abort_cmd;
        }
        return true;

    case SMART_STATUS:
        /* Good health echoes the signature; errors invert it. */
        if (!s->smart_errors) {
            s->hcyl = 0xc2;
            s->lcyl = 0x4f;
        } else {
            s->hcyl = 0x2c;
            s->lcyl = 0xf4;
        }
        return true;

    case SMART_READ_THRESH:
        /* One (id, threshold) pair per attribute, 12 bytes apart. */
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            s->io_buffer[2 + 0 + (n * 12)] = smart_attributes[n][0];
            s->io_buffer[2 + 1 + (n * 12)] = smart_attributes[n][11];
        }

        /* checksum */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_DATA:
        memset(s->io_buffer, 0, 0x200);
        s->io_buffer[0] = 0x01; /* smart struct version */

        for (n = 0; n < ARRAY_SIZE(smart_attributes); n++) {
            int i;
            for (i = 0; i < 11; i++) {
                s->io_buffer[2 + i + (n * 12)] = smart_attributes[n][i];
            }
        }

        s->io_buffer[362] = 0x02 | (s->smart_autosave ? 0x80 : 0x00);
        /* Self-test status byte comes from the most recent log entry. */
        if (s->smart_selftest_count == 0) {
            s->io_buffer[363] = 0;
        } else {
            s->io_buffer[363] =
                s->smart_selftest_data[3 +
                           (s->smart_selftest_count - 1) *
                           24];
        }
        s->io_buffer[364] = 0x20;
        s->io_buffer[365] = 0x01;
        /* offline data collection capacity: execute + self-test*/
        s->io_buffer[367] = (1 << 4 | 1 << 3 | 1);
        s->io_buffer[368] = 0x03; /* smart capability (1) */
        s->io_buffer[369] = 0x00; /* smart capability (2) */
        s->io_buffer[370] = 0x01; /* error logging supported */
        s->io_buffer[372] = 0x02; /* minutes for poll short test */
        s->io_buffer[373] = 0x36; /* minutes for poll ext test */
        s->io_buffer[374] = 0x01; /* minutes for poll conveyance */

        /* checksum */
        for (n = 0; n < 511; n++) {
            s->io_buffer[511] += s->io_buffer[n];
        }
        s->io_buffer[511] = 0x100 - s->io_buffer[511];

        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_READ_LOG:
        /* The sector register selects which log to read. */
        switch (s->sector) {
        case 0x01: /* summary smart error log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            s->io_buffer[1] = 0x00; /* no error entries */
            s->io_buffer[452] = s->smart_errors & 0xff;
            s->io_buffer[453] = (s->smart_errors & 0xff00) >> 8;

            /* checksum */
            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        case 0x06: /* smart self test log */
            memset(s->io_buffer, 0, 0x200);
            s->io_buffer[0] = 0x01;
            if (s->smart_selftest_count == 0) {
                s->io_buffer[508] = 0;
            } else {
                s->io_buffer[508] = s->smart_selftest_count;
                for (n = 2; n < 506; n++) {
                    s->io_buffer[n] = s->smart_selftest_data[n];
                }
            }

            /* checksum */
            for (n = 0; n < 511; n++) {
                s->io_buffer[511] += s->io_buffer[n];
            }
            s->io_buffer[511] = 0x100 - s->io_buffer[511];
            break;
        default:
            goto abort_cmd;
        }
        s->status = READY_STAT | SEEK_STAT;
        ide_transfer_start(s, s->io_buffer, 0x200, ide_transfer_stop);
        ide_set_irq(s->bus);
        return false;

    case SMART_EXECUTE_OFFLINE:
        switch (s->sector) {
        case 0: /* off-line routine */
        case 1: /* short self test */
        case 2: /* extended self test */
            /* Append a 24-byte entry to the self-test log, wrapping
             * after 21 entries. */
            s->smart_selftest_count++;
            if (s->smart_selftest_count > 21) {
                s->smart_selftest_count = 1;
            }
            n = 2 + (s->smart_selftest_count - 1) * 24;
            s->smart_selftest_data[n] = s->sector;
            s->smart_selftest_data[n + 1] = 0x00; /* OK and finished */
            s->smart_selftest_data[n + 2] = 0x34; /* hour count lsb */
            s->smart_selftest_data[n + 3] = 0x12; /* hour count msb */
            break;
        default:
            goto abort_cmd;
        }
        return true;
    }

abort_cmd:
    ide_abort_command(s);
    return true;
}
2044
/* Per-drive-kind permission bits for ide_cmd_table[].flags (one bit
 * per IDEDriveKind, tested by ide_cmd_permitted()). */
#define HD_OK (1u << IDE_HD)
#define CD_OK (1u << IDE_CD)
#define CFA_OK (1u << IDE_CFATA)
#define HD_CFA_OK (HD_OK | CFA_OK)
#define ALL_OK (HD_OK | CD_OK | CFA_OK)

/* Set the Disk Seek Completed status bit during completion */
#define SET_DSC (1u << 8)
2053
/* See ACS-2 T13/2015-D Table B.2 Command codes */
/* Dispatch table indexed by command byte: handler plus the permission
 * and SET_DSC flags consumed by ide_exec_cmd(). */
static const struct {
    /* Returns true if the completion code should be run */
    bool (*handler)(IDEState *s, uint8_t cmd);
    int flags;
} ide_cmd_table[0x100] = {
    /* NOP not implemented, mandatory for CD */
    [CFA_REQ_EXT_ERROR_CODE]      = { cmd_cfa_req_ext_error_code, CFA_OK },
    [WIN_DSM]                     = { cmd_data_set_management, HD_CFA_OK },
    [WIN_DEVICE_RESET]            = { cmd_device_reset, CD_OK },
    [WIN_RECAL]                   = { cmd_nop, HD_CFA_OK | SET_DSC},
    [WIN_READ]                    = { cmd_read_pio, ALL_OK },
    [WIN_READ_ONCE]               = { cmd_read_pio, HD_CFA_OK },
    [WIN_READ_EXT]                = { cmd_read_pio, HD_CFA_OK },
    [WIN_READDMA_EXT]             = { cmd_read_dma, HD_CFA_OK },
    [WIN_READ_NATIVE_MAX_EXT]     = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
    [WIN_MULTREAD_EXT]            = { cmd_read_multiple, HD_CFA_OK },
    [WIN_WRITE]                   = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_ONCE]              = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITE_EXT]               = { cmd_write_pio, HD_CFA_OK },
    [WIN_WRITEDMA_EXT]            = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_SECT_WO_ERASE]     = { cmd_write_pio, CFA_OK },
    [WIN_MULTWRITE_EXT]           = { cmd_write_multiple, HD_CFA_OK },
    [WIN_WRITE_VERIFY]            = { cmd_write_pio, HD_CFA_OK },
    [WIN_VERIFY]                  = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_ONCE]             = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_VERIFY_EXT]              = { cmd_verify, HD_CFA_OK | SET_DSC },
    [WIN_SEEK]                    = { cmd_seek, HD_CFA_OK | SET_DSC },
    [CFA_TRANSLATE_SECTOR]        = { cmd_cfa_translate_sector, CFA_OK },
    [WIN_DIAGNOSE]                = { cmd_exec_dev_diagnostic, ALL_OK },
    [WIN_SPECIFY]                 = { cmd_specify, HD_CFA_OK | SET_DSC },
    [WIN_STANDBYNOW2]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE2]          = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY2]                = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE2]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE2]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW2]               = { cmd_nop, HD_CFA_OK },
    [WIN_PACKETCMD]               = { cmd_packet, CD_OK },
    [WIN_PIDENTIFY]               = { cmd_identify_packet, CD_OK },
    [WIN_SMART]                   = { cmd_smart, HD_CFA_OK | SET_DSC },
    [CFA_ACCESS_METADATA_STORAGE] = { cmd_cfa_access_metadata_storage, CFA_OK },
    [CFA_ERASE_SECTORS]           = { cmd_cfa_erase_sectors, CFA_OK | SET_DSC },
    [WIN_MULTREAD]                = { cmd_read_multiple, HD_CFA_OK },
    [WIN_MULTWRITE]               = { cmd_write_multiple, HD_CFA_OK },
    [WIN_SETMULT]                 = { cmd_set_multiple_mode, HD_CFA_OK | SET_DSC },
    [WIN_READDMA]                 = { cmd_read_dma, HD_CFA_OK },
    [WIN_READDMA_ONCE]            = { cmd_read_dma, HD_CFA_OK },
    [WIN_WRITEDMA]                = { cmd_write_dma, HD_CFA_OK },
    [WIN_WRITEDMA_ONCE]           = { cmd_write_dma, HD_CFA_OK },
    [CFA_WRITE_MULTI_WO_ERASE]    = { cmd_write_multiple, CFA_OK },
    [WIN_STANDBYNOW1]             = { cmd_nop, HD_CFA_OK },
    [WIN_IDLEIMMEDIATE]           = { cmd_nop, HD_CFA_OK },
    [WIN_STANDBY]                 = { cmd_nop, HD_CFA_OK },
    [WIN_SETIDLE1]                = { cmd_nop, HD_CFA_OK },
    [WIN_CHECKPOWERMODE1]         = { cmd_check_power_mode, HD_CFA_OK | SET_DSC },
    [WIN_SLEEPNOW1]               = { cmd_nop, HD_CFA_OK },
    [WIN_FLUSH_CACHE]             = { cmd_flush_cache, ALL_OK },
    [WIN_FLUSH_CACHE_EXT]         = { cmd_flush_cache, HD_CFA_OK },
    [WIN_IDENTIFY]                = { cmd_identify, ALL_OK },
    [WIN_SETFEATURES]             = { cmd_set_features, ALL_OK | SET_DSC },
    [IBM_SENSE_CONDITION]         = { cmd_ibm_sense_condition, CFA_OK | SET_DSC },
    [CFA_WEAR_LEVEL]              = { cmd_cfa_erase_sectors, HD_CFA_OK | SET_DSC },
    [WIN_READ_NATIVE_MAX]         = { cmd_read_native_max, HD_CFA_OK | SET_DSC },
};
2118
2119 static bool ide_cmd_permitted(IDEState *s, uint32_t cmd)
2120 {
2121 return cmd < ARRAY_SIZE(ide_cmd_table)
2122 && (ide_cmd_table[cmd].flags & (1u << s->drive_kind));
2123 }
2124
/* Execute the command byte written to the command register: look it up
 * in ide_cmd_table[], run its handler and, when the handler completed
 * synchronously (returned true), finalize status and raise the IRQ. */
void ide_exec_cmd(IDEBus *bus, uint32_t val)
{
    IDEState *s;
    bool complete;

    s = idebus_active_if(bus);
    trace_ide_exec_cmd(bus, s, val);

    /* ignore commands to non existent slave */
    if (s != bus->ifs && !s->blk) {
        return;
    }

    /* Only RESET is allowed while BSY and/or DRQ are set,
     * and only to ATAPI devices. */
    if (s->status & (BUSY_STAT|DRQ_STAT)) {
        if (val != WIN_DEVICE_RESET || s->drive_kind != IDE_CD) {
            return;
        }
    }

    if (!ide_cmd_permitted(s, val)) {
        ide_abort_command(s);
        ide_set_irq(s->bus);
        return;
    }

    s->status = READY_STAT | BUSY_STAT;
    s->error = 0;
    s->io_buffer_offset = 0;

    complete = ide_cmd_table[val].handler(s, val);
    if (complete) {
        s->status &= ~BUSY_STAT;
        /* A handler that reports an error must also set ERR_STAT. */
        assert(!!s->error == !!(s->status & ERR_STAT));

        if ((ide_cmd_table[val].flags & SET_DSC) && !s->error) {
            s->status |= SEEK_STAT;
        }

        ide_cmd_done(s);
        ide_set_irq(s->bus);
    }
}
2169
/*
 * IOport [R]ead [R]egisters: byte offsets of the readable registers
 * within the ATA command block (addr & 7 in ide_ioport_read).
 */
enum ATA_IOPORT_RR {
    ATA_IOPORT_RR_DATA = 0,
    ATA_IOPORT_RR_ERROR = 1,
    ATA_IOPORT_RR_SECTOR_COUNT = 2,
    ATA_IOPORT_RR_SECTOR_NUMBER = 3,
    ATA_IOPORT_RR_CYLINDER_LOW = 4,
    ATA_IOPORT_RR_CYLINDER_HIGH = 5,
    ATA_IOPORT_RR_DEVICE_HEAD = 6,
    ATA_IOPORT_RR_STATUS = 7,
    ATA_IOPORT_RR_NUM_REGISTERS,
};
2182
/* Human-readable register names indexed by enum ATA_IOPORT_RR (tracing). */
const char *ATA_IOPORT_RR_lookup[ATA_IOPORT_RR_NUM_REGISTERS] = {
    [ATA_IOPORT_RR_DATA] = "Data",
    [ATA_IOPORT_RR_ERROR] = "Error",
    [ATA_IOPORT_RR_SECTOR_COUNT] = "Sector Count",
    [ATA_IOPORT_RR_SECTOR_NUMBER] = "Sector Number",
    [ATA_IOPORT_RR_CYLINDER_LOW] = "Cylinder Low",
    [ATA_IOPORT_RR_CYLINDER_HIGH] = "Cylinder High",
    [ATA_IOPORT_RR_DEVICE_HEAD] = "Device/Head",
    [ATA_IOPORT_RR_STATUS] = "Status"
};
2193
/*
 * 8-bit read from a register of the ATA command block.
 *
 * For the LBA48 shadow registers the HOB bit of the control register
 * selects between the current value and the previously written (hob_*)
 * latch.  Reads from a bus with no attached backends return 0; reading
 * the Status register additionally deasserts the interrupt line.
 */
uint32_t ide_ioport_read(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint32_t reg_num;
    int ret, hob;

    reg_num = addr & 7;
    hob = bus->cmd & (IDE_CTRL_HOB);
    switch (reg_num) {
    case ATA_IOPORT_RR_DATA:
        /*
         * The pre-GRUB Solaris x86 bootloader relies upon inb
         * consuming a word from the drive's sector buffer.
         */
        ret = ide_data_readw(bus, addr) & 0xff;
        break;
    case ATA_IOPORT_RR_ERROR:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else if (!hob) {
            ret = s->error;
        } else {
            ret = s->hob_feature;
        }
        break;
    case ATA_IOPORT_RR_SECTOR_COUNT:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->nsector & 0xff;
        } else {
            ret = s->hob_nsector;
        }
        break;
    case ATA_IOPORT_RR_SECTOR_NUMBER:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->sector;
        } else {
            ret = s->hob_sector;
        }
        break;
    case ATA_IOPORT_RR_CYLINDER_LOW:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->lcyl;
        } else {
            ret = s->hob_lcyl;
        }
        break;
    case ATA_IOPORT_RR_CYLINDER_HIGH:
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else if (!hob) {
            ret = s->hcyl;
        } else {
            ret = s->hob_hcyl;
        }
        break;
    case ATA_IOPORT_RR_DEVICE_HEAD:
        /* Device/Head has no HOB shadow. */
        if (!bus->ifs[0].blk && !bus->ifs[1].blk) {
            ret = 0;
        } else {
            ret = s->select;
        }
        break;
    default:
    case ATA_IOPORT_RR_STATUS:
        if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
            (s != bus->ifs && !s->blk)) {
            ret = 0;
        } else {
            ret = s->status;
        }
        /* Reading Status acknowledges (clears) the pending interrupt. */
        qemu_irq_lower(bus->irq);
        break;
    }

    trace_ide_ioport_read(addr, ATA_IOPORT_RR_lookup[reg_num], ret, bus, s);
    return ret;
}
2279
2280 uint32_t ide_status_read(void *opaque, uint32_t addr)
2281 {
2282 IDEBus *bus = opaque;
2283 IDEState *s = idebus_active_if(bus);
2284 int ret;
2285
2286 if ((!bus->ifs[0].blk && !bus->ifs[1].blk) ||
2287 (s != bus->ifs && !s->blk)) {
2288 ret = 0;
2289 } else {
2290 ret = s->status;
2291 }
2292
2293 trace_ide_status_read(addr, ret, bus, s);
2294 return ret;
2295 }
2296
/*
 * Execute the software-reset sequence for one drive.  The ordering
 * below is deliberate: halt PIO state first, then drain DMA (which may
 * run completion callbacks), then wipe the register file.
 */
static void ide_perform_srst(IDEState *s)
{
    s->status |= BUSY_STAT;

    /* Halt PIO (Via register state); PIO BH remains scheduled. */
    ide_transfer_halt(s);

    /* Cancel DMA -- may drain block device and invoke callbacks */
    ide_cancel_dma_sync(s);

    /* Cancel PIO callback, reset registers/signature, etc */
    ide_reset(s);

    /* perform diagnostic */
    cmd_exec_dev_diagnostic(s, WIN_DIAGNOSE);
}
2313
2314 static void ide_bus_perform_srst(void *opaque)
2315 {
2316 IDEBus *bus = opaque;
2317 IDEState *s;
2318 int i;
2319
2320 for (i = 0; i < 2; i++) {
2321 s = &bus->ifs[i];
2322 ide_perform_srst(s);
2323 }
2324
2325 bus->cmd &= ~IDE_CTRL_RESET;
2326 }
2327
2328 void ide_ctrl_write(void *opaque, uint32_t addr, uint32_t val)
2329 {
2330 IDEBus *bus = opaque;
2331 IDEState *s;
2332 int i;
2333
2334 trace_ide_ctrl_write(addr, val, bus);
2335
2336 /* Device0 and Device1 each have their own control register,
2337 * but QEMU models it as just one register in the controller. */
2338 if (!(bus->cmd & IDE_CTRL_RESET) && (val & IDE_CTRL_RESET)) {
2339 for (i = 0; i < 2; i++) {
2340 s = &bus->ifs[i];
2341 s->status |= BUSY_STAT;
2342 }
2343 replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
2344 ide_bus_perform_srst, bus);
2345 }
2346
2347 bus->cmd = val;
2348 }
2349
2350 /*
2351 * Returns true if the running PIO transfer is a PIO out (i.e. data is
2352 * transferred from the device to the guest), false if it's a PIO in
2353 */
2354 static bool ide_is_pio_out(IDEState *s)
2355 {
2356 if (s->end_transfer_func == ide_sector_write ||
2357 s->end_transfer_func == ide_atapi_cmd) {
2358 return false;
2359 } else if (s->end_transfer_func == ide_sector_read ||
2360 s->end_transfer_func == ide_transfer_stop ||
2361 s->end_transfer_func == ide_atapi_cmd_reply_end ||
2362 s->end_transfer_func == ide_dummy_transfer_stop) {
2363 return true;
2364 }
2365
2366 abort();
2367 }
2368
/*
 * Guest write to the 16-bit PIO Data register.  Data is only accepted
 * while DRQ is set and the transfer direction is guest-to-device; when
 * the buffer fills, DRQ drops and the end-of-transfer hook runs.
 */
void ide_data_writew(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;

    trace_ide_data_writew(addr, val, bus, s);

    /* PIO data access allowed only when DRQ bit is set. The result of a write
     * during PIO out is indeterminate, just ignore it. */
    if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
        return;
    }

    p = s->data_ptr;
    if (s->io8) {
        /* 8-bit transfer mode: one byte per access. */
        if (p + 1 > s->data_end) {
            return;
        }

        *p++ = val;
    } else {
        /* Bounds check guards against overrunning the transfer buffer. */
        if (p + 2 > s->data_end) {
            return;
        }

        *(uint16_t *)p = le16_to_cpu(val);
        p += 2;
    }
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
}
2404
/*
 * Guest read of the 16-bit PIO Data register.  Only valid while DRQ is
 * set and the transfer direction is device-to-guest; otherwise returns
 * 0 without advancing the buffer pointer.
 */
uint32_t ide_data_readw(void *opaque, uint32_t addr)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;
    int ret;

    /* PIO data access allowed only when DRQ bit is set. The result of a read
     * during PIO in is indeterminate, return 0 and don't move forward. */
    if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
        return 0;
    }

    p = s->data_ptr;
    if (s->io8) {
        /* 8-bit transfer mode: one byte per access. */
        if (p + 1 > s->data_end) {
            return 0;
        }

        ret = *p++;
    } else {
        /* Bounds check guards against reading past the transfer buffer. */
        if (p + 2 > s->data_end) {
            return 0;
        }

        ret = cpu_to_le16(*(uint16_t *)p);
        p += 2;
    }
    s->data_ptr = p;
    if (p >= s->data_end) {
        /* Buffer drained: end the transfer and run its completion hook. */
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }

    trace_ide_data_readw(addr, ret, bus, s);
    return ret;
}
2442
/*
 * Guest write to the PIO Data register, 32-bit access.  Same rules as
 * ide_data_writew(): ignored unless DRQ is set and the transfer is a
 * PIO in (guest-to-device).
 */
void ide_data_writel(void *opaque, uint32_t addr, uint32_t val)
{
    IDEBus *bus = opaque;
    IDEState *s = idebus_active_if(bus);
    uint8_t *p;

    trace_ide_data_writel(addr, val, bus, s);

    /* PIO data access allowed only when DRQ bit is set. The result of a write
     * during PIO out is indeterminate, just ignore it. */
    if (!(s->status & DRQ_STAT) || ide_is_pio_out(s)) {
        return;
    }

    p = s->data_ptr;
    /* Bounds check guards against overrunning the transfer buffer. */
    if (p + 4 > s->data_end) {
        return;
    }

    *(uint32_t *)p = le32_to_cpu(val);
    p += 4;
    s->data_ptr = p;
    if (p >= s->data_end) {
        s->status &= ~DRQ_STAT;
        s->end_transfer_func(s);
    }
}
2470
2471 uint32_t ide_data_readl(void *opaque, uint32_t addr)
2472 {
2473 IDEBus *bus = opaque;
2474 IDEState *s = idebus_active_if(bus);
2475 uint8_t *p;
2476 int ret;
2477
2478 /* PIO data access allowed only when DRQ bit is set. The result of a read
2479 * during PIO in is indeterminate, return 0 and don't move forward. */
2480 if (!(s->status & DRQ_STAT) || !ide_is_pio_out(s)) {
2481 ret = 0;
2482 goto out;
2483 }
2484
2485 p = s->data_ptr;
2486 if (p + 4 > s->data_end) {
2487 return 0;
2488 }
2489
2490 ret = cpu_to_le32(*(uint32_t *)p);
2491 p += 4;
2492 s->data_ptr = p;
2493 if (p >= s->data_end) {
2494 s->status &= ~DRQ_STAT;
2495 s->end_transfer_func(s);
2496 }
2497
2498 out:
2499 trace_ide_data_readl(addr, ret, bus, s);
2500 return ret;
2501 }
2502
2503 static void ide_dummy_transfer_stop(IDEState *s)
2504 {
2505 s->data_ptr = s->io_buffer;
2506 s->data_end = s->io_buffer;
2507 s->io_buffer[0] = 0xff;
2508 s->io_buffer[1] = 0xff;
2509 s->io_buffer[2] = 0xff;
2510 s->io_buffer[3] = 0xff;
2511 }
2512
/*
 * Hard reset of an IDE bus: clear device selection and the control
 * register, reset both drives, cancel any in-flight DMA, and reset the
 * DMA provider if it supports it.
 */
void ide_bus_reset(IDEBus *bus)
{
    bus->unit = 0;
    bus->cmd = 0;
    ide_reset(&bus->ifs[0]);
    ide_reset(&bus->ifs[1]);
    ide_clear_hob(bus);

    /* pending async DMA */
    if (bus->dma->aiocb) {
        trace_ide_bus_reset_aio();
        blk_aio_cancel(bus->dma->aiocb);
        bus->dma->aiocb = NULL;
    }

    /* reset dma provider too */
    if (bus->dma->ops->reset) {
        bus->dma->ops->reset(bus->dma);
    }
}
2533
2534 static bool ide_cd_is_tray_open(void *opaque)
2535 {
2536 return ((IDEState *)opaque)->tray_open;
2537 }
2538
2539 static bool ide_cd_is_medium_locked(void *opaque)
2540 {
2541 return ((IDEState *)opaque)->tray_locked;
2542 }
2543
2544 static void ide_resize_cb(void *opaque)
2545 {
2546 IDEState *s = opaque;
2547 uint64_t nb_sectors;
2548
2549 if (!s->identify_set) {
2550 return;
2551 }
2552
2553 blk_get_geometry(s->blk, &nb_sectors);
2554 s->nb_sectors = nb_sectors;
2555
2556 /* Update the identify data buffer. */
2557 if (s->drive_kind == IDE_CFATA) {
2558 ide_cfata_identify_size(s);
2559 } else {
2560 /* IDE_CD uses a different set of callbacks entirely. */
2561 assert(s->drive_kind != IDE_CD);
2562 ide_identify_size(s);
2563 }
2564 }
2565
/* Backend callbacks for CD-ROM drives (removable-media handling). */
static const BlockDevOps ide_cd_block_ops = {
    .change_media_cb = ide_cd_change_cb,
    .eject_request_cb = ide_cd_eject_request_cb,
    .is_tray_open = ide_cd_is_tray_open,
    .is_medium_locked = ide_cd_is_medium_locked,
};
2572
/* Backend callbacks for fixed disks: only resize notification. */
static const BlockDevOps ide_hd_block_ops = {
    .resize_cb = ide_resize_cb,
};
2576
2577 int ide_init_drive(IDEState *s, BlockBackend *blk, IDEDriveKind kind,
2578 const char *version, const char *serial, const char *model,
2579 uint64_t wwn,
2580 uint32_t cylinders, uint32_t heads, uint32_t secs,
2581 int chs_trans, Error **errp)
2582 {
2583 uint64_t nb_sectors;
2584
2585 s->blk = blk;
2586 s->drive_kind = kind;
2587
2588 blk_get_geometry(blk, &nb_sectors);
2589 s->cylinders = cylinders;
2590 s->heads = s->drive_heads = heads;
2591 s->sectors = s->drive_sectors = secs;
2592 s->chs_trans = chs_trans;
2593 s->nb_sectors = nb_sectors;
2594 s->wwn = wwn;
2595 /* The SMART values should be preserved across power cycles
2596 but they aren't. */
2597 s->smart_enabled = 1;
2598 s->smart_autosave = 1;
2599 s->smart_errors = 0;
2600 s->smart_selftest_count = 0;
2601 if (kind == IDE_CD) {
2602 blk_set_dev_ops(blk, &ide_cd_block_ops, s);
2603 } else {
2604 if (!blk_is_inserted(s->blk)) {
2605 error_setg(errp, "Device needs media, but drive is empty");
2606 return -1;
2607 }
2608 if (!blk_is_writable(blk)) {
2609 error_setg(errp, "Can't use a read-only drive");
2610 return -1;
2611 }
2612 blk_set_dev_ops(blk, &ide_hd_block_ops, s);
2613 }
2614 if (serial) {
2615 pstrcpy(s->drive_serial_str, sizeof(s->drive_serial_str), serial);
2616 } else {
2617 snprintf(s->drive_serial_str, sizeof(s->drive_serial_str),
2618 "QM%05d", s->drive_serial);
2619 }
2620 if (model) {
2621 pstrcpy(s->drive_model_str, sizeof(s->drive_model_str), model);
2622 } else {
2623 switch (kind) {
2624 case IDE_CD:
2625 strcpy(s->drive_model_str, "QEMU DVD-ROM");
2626 break;
2627 case IDE_CFATA:
2628 strcpy(s->drive_model_str, "QEMU MICRODRIVE");
2629 break;
2630 default:
2631 strcpy(s->drive_model_str, "QEMU HARDDISK");
2632 break;
2633 }
2634 }
2635
2636 if (version) {
2637 pstrcpy(s->version, sizeof(s->version), version);
2638 } else {
2639 pstrcpy(s->version, sizeof(s->version), qemu_hw_version());
2640 }
2641
2642 ide_reset(s);
2643 blk_iostatus_enable(blk);
2644 return 0;
2645 }
2646
/* One-time init of per-drive state that does not depend on the backend. */
static void ide_init1(IDEBus *bus, int unit)
{
    static int drive_serial = 1;
    IDEState *s = &bus->ifs[unit];

    s->bus = bus;
    s->unit = unit;
    /* Monotonic serial feeding the default "QM%05d" serial string. */
    s->drive_serial = drive_serial++;
    /* we need at least 2k alignment for accessing CDROMs using O_DIRECT */
    s->io_buffer_total_len = IDE_DMA_BUF_SECTORS*512 + 4;
    s->io_buffer = qemu_memalign(2048, s->io_buffer_total_len);
    memset(s->io_buffer, 0, s->io_buffer_total_len);

    s->smart_selftest_data = blk_blockalign(s->blk, 512);
    memset(s->smart_selftest_data, 0, 512);

    s->sector_write_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                         ide_sector_write_timer_cb, s);
}
2666
2667 static int ide_nop_int(const IDEDMA *dma, bool is_write)
2668 {
2669 return 0;
2670 }
2671
2672 static void ide_nop(const IDEDMA *dma)
2673 {
2674 }
2675
2676 static int32_t ide_nop_int32(const IDEDMA *dma, int32_t l)
2677 {
2678 return 0;
2679 }
2680
/* Placeholder DMA ops used until an HBA installs a real provider. */
static const IDEDMAOps ide_dma_nop_ops = {
    .prepare_buf = ide_nop_int32,
    .restart_dma = ide_nop,
    .rw_buf = ide_nop_int,
};
2686
/*
 * Re-issue an interrupted DMA request, restoring unit, start sector
 * and sector count from the bus retry fields saved at submission time.
 */
static void ide_restart_dma(IDEState *s, enum ide_dma_cmd dma_cmd)
{
    s->unit = s->bus->retry_unit;
    ide_set_sector(s, s->bus->retry_sector_num);
    s->nsector = s->bus->retry_nsector;
    s->bus->dma->ops->restart_dma(s->bus->dma);
    s->io_buffer_size = 0;
    s->dma_cmd = dma_cmd;
    ide_start_dma(s, ide_dma_cb);
}
2697
/*
 * Bottom half that retries the request recorded in bus->error_status
 * after the VM resumes (werror/rerror=stop recovery).  Dispatches to
 * the DMA, PIO, flush or ATAPI restart path based on the saved flags.
 */
static void ide_restart_bh(void *opaque)
{
    IDEBus *bus = opaque;
    IDEState *s;
    bool is_read;
    int error_status;

    qemu_bh_delete(bus->bh);
    bus->bh = NULL;

    error_status = bus->error_status;
    if (bus->error_status == 0) {
        return;
    }

    s = idebus_active_if(bus);
    is_read = (bus->error_status & IDE_RETRY_READ) != 0;

    /* The error status must be cleared before resubmitting the request: The
     * request may fail again, and this case can only be distinguished if the
     * called function can set a new error status. */
    bus->error_status = 0;

    /* The HBA has generically asked to be kicked on retry */
    if (error_status & IDE_RETRY_HBA) {
        if (s->bus->dma->ops->restart) {
            s->bus->dma->ops->restart(s->bus->dma);
        }
    } else if (IS_IDE_RETRY_DMA(error_status)) {
        if (error_status & IDE_RETRY_TRIM) {
            ide_restart_dma(s, IDE_DMA_TRIM);
        } else {
            ide_restart_dma(s, is_read ? IDE_DMA_READ : IDE_DMA_WRITE);
        }
    } else if (IS_IDE_RETRY_PIO(error_status)) {
        if (is_read) {
            ide_sector_read(s);
        } else {
            ide_sector_write(s);
        }
    } else if (error_status & IDE_RETRY_FLUSH) {
        ide_flush_cache(s);
    } else if (IS_IDE_RETRY_ATAPI(error_status)) {
        assert(s->end_transfer_func == ide_atapi_cmd);
        ide_atapi_dma_restart(s);
    } else {
        /* Unknown retry flag combination: programming error. */
        abort();
    }
}
2747
2748 static void ide_restart_cb(void *opaque, bool running, RunState state)
2749 {
2750 IDEBus *bus = opaque;
2751
2752 if (!running)
2753 return;
2754
2755 if (!bus->bh) {
2756 bus->bh = qemu_bh_new(ide_restart_bh, bus);
2757 qemu_bh_schedule(bus->bh);
2758 }
2759 }
2760
2761 void ide_register_restart_cb(IDEBus *bus)
2762 {
2763 if (bus->dma->ops->restart_dma) {
2764 bus->vmstate = qemu_add_vm_change_state_handler(ide_restart_cb, bus);
2765 }
2766 }
2767
/* Shared fallback DMA provider installed by ide_init2(). */
static IDEDMA ide_dma_nop = {
    .ops = &ide_dma_nop_ops,
    .aiocb = NULL,
};
2772
2773 void ide_init2(IDEBus *bus, qemu_irq irq)
2774 {
2775 int i;
2776
2777 for(i = 0; i < 2; i++) {
2778 ide_init1(bus, i);
2779 ide_reset(&bus->ifs[i]);
2780 }
2781 bus->irq = irq;
2782 bus->dma = &ide_dma_nop;
2783 }
2784
2785 void ide_exit(IDEState *s)
2786 {
2787 timer_free(s->sector_write_timer);
2788 qemu_vfree(s->smart_selftest_data);
2789 qemu_vfree(s->io_buffer);
2790 }
2791
2792 static bool is_identify_set(void *opaque, int version_id)
2793 {
2794 IDEState *s = opaque;
2795
2796 return s->identify_set != 0;
2797 }
2798
/*
 * End-transfer callbacks migrated by index (end_transfer_fn_idx), so
 * the order of entries is part of the migration format — do not
 * reorder.
 */
static EndTransferFunc* transfer_end_table[] = {
    ide_sector_read,
    ide_sector_write,
    ide_transfer_stop,
    ide_atapi_cmd_reply_end,
    ide_atapi_cmd,
    ide_dummy_transfer_stop,
};
2807
2808 static int transfer_end_table_idx(EndTransferFunc *fn)
2809 {
2810 int i;
2811
2812 for (i = 0; i < ARRAY_SIZE(transfer_end_table); i++)
2813 if (transfer_end_table[i] == fn)
2814 return i;
2815
2816 return -1;
2817 }
2818
static int ide_drive_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    /*
     * Re-apply the guest-visible write-cache setting from the migrated
     * IDENTIFY data (word 85 bit 5 per ATA) to the block backend.
     */
    if (s->blk && s->identify_set) {
        blk_set_enable_write_cache(s->blk, !!(s->identify_data[85] & (1 << 5)));
    }
    return 0;
}
2828
/* Rebuild PIO transfer pointers from the migrated offsets. */
static int ide_drive_pio_post_load(void *opaque, int version_id)
{
    IDEState *s = opaque;

    /* Reject corrupt/malicious streams carrying an out-of-range index. */
    if (s->end_transfer_fn_idx >= ARRAY_SIZE(transfer_end_table)) {
        return -EINVAL;
    }
    s->end_transfer_func = transfer_end_table[s->end_transfer_fn_idx];
    s->data_ptr = s->io_buffer + s->cur_io_buffer_offset;
    s->data_end = s->data_ptr + s->cur_io_buffer_len;
    s->atapi_dma = s->feature & 1; /* as per cmd_packet */

    return 0;
}
2843
/* Convert PIO transfer pointers into migratable offsets/indices. */
static int ide_drive_pio_pre_save(void *opaque)
{
    IDEState *s = opaque;
    int idx;

    s->cur_io_buffer_offset = s->data_ptr - s->io_buffer;
    s->cur_io_buffer_len = s->data_end - s->data_ptr;

    idx = transfer_end_table_idx(s->end_transfer_func);
    if (idx == -1) {
        fprintf(stderr, "%s: invalid end_transfer_func for DRQ_STAT\n",
                __func__);
        /* Fall back to index 2 == ide_transfer_stop as a safe default. */
        s->end_transfer_fn_idx = 2;
    } else {
        s->end_transfer_fn_idx = idx;
    }

    return 0;
}
2863
2864 static bool ide_drive_pio_state_needed(void *opaque)
2865 {
2866 IDEState *s = opaque;
2867
2868 return ((s->status & DRQ_STAT) != 0)
2869 || (s->bus->error_status & IDE_RETRY_PIO);
2870 }
2871
2872 static bool ide_tray_state_needed(void *opaque)
2873 {
2874 IDEState *s = opaque;
2875
2876 return s->tray_open || s->tray_locked;
2877 }
2878
2879 static bool ide_atapi_gesn_needed(void *opaque)
2880 {
2881 IDEState *s = opaque;
2882
2883 return s->events.new_media || s->events.eject_request;
2884 }
2885
2886 static bool ide_error_needed(void *opaque)
2887 {
2888 IDEBus *bus = opaque;
2889
2890 return (bus->error_status != 0);
2891 }
2892
2893 /* Fields for GET_EVENT_STATUS_NOTIFICATION ATAPI command */
2894 static const VMStateDescription vmstate_ide_atapi_gesn_state = {
2895 .name ="ide_drive/atapi/gesn_state",
2896 .version_id = 1,
2897 .minimum_version_id = 1,
2898 .needed = ide_atapi_gesn_needed,
2899 .fields = (VMStateField[]) {
2900 VMSTATE_BOOL(events.new_media, IDEState),
2901 VMSTATE_BOOL(events.eject_request, IDEState),
2902 VMSTATE_END_OF_LIST()
2903 }
2904 };
2905
/* CD tray state subsection (see ide_tray_state_needed). */
static const VMStateDescription vmstate_ide_tray_state = {
    .name = "ide_drive/tray_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_tray_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(tray_open, IDEState),
        VMSTATE_BOOL(tray_locked, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
2917
/* In-flight PIO transfer state (buffer contents plus pointer offsets). */
static const VMStateDescription vmstate_ide_drive_pio_state = {
    .name = "ide_drive/pio_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = ide_drive_pio_pre_save,
    .post_load = ide_drive_pio_post_load,
    .needed = ide_drive_pio_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(req_nb_sectors, IDEState),
        VMSTATE_VARRAY_INT32(io_buffer, IDEState, io_buffer_total_len, 1,
                             vmstate_info_uint8, uint8_t),
        VMSTATE_INT32(cur_io_buffer_offset, IDEState),
        VMSTATE_INT32(cur_io_buffer_len, IDEState),
        VMSTATE_UINT8(end_transfer_fn_idx, IDEState),
        VMSTATE_INT32(elementary_transfer_size, IDEState),
        VMSTATE_INT32(packet_transfer_size, IDEState),
        VMSTATE_END_OF_LIST()
    }
};
2937
/* Main per-drive migration state: register file plus IDENTIFY data. */
const VMStateDescription vmstate_ide_drive = {
    .name = "ide_drive",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_drive_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(mult_sectors, IDEState),
        VMSTATE_INT32(identify_set, IDEState),
        VMSTATE_BUFFER_TEST(identify_data, IDEState, is_identify_set),
        VMSTATE_UINT8(feature, IDEState),
        VMSTATE_UINT8(error, IDEState),
        VMSTATE_UINT32(nsector, IDEState),
        VMSTATE_UINT8(sector, IDEState),
        VMSTATE_UINT8(lcyl, IDEState),
        VMSTATE_UINT8(hcyl, IDEState),
        VMSTATE_UINT8(hob_feature, IDEState),
        VMSTATE_UINT8(hob_sector, IDEState),
        VMSTATE_UINT8(hob_nsector, IDEState),
        VMSTATE_UINT8(hob_lcyl, IDEState),
        VMSTATE_UINT8(hob_hcyl, IDEState),
        VMSTATE_UINT8(select, IDEState),
        VMSTATE_UINT8(status, IDEState),
        VMSTATE_UINT8(lba48, IDEState),
        VMSTATE_UINT8(sense_key, IDEState),
        VMSTATE_UINT8(asc, IDEState),
        VMSTATE_UINT8_V(cdrom_changed, IDEState, 3),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_drive_pio_state,
        &vmstate_ide_tray_state,
        &vmstate_ide_atapi_gesn_state,
        NULL
    }
};
2973
2974 static const VMStateDescription vmstate_ide_error_status = {
2975 .name ="ide_bus/error",
2976 .version_id = 2,
2977 .minimum_version_id = 1,
2978 .needed = ide_error_needed,
2979 .fields = (VMStateField[]) {
2980 VMSTATE_INT32(error_status, IDEBus),
2981 VMSTATE_INT64_V(retry_sector_num, IDEBus, 2),
2982 VMSTATE_UINT32_V(retry_nsector, IDEBus, 2),
2983 VMSTATE_UINT8_V(retry_unit, IDEBus, 2),
2984 VMSTATE_END_OF_LIST()
2985 }
2986 };
2987
/* Per-bus migration state: control register and selected unit. */
const VMStateDescription vmstate_ide_bus = {
    .name = "ide_bus",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, IDEBus),
        VMSTATE_UINT8(unit, IDEBus),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_ide_error_status,
        NULL
    }
};
3002
3003 void ide_drive_get(DriveInfo **hd, int n)
3004 {
3005 int i;
3006
3007 for (i = 0; i < n; i++) {
3008 hd[i] = drive_get_by_index(IF_IDE, i);
3009 }
3010 }