/*
 * s390 PCI instructions
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Frank Blaschka <frank.blaschka@de.ibm.com>
 *            Hong Bo Li <lihbbj@cn.ibm.com>
 *            Yi Min Zhao <zyimin@cn.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or (at
 * your option) any later version. See the COPYING file in the top-level
 * directory.
 */

#include "qemu/osdep.h"
#include "exec/memop.h"
#include "exec/memory-internal.h"
#include "qemu/error-report.h"
#include "sysemu/hw_accel.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/tod.h"

#ifndef DEBUG_S390PCI_INST
#define DEBUG_S390PCI_INST 0
#endif

#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_S390PCI_INST) { \
            fprintf(stderr, "s390pci-inst: " fmt, ## __VA_ARGS__); \
        } \
    } while (0)

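/*
 * Bookkeeping helpers for the per-IOMMU DMA mapping limit (only used when
 * such a limit is in effect, e.g. for vfio-backed devices).
 */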
static inline void inc_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit) {
        iommu->dma_limit->avail++;
    }
}

static inline void dec_dma_avail(S390PCIIOMMU *iommu)
{
    if (iommu->dma_limit) {
        iommu->dma_limit->avail--;
    }
}

static void s390_set_status_code(CPUS390XState *env,
                                 uint8_t r, uint64_t status_code)
{
    env->regs[r] &= ~0xff000000ULL;
    env->regs[r] |= (status_code & 0xff) << 24;
}

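/*
 * Handler for the CLP "list PCI functions" command: fills the response
 * header and one ClpFhListEntry per available zPCI function, resuming
 * from the requested resume_token when the list spans multiple calls.
 */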
static int list_pci(ClpReqRspListPci *rrb, uint8_t *cc)
{
    S390PCIBusDevice *pbdev = NULL;
    S390pciState *s = s390_get_phb();
    uint32_t res_code, initial_l2, g_l2;
    int rc, i;
    uint64_t resume_token;

    rc = 0;
    if (lduw_p(&rrb->request.hdr.len) != 32) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & CLP_MASK_FMT) != 0) {
        res_code = CLP_RC_FMT;
        rc = -EINVAL;
        goto out;
    }

    if ((ldl_p(&rrb->request.fmt) & ~CLP_MASK_FMT) != 0 ||
        ldq_p(&rrb->request.reserved1) != 0) {
        res_code = CLP_RC_RESNOT0;
        rc = -EINVAL;
        goto out;
    }

    resume_token = ldq_p(&rrb->request.resume_token);

    if (resume_token) {
        pbdev = s390_pci_find_dev_by_idx(s, resume_token);
        if (!pbdev) {
            res_code = CLP_RC_LISTPCI_BADRT;
            rc = -EINVAL;
            goto out;
        }
    } else {
        pbdev = s390_pci_find_next_avail_dev(s, NULL);
    }

    if (lduw_p(&rrb->response.hdr.len) < 48) {
        res_code = CLP_RC_8K;
        rc = -EINVAL;
        goto out;
    }

    initial_l2 = lduw_p(&rrb->response.hdr.len);
    if ((initial_l2 - LIST_PCI_HDR_LEN) % sizeof(ClpFhListEntry)
        != 0) {
        res_code = CLP_RC_LEN;
        rc = -EINVAL;
        *cc = 3;
        goto out;
    }

    stl_p(&rrb->response.fmt, 0);
    stq_p(&rrb->response.reserved1, 0);
    stl_p(&rrb->response.mdd, FH_MASK_SHM);
    stw_p(&rrb->response.max_fn, PCI_MAX_FUNCTIONS);
    rrb->response.flags = UID_CHECKING_ENABLED;
    rrb->response.entry_size = sizeof(ClpFhListEntry);

    i = 0;
    g_l2 = LIST_PCI_HDR_LEN;
    while (g_l2 < initial_l2 && pbdev) {
        stw_p(&rrb->response.fh_list[i].device_id,
            pci_get_word(pbdev->pdev->config + PCI_DEVICE_ID));
        stw_p(&rrb->response.fh_list[i].vendor_id,
            pci_get_word(pbdev->pdev->config + PCI_VENDOR_ID));
        /* Ignore RESERVED devices. */
        stl_p(&rrb->response.fh_list[i].config,
            pbdev->state == ZPCI_FS_STANDBY ? 0 : 1 << 31);
        stl_p(&rrb->response.fh_list[i].fid, pbdev->fid);
        stl_p(&rrb->response.fh_list[i].fh, pbdev->fh);

        g_l2 += sizeof(ClpFhListEntry);
        /* Add endian check for DPRINTF? */
        DPRINTF("g_l2 %d vendor id 0x%x device id 0x%x fid 0x%x fh 0x%x\n",
                g_l2,
                lduw_p(&rrb->response.fh_list[i].vendor_id),
                lduw_p(&rrb->response.fh_list[i].device_id),
                ldl_p(&rrb->response.fh_list[i].fid),
                ldl_p(&rrb->response.fh_list[i].fh));
        pbdev = s390_pci_find_next_avail_dev(s, pbdev);
        i++;
    }

    if (g_l2 < initial_l2) {
        resume_token = 0;
    } else {
        resume_token = pbdev->fh & FH_MASK_INDEX;
    }
    stq_p(&rrb->response.resume_token, resume_token);
    stw_p(&rrb->response.hdr.len, g_l2);
    stw_p(&rrb->response.hdr.rsp, CLP_RC_OK);
out:
    if (rc) {
        DPRINTF("list pci failed rc 0x%x\n", rc);
        stw_p(&rrb->response.hdr.rsp, res_code);
    }
    return rc;
}

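/*
 * CLP instruction handler: copy the request/response block from guest
 * memory, dispatch on the CLP command (list, set, query function, query
 * function group) and write the response block back to the guest.
 */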
int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
{
    ClpReqHdr *reqh;
    ClpRspHdr *resh;
    S390PCIBusDevice *pbdev;
    uint32_t req_len;
    uint32_t res_len;
    uint8_t buffer[4096 * 2];
    uint8_t cc = 0;
    CPUS390XState *env = &cpu->env;
    S390pciState *s = s390_get_phb();
    int i;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer, sizeof(*reqh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    reqh = (ClpReqHdr *)buffer;
    req_len = lduw_p(&reqh->len);
    if (req_len < 16 || req_len > 8184 || (req_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + sizeof(*resh))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    resh = (ClpRspHdr *)(buffer + req_len);
    res_len = lduw_p(&resh->len);
    if (res_len < 8 || res_len > 8176 || (res_len % 8 != 0)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }
    if ((req_len + res_len) > 8192) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, env->regs[r2], r2, buffer,
                               req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (req_len != 32) {
        stw_p(&resh->rsp, CLP_RC_LEN);
        goto out;
    }

    switch (lduw_p(&reqh->cmd)) {
    case CLP_LIST_PCI: {
        ClpReqRspListPci *rrb = (ClpReqRspListPci *)buffer;
        list_pci(rrb, &cc);
        break;
    }
    case CLP_SET_PCI_FN: {
        ClpReqSetPci *reqsetpci = (ClpReqSetPci *)reqh;
        ClpRspSetPci *ressetpci = (ClpRspSetPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqsetpci->fh));
        if (!pbdev) {
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        switch (reqsetpci->oc) {
        case CLP_SET_ENABLE_PCI_FN:
            switch (reqsetpci->ndas) {
            case 0:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_DMAAS);
                goto out;
            case 1:
                break;
            default:
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_RES);
                goto out;
            }

            if (pbdev->fh & FH_MASK_ENABLE) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }

            pbdev->fh |= FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_ENABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        case CLP_SET_DISABLE_PCI_FN:
            if (!(pbdev->fh & FH_MASK_ENABLE)) {
                stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
                goto out;
            }
            device_legacy_reset(DEVICE(pbdev));
            pbdev->fh &= ~FH_MASK_ENABLE;
            pbdev->state = ZPCI_FS_DISABLED;
            stl_p(&ressetpci->fh, pbdev->fh);
            stw_p(&ressetpci->hdr.rsp, CLP_RC_OK);
            break;
        default:
            DPRINTF("unknown set pci command\n");
            stw_p(&ressetpci->hdr.rsp, CLP_RC_SETPCIFN_FHOP);
            break;
        }
        break;
    }
    case CLP_QUERY_PCI_FN: {
        ClpReqQueryPci *reqquery = (ClpReqQueryPci *)reqh;
        ClpRspQueryPci *resquery = (ClpRspQueryPci *)resh;

        pbdev = s390_pci_find_dev_by_fh(s, ldl_p(&reqquery->fh));
        if (!pbdev) {
            DPRINTF("query pci no pci dev\n");
            stw_p(&resquery->hdr.rsp, CLP_RC_SETPCIFN_FH);
            goto out;
        }

        for (i = 0; i < PCI_BAR_COUNT; i++) {
            uint32_t data = pci_get_long(pbdev->pdev->config +
                PCI_BASE_ADDRESS_0 + (i * 4));

            stl_p(&resquery->bar[i], data);
            resquery->bar_size[i] = pbdev->pdev->io_regions[i].size ?
                                    ctz64(pbdev->pdev->io_regions[i].size) : 0;
            DPRINTF("bar %d addr 0x%x size 0x%" PRIx64 "barsize 0x%x\n", i,
                    ldl_p(&resquery->bar[i]),
                    pbdev->pdev->io_regions[i].size,
                    resquery->bar_size[i]);
        }

        stq_p(&resquery->sdma, ZPCI_SDMA_ADDR);
        stq_p(&resquery->edma, ZPCI_EDMA_ADDR);
        stl_p(&resquery->fid, pbdev->fid);
        stw_p(&resquery->pchid, 0);
        stw_p(&resquery->ug, ZPCI_DEFAULT_FN_GRP);
        stl_p(&resquery->uid, pbdev->uid);
        stw_p(&resquery->hdr.rsp, CLP_RC_OK);
        break;
    }
    case CLP_QUERY_PCI_FNGRP: {
        ClpRspQueryPciGrp *resgrp = (ClpRspQueryPciGrp *)resh;

        ClpReqQueryPciGrp *reqgrp = (ClpReqQueryPciGrp *)reqh;
        S390PCIGroup *group;

        group = s390_group_find(reqgrp->g);
        if (!group) {
            /* We do not allow access to unknown groups */
            /* The group must have been obtained with a vfio device */
            stw_p(&resgrp->hdr.rsp, CLP_RC_QUERYPCIFG_PFGID);
            goto out;
        }
        memcpy(resgrp, &group->zpci_group, sizeof(ClpRspQueryPciGrp));
        stw_p(&resgrp->hdr.rsp, CLP_RC_OK);
        break;
    }
    default:
        DPRINTF("unknown clp command\n");
        stw_p(&resh->rsp, CLP_RC_CMD);
        break;
    }

out:
    if (s390_cpu_virt_mem_write(cpu, env->regs[r2], r2, buffer,
                                req_len + res_len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }
    setcc(cpu, cc);
    return 0;
}

/**
 * Swap data contained in s390x big endian registers to little endian
 * PCI bars.
 *
 * @ptr: a pointer to a uint64_t data field
 * @len: the length of the valid data, must be 1,2,4 or 8
 */
static int zpci_endian_swap(uint64_t *ptr, uint8_t len)
{
    uint64_t data = *ptr;

    switch (len) {
    case 1:
        break;
    case 2:
        data = bswap16(data);
        break;
    case 4:
        data = bswap32(data);
        break;
    case 8:
        data = bswap64(data);
        break;
    default:
        return -EINVAL;
    }
    *ptr = data;
    return 0;
}

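/*
 * Return the subregion of @mr that fully contains [offset, offset + len),
 * or @mr itself if no such subregion exists.
 */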
static MemoryRegion *s390_get_subregion(MemoryRegion *mr, uint64_t offset,
                                        uint8_t len)
{
    MemoryRegion *subregion;
    uint64_t subregion_size;

    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        subregion_size = int128_get64(subregion->size);
        if ((offset >= subregion->addr) &&
            (offset + len) <= (subregion->addr + subregion_size)) {
            mr = subregion;
            break;
        }
    }
    return mr;
}

static MemTxResult zpci_read_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                 uint64_t offset, uint64_t *data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_read(mr, offset, data,
                                       size_memop(len) | MO_BE,
                                       MEMTXATTRS_UNSPECIFIED);
}

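/*
 * PCI Load: read up to 8 bytes from a memory BAR, or from the config
 * space pseudo BAR (ZPCI_CONFIG_BAR), of the function addressed by the
 * handle in r2, and place the result in r1.
 */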
int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    uint64_t offset;
    uint64_t data;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcilg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        result = zpci_read_bar(pbdev, pcias, offset, &data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        data = pci_host_config_read_common(
                   pbdev->pdev, offset, pci_config_size(pbdev->pdev), len);

        if (zpci_endian_swap(&data, len)) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    default:
        DPRINTF("pcilg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_LD]++;

    env->regs[r1] = data;
    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

static MemTxResult zpci_write_bar(S390PCIBusDevice *pbdev, uint8_t pcias,
                                  uint64_t offset, uint64_t data, uint8_t len)
{
    MemoryRegion *mr;

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;
    return memory_region_dispatch_write(mr, offset, data,
                                        size_memop(len) | MO_BE,
                                        MEMTXATTRS_UNSPECIFIED);
}

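/*
 * PCI Store: write up to 8 bytes from r1 to a memory BAR, or to the
 * config space pseudo BAR, of the function addressed by the handle in r2.
 */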
int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint64_t offset, data;
    S390PCIBusDevice *pbdev;
    MemTxResult result;
    uint8_t len;
    uint32_t fh;
    uint8_t pcias;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r2] >> 32;
    pcias = (env->regs[r2] >> 16) & 0xf;
    len = env->regs[r2] & 0xf;
    offset = env->regs[r2 + 1];
    data = env->regs[r1];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistg no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    /* ZPCI_FS_RESERVED, ZPCI_FS_STANDBY and ZPCI_FS_DISABLED
     * are already covered by the FH_MASK_ENABLE check above
     */
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    switch (pcias) {
        /* A ZPCI PCI card may use any BAR from BAR 0 to BAR 5 */
    case ZPCI_IO_BAR_MIN...ZPCI_IO_BAR_MAX:
        /* Check length:
         * A length of 0 is invalid and length should not cross a double word
         */
        if (!len || (len > (8 - (offset & 0x7)))) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }

        result = zpci_write_bar(pbdev, pcias, offset, data, len);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        break;
    case ZPCI_CONFIG_BAR:
        /* ZPCI uses the pseudo BAR number 15 as configuration space */
        /* possible access lengths are 1,2,4 and must not cross a word */
        if (!len || (len > (4 - (offset & 0x3))) || len == 3) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
        /* len = 1,2,4 so we do not need to test */
        zpci_endian_swap(&data, len);
        pci_host_config_write_common(pbdev->pdev, offset,
                                     pci_config_size(pbdev->pdev),
                                     data, len);
        break;
    default:
        DPRINTF("pcistg invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r2, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_ST]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;
}

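/*
 * Update the cached IOTLB entry for one guest IOVA page and notify the
 * IOMMU memory region; returns the remaining DMA mapping quota, or 1 if
 * no limit is in effect.
 */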
static uint32_t s390_pci_update_iotlb(S390PCIIOMMU *iommu,
                                      S390IOTLBEntry *entry)
{
    S390IOTLBEntry *cache = g_hash_table_lookup(iommu->iotlb, &entry->iova);
    IOMMUTLBEntry notify = {
        .target_as = &address_space_memory,
        .iova = entry->iova,
        .translated_addr = entry->translated_addr,
        .perm = entry->perm,
        .addr_mask = ~PAGE_MASK,
    };

    if (entry->perm == IOMMU_NONE) {
        if (!cache) {
            goto out;
        }
        g_hash_table_remove(iommu->iotlb, &entry->iova);
        inc_dma_avail(iommu);
    } else {
        if (cache) {
            if (cache->perm == entry->perm &&
                cache->translated_addr == entry->translated_addr) {
                goto out;
            }

            notify.perm = IOMMU_NONE;
            memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);
            notify.perm = entry->perm;
        }

        cache = g_new(S390IOTLBEntry, 1);
        cache->iova = entry->iova;
        cache->translated_addr = entry->translated_addr;
        cache->len = PAGE_SIZE;
        cache->perm = entry->perm;
        g_hash_table_replace(iommu->iotlb, &cache->iova, cache);
        dec_dma_avail(iommu);
    }

    memory_region_notify_iommu(&iommu->iommu_mr, 0, notify);

out:
    return iommu->dma_limit ? iommu->dma_limit->avail : 1;
}

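/*
 * Refresh PCI Translations: walk the guest DMA translation tables for the
 * IOVA range described by r2/r2+1 and update the IOTLB accordingly,
 * signalling exhaustion of the DMA mapping limit via a status code.
 */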
int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint32_t fh;
    uint16_t error = 0;
    S390PCIBusDevice *pbdev;
    S390PCIIOMMU *iommu;
    S390IOTLBEntry entry;
    hwaddr start, end;
    uint32_t dma_avail;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    if (r2 & 0x1) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    start = env->regs[r2];
    end = start + env->regs[r2 + 1];

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("rpcit no pci dev\n");
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_MOD_ST_ERROR_RECOVER);
        return 0;
    default:
        break;
    }

    iommu = pbdev->iommu;
    if (iommu->dma_limit) {
        dma_avail = iommu->dma_limit->avail;
    } else {
        dma_avail = 1;
    }
    if (!iommu->g_iota) {
        error = ERR_EVENT_INVALAS;
        goto err;
    }

    if (end < iommu->pba || start > iommu->pal) {
        error = ERR_EVENT_OORANGE;
        goto err;
    }

    while (start < end) {
        error = s390_guest_io_table_walk(iommu->g_iota, start, &entry);
        if (error) {
            break;
        }

        start += entry.len;
        while (entry.iova < start && entry.iova < end &&
               (dma_avail > 0 || entry.perm == IOMMU_NONE)) {
            dma_avail = s390_pci_update_iotlb(iommu, &entry);
            entry.iova += PAGE_SIZE;
            entry.translated_addr += PAGE_SIZE;
        }
    }
err:
    if (error) {
        pbdev->state = ZPCI_FS_ERROR;
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_FUNC_IN_ERR);
        s390_pci_generate_error_event(error, pbdev->fh, pbdev->fid, start, 0);
    } else {
        pbdev->fmb.counter[ZPCI_FMB_CNT_RPCIT]++;
        if (dma_avail > 0) {
            setcc(cpu, ZPCI_PCI_LS_OK);
        } else {
            /* vfio DMA mappings are exhausted, trigger a RPCIT */
            setcc(cpu, ZPCI_PCI_LS_ERR);
            s390_set_status_code(env, r1, ZPCI_RPCIT_ST_INSUFF_RES);
        }
    }
    return 0;
}

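/*
 * PCI Store Block: copy a block of up to maxstbl bytes from guest memory
 * at gaddr to the designated I/O BAR, in 8-byte pieces.
 */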
int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
                        uint8_t ar, uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    S390PCIBusDevice *pbdev;
    MemoryRegion *mr;
    MemTxResult result;
    uint64_t offset;
    int i;
    uint32_t fh;
    uint8_t pcias;
    uint16_t len;
    uint8_t buffer[128];

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    pcias = (env->regs[r1] >> 16) & 0xf;
    len = env->regs[r1] & 0xff;
    offset = env->regs[r3];

    if (!(fh & FH_MASK_ENABLE)) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_PERMANENT_ERROR:
    case ZPCI_FS_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
        return 0;
    default:
        break;
    }

    if (pcias > ZPCI_IO_BAR_MAX) {
        DPRINTF("pcistb invalid space\n");
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
        return 0;
    }

    /* Verify the address, offset and length */
    /* offset must be a multiple of 8 */
    if (offset % 8) {
        goto specification_error;
    }
    /* Length must be greater than 8, a multiple of 8 */
    /* and not greater than maxstbl */
    if ((len <= 8) || (len % 8) ||
        (len > pbdev->pci_group->zpci_group.maxstbl)) {
        goto specification_error;
    }
    /* Do not cross a 4K-byte boundary */
    if (((offset & 0xfff) + len) > 0x1000) {
        goto specification_error;
    }
    /* Guest address must be double word aligned */
    if (gaddr & 0x07UL) {
        goto specification_error;
    }

    mr = pbdev->pdev->io_regions[pcias].memory;
    mr = s390_get_subregion(mr, offset, len);
    offset -= mr->addr;

    if (!memory_region_access_valid(mr, offset, len, true,
                                    MEMTXATTRS_UNSPECIFIED)) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    for (i = 0; i < len / 8; i++) {
        result = memory_region_dispatch_write(mr, offset + i * 8,
                                              ldq_p(buffer + i * 8),
                                              MO_64, MEMTXATTRS_UNSPECIFIED);
        if (result != MEMTX_OK) {
            s390_program_interrupt(env, PGM_OPERAND, ra);
            return 0;
        }
    }

    pbdev->fmb.counter[ZPCI_FMB_CNT_STB]++;

    setcc(cpu, ZPCI_PCI_LS_OK);
    return 0;

specification_error:
    s390_program_interrupt(env, PGM_SPECIFICATION, ra);
    return 0;
}

static int reg_irqs(CPUS390XState *env, S390PCIBusDevice *pbdev, ZpciFib fib)
{
    int ret, len;
    uint8_t isc = FIB_DATA_ISC(ldl_p(&fib.data));

    pbdev->routes.adapter.adapter_id = css_get_adapter_id(
                                       CSS_IO_ADAPTER_PCI, isc);
    pbdev->summary_ind = get_indicator(ldq_p(&fib.aisb), sizeof(uint64_t));
    len = BITS_TO_LONGS(FIB_DATA_NOI(ldl_p(&fib.data))) * sizeof(unsigned long);
    pbdev->indicator = get_indicator(ldq_p(&fib.aibv), len);

    ret = map_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    if (ret) {
        goto out;
    }

    ret = map_indicator(&pbdev->routes.adapter, pbdev->indicator);
    if (ret) {
        goto out;
    }

    pbdev->routes.adapter.summary_addr = ldq_p(&fib.aisb);
    pbdev->routes.adapter.summary_offset = FIB_DATA_AISBO(ldl_p(&fib.data));
    pbdev->routes.adapter.ind_addr = ldq_p(&fib.aibv);
    pbdev->routes.adapter.ind_offset = FIB_DATA_AIBVO(ldl_p(&fib.data));
    pbdev->isc = isc;
    pbdev->noi = FIB_DATA_NOI(ldl_p(&fib.data));
    pbdev->sum = FIB_DATA_SUM(ldl_p(&fib.data));

    DPRINTF("reg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
out:
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);
    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    return ret;
}

int pci_dereg_irqs(S390PCIBusDevice *pbdev)
{
    release_indicator(&pbdev->routes.adapter, pbdev->summary_ind);
    release_indicator(&pbdev->routes.adapter, pbdev->indicator);

    pbdev->summary_ind = NULL;
    pbdev->indicator = NULL;
    pbdev->routes.adapter.summary_addr = 0;
    pbdev->routes.adapter.summary_offset = 0;
    pbdev->routes.adapter.ind_addr = 0;
    pbdev->routes.adapter.ind_offset = 0;
    pbdev->isc = 0;
    pbdev->noi = 0;
    pbdev->sum = 0;

    DPRINTF("dereg_irqs adapter id %d\n", pbdev->routes.adapter.adapter_id);
    return 0;
}

static int reg_ioat(CPUS390XState *env, S390PCIIOMMU *iommu, ZpciFib fib,
                    uintptr_t ra)
{
    uint64_t pba = ldq_p(&fib.pba);
    uint64_t pal = ldq_p(&fib.pal);
    uint64_t g_iota = ldq_p(&fib.iota);
    uint8_t dt = (g_iota >> 2) & 0x7;
    uint8_t t = (g_iota >> 11) & 0x1;

    pba &= ~0xfff;
    pal |= 0xfff;
    if (pba > pal || pba < ZPCI_SDMA_ADDR || pal > ZPCI_EDMA_ADDR) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    /* currently we only support designation type 1 with translation */
    if (!(dt == ZPCI_IOTA_RTTO && t)) {
        error_report("unsupported ioat dt %d t %d", dt, t);
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return -EINVAL;
    }

    iommu->pba = pba;
    iommu->pal = pal;
    iommu->g_iota = g_iota;

    s390_pci_iommu_enable(iommu);

    return 0;
}

void pci_dereg_ioat(S390PCIIOMMU *iommu)
{
    s390_pci_iommu_disable(iommu);
    iommu->pba = 0;
    iommu->pal = 0;
    iommu->g_iota = 0;
}

*pbdev
)
942 if (pbdev
->fmb_timer
) {
943 timer_del(pbdev
->fmb_timer
);
944 timer_free(pbdev
->fmb_timer
);
945 pbdev
->fmb_timer
= NULL
;
948 memset(&pbdev
->fmb
, 0, sizeof(ZpciFmb
));
static int fmb_do_update(S390PCIBusDevice *pbdev, int offset, uint64_t val,
                         int len)
{
    MemTxResult ret;
    uint64_t dst = pbdev->fmb_addr + offset;

    switch (len) {
    case 8:
        address_space_stq_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 4:
        address_space_stl_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 2:
        address_space_stw_be(&address_space_memory, dst, val,
                             MEMTXATTRS_UNSPECIFIED,
                             &ret);
        break;
    case 1:
        address_space_stb(&address_space_memory, dst, val,
                          MEMTXATTRS_UNSPECIFIED,
                          &ret);
        break;
    default:
        ret = MEMTX_ERROR;
        break;
    }
    if (ret != MEMTX_OK) {
        s390_pci_generate_error_event(ERR_EVENT_FMBA, pbdev->fh, pbdev->fid,
                                      pbdev->fmb_addr, 0);
        fmb_timer_free(pbdev);
    }

    return ret;
}

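/*
 * Timer callback: mirror the current Function Measurement Block contents
 * into guest memory at pbdev->fmb_addr and re-arm the update timer.
 */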
static void fmb_update(void *opaque)
{
    S390PCIBusDevice *pbdev = opaque;
    int64_t t = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL);
    int i;

    pbdev->fmb.last_update *= 2;
    pbdev->fmb.last_update |= UPDATE_U_BIT;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }

    /* Update FMB sample count */
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, sample),
                      pbdev->fmb.sample++,
                      sizeof(pbdev->fmb.sample))) {
        return;
    }

    /* Update FMB counters */
    for (i = 0; i < ZPCI_FMB_CNT_MAX; i++) {
        if (fmb_do_update(pbdev, offsetof(ZpciFmb, counter[i]),
                          pbdev->fmb.counter[i],
                          sizeof(pbdev->fmb.counter[0]))) {
            return;
        }
    }

    /* Clear U bit and update the time */
    pbdev->fmb.last_update = time2tod(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL));
    pbdev->fmb.last_update *= 2;
    if (fmb_do_update(pbdev, offsetof(ZpciFmb, last_update),
                      pbdev->fmb.last_update,
                      sizeof(pbdev->fmb.last_update))) {
        return;
    }
    timer_mod(pbdev->fmb_timer, t + DEFAULT_MUI);
}

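/*
 * Modify PCI Function Controls: depending on the operation control in r1,
 * register or deregister interrupts and I/O address translation
 * parameters, reset error/blocked state, or set up the function
 * measurement block.
 */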
int mpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                        uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t oc, dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    oc = env->regs[r1] & 0xff;
    dmaas = (env->regs[r1] >> 16) & 0xff;
    fh = env->regs[r1] >> 32;

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_fh(s390_get_phb(), fh);
    if (!pbdev) {
        DPRINTF("mpcifc no pci dev fh 0x%x\n", fh);
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
    case ZPCI_FS_DISABLED:
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    default:
        break;
    }

    if (s390_cpu_virt_mem_read(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    if (fib.fmt != 0) {
        s390_program_interrupt(env, PGM_OPERAND, ra);
        return 0;
    }

    switch (oc) {
    case ZPCI_MOD_FC_REG_INT:
        if (pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_irqs(env, pbdev, fib)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_RES_NOT_AVAIL);
        }
        break;
    case ZPCI_MOD_FC_DEREG_INT:
        if (!pbdev->summary_ind) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_irqs(pbdev);
        }
        break;
    case ZPCI_MOD_FC_REG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else if (reg_ioat(env, pbdev->iommu, fib, ra)) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
        }
        break;
    case ZPCI_MOD_FC_DEREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
        }
        break;
    case ZPCI_MOD_FC_REREG_IOAT:
        if (dmaas != 0) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_DMAAS_INVAL);
        } else if (!pbdev->iommu->enabled) {
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        } else {
            pci_dereg_ioat(pbdev->iommu);
            if (reg_ioat(env, pbdev->iommu, fib, ra)) {
                cc = ZPCI_PCI_LS_ERR;
                s390_set_status_code(env, r1, ZPCI_MOD_ST_INSUF_RES);
            }
        }
        break;
    case ZPCI_MOD_FC_RESET_ERROR:
        switch (pbdev->state) {
        case ZPCI_FS_BLOCKED:
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_ENABLED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_RESET_BLOCK:
        switch (pbdev->state) {
        case ZPCI_FS_ERROR:
            pbdev->state = ZPCI_FS_BLOCKED;
            break;
        default:
            cc = ZPCI_PCI_LS_ERR;
            s390_set_status_code(env, r1, ZPCI_MOD_ST_SEQUENCE);
        }
        break;
    case ZPCI_MOD_FC_SET_MEASURE: {
        uint64_t fmb_addr = ldq_p(&fib.fmb_addr);

        if (fmb_addr & FMBK_MASK) {
            cc = ZPCI_PCI_LS_ERR;
            s390_pci_generate_error_event(ERR_EVENT_FMBPRO, pbdev->fh,
                                          pbdev->fid, fmb_addr, 0);
            fmb_timer_free(pbdev);
            break;
        }

        if (!fmb_addr) {
            /* Stop updating FMB. */
            fmb_timer_free(pbdev);
            break;
        }

        if (!pbdev->fmb_timer) {
            pbdev->fmb_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                            fmb_update, pbdev);
        } else if (timer_pending(pbdev->fmb_timer)) {
            /* Remove pending timer to update FMB address. */
            timer_del(pbdev->fmb_timer);
        }
        pbdev->fmb_addr = fmb_addr;
        timer_mod(pbdev->fmb_timer,
                  qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + DEFAULT_MUI);
        break;
    }
    default:
        s390_program_interrupt(&cpu->env, PGM_OPERAND, ra);
        cc = ZPCI_PCI_LS_ERR;
    }

    setcc(cpu, cc);
    return 0;
}

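/*
 * Store PCI Function Controls: report the current function state, DMA
 * translation parameters and interrupt registration in a FIB written to
 * guest memory at fiba.
 */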
int stpcifc_service_call(S390CPU *cpu, uint8_t r1, uint64_t fiba, uint8_t ar,
                         uintptr_t ra)
{
    CPUS390XState *env = &cpu->env;
    uint8_t dmaas;
    uint32_t fh;
    ZpciFib fib;
    S390PCIBusDevice *pbdev;
    uint32_t data;
    uint64_t cc = ZPCI_PCI_LS_OK;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        s390_program_interrupt(env, PGM_PRIVILEGED, ra);
        return 0;
    }

    fh = env->regs[r1] >> 32;
    dmaas = (env->regs[r1] >> 16) & 0xff;

    if (dmaas) {
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_INVAL_DMAAS);
        return 0;
    }

    if (fiba & 0x7) {
        s390_program_interrupt(env, PGM_SPECIFICATION, ra);
        return 0;
    }

    pbdev = s390_pci_find_dev_by_idx(s390_get_phb(), fh & FH_MASK_INDEX);
    if (!pbdev) {
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    }

    memset(&fib, 0, sizeof(fib));

    switch (pbdev->state) {
    case ZPCI_FS_RESERVED:
    case ZPCI_FS_STANDBY:
        setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
        return 0;
    case ZPCI_FS_DISABLED:
        if (fh & FH_MASK_ENABLE) {
            setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
            return 0;
        }
        goto out;
    /* BLOCKED bit is set to one coincident with the setting of ERROR bit.
     * FH Enabled bit is set to one in states of ENABLED, BLOCKED or ERROR. */
    case ZPCI_FS_ERROR:
        fib.fc |= 0x20;
        /* fallthrough */
    case ZPCI_FS_BLOCKED:
        fib.fc |= 0x40;
        /* fallthrough */
    case ZPCI_FS_ENABLED:
        fib.fc |= 0x80;
        if (pbdev->iommu->enabled) {
            fib.fc |= 0x10;
        }
        if (!(fh & FH_MASK_ENABLE)) {
            env->regs[r1] |= 1ULL << 63;
        }
        break;
    case ZPCI_FS_PERMANENT_ERROR:
        setcc(cpu, ZPCI_PCI_LS_ERR);
        s390_set_status_code(env, r1, ZPCI_STPCIFC_ST_PERM_ERROR);
        return 0;
    }

    stq_p(&fib.pba, pbdev->iommu->pba);
    stq_p(&fib.pal, pbdev->iommu->pal);
    stq_p(&fib.iota, pbdev->iommu->g_iota);
    stq_p(&fib.aibv, pbdev->routes.adapter.ind_addr);
    stq_p(&fib.aisb, pbdev->routes.adapter.summary_addr);
    stq_p(&fib.fmb_addr, pbdev->fmb_addr);

    data = ((uint32_t)pbdev->isc << 28) | ((uint32_t)pbdev->noi << 16) |
           ((uint32_t)pbdev->routes.adapter.ind_offset << 8) |
           ((uint32_t)pbdev->sum << 7) | pbdev->routes.adapter.summary_offset;
    stl_p(&fib.data, data);

out:
    if (s390_cpu_virt_mem_write(cpu, fiba, ar, (uint8_t *)&fib, sizeof(fib))) {
        s390_cpu_virt_mem_handle_exc(cpu, ra);
        return 0;
    }

    setcc(cpu, cc);
    return 0;
}