/*
 * Imported from ipxe.git: src/include/xen/memory.h
 * (Xen public interface header carried verbatim in the iPXE tree.)
 */
/******************************************************************************
 * memory.h
 *
 * Memory reservation and information.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
 */
26
27 #ifndef __XEN_PUBLIC_MEMORY_H__
28 #define __XEN_PUBLIC_MEMORY_H__
29
30 FILE_LICENCE ( MIT );
31
32 #include "xen.h"
33
34 /*
35 * Increase or decrease the specified domain's memory reservation. Returns the
36 * number of extents successfully allocated or freed.
37 * arg == addr of struct xen_memory_reservation.
38 */
39 #define XENMEM_increase_reservation 0
40 #define XENMEM_decrease_reservation 1
41 #define XENMEM_populate_physmap 6
42
43 #if __XEN_INTERFACE_VERSION__ >= 0x00030209
44 /*
45 * Maximum # bits addressable by the user of the allocated region (e.g., I/O
46 * devices often have a 32-bit limitation even in 64-bit systems). If zero
47 * then the user has no addressing restriction. This field is not used by
48 * XENMEM_decrease_reservation.
49 */
50 #define XENMEMF_address_bits(x) (x)
51 #define XENMEMF_get_address_bits(x) ((x) & 0xffu)
52 /* NUMA node to allocate from. */
53 #define XENMEMF_node(x) (((x) + 1) << 8)
54 #define XENMEMF_get_node(x) ((((x) >> 8) - 1) & 0xffu)
55 /* Flag to populate physmap with populate-on-demand entries */
56 #define XENMEMF_populate_on_demand (1<<16)
57 /* Flag to request allocation only from the node specified */
58 #define XENMEMF_exact_node_request (1<<17)
59 #define XENMEMF_exact_node(n) (XENMEMF_node(n) | XENMEMF_exact_node_request)
60 #endif
61
62 struct xen_memory_reservation {
63
64 /*
65 * XENMEM_increase_reservation:
66 * OUT: MFN (*not* GMFN) bases of extents that were allocated
67 * XENMEM_decrease_reservation:
68 * IN: GMFN bases of extents to free
69 * XENMEM_populate_physmap:
70 * IN: GPFN bases of extents to populate with memory
71 * OUT: GMFN bases of extents that were allocated
72 * (NB. This command also updates the mach_to_phys translation table)
73 * XENMEM_claim_pages:
74 * IN: must be zero
75 */
76 XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
77
78 /* Number of extents, and size/alignment of each (2^extent_order pages). */
79 xen_ulong_t nr_extents;
80 unsigned int extent_order;
81
82 #if __XEN_INTERFACE_VERSION__ >= 0x00030209
83 /* XENMEMF flags. */
84 unsigned int mem_flags;
85 #else
86 unsigned int address_bits;
87 #endif
88
89 /*
90 * Domain whose reservation is being changed.
91 * Unprivileged domains can specify only DOMID_SELF.
92 */
93 domid_t domid;
94 };
95 typedef struct xen_memory_reservation xen_memory_reservation_t;
96 DEFINE_XEN_GUEST_HANDLE(xen_memory_reservation_t);
97
98 /*
99 * An atomic exchange of memory pages. If return code is zero then
100 * @out.extent_list provides GMFNs of the newly-allocated memory.
101 * Returns zero on complete success, otherwise a negative error code.
102 * On complete success then always @nr_exchanged == @in.nr_extents.
103 * On partial success @nr_exchanged indicates how much work was done.
104 */
105 #define XENMEM_exchange 11
106 struct xen_memory_exchange {
107 /*
108 * [IN] Details of memory extents to be exchanged (GMFN bases).
109 * Note that @in.address_bits is ignored and unused.
110 */
111 struct xen_memory_reservation in;
112
113 /*
114 * [IN/OUT] Details of new memory extents.
115 * We require that:
116 * 1. @in.domid == @out.domid
117 * 2. @in.nr_extents << @in.extent_order ==
118 * @out.nr_extents << @out.extent_order
119 * 3. @in.extent_start and @out.extent_start lists must not overlap
120 * 4. @out.extent_start lists GPFN bases to be populated
121 * 5. @out.extent_start is overwritten with allocated GMFN bases
122 */
123 struct xen_memory_reservation out;
124
125 /*
126 * [OUT] Number of input extents that were successfully exchanged:
127 * 1. The first @nr_exchanged input extents were successfully
128 * deallocated.
129 * 2. The corresponding first entries in the output extent list correctly
130 * indicate the GMFNs that were successfully exchanged.
131 * 3. All other input and output extents are untouched.
132 * 4. If not all input exents are exchanged then the return code of this
133 * command will be non-zero.
134 * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
135 */
136 xen_ulong_t nr_exchanged;
137 };
138 typedef struct xen_memory_exchange xen_memory_exchange_t;
139 DEFINE_XEN_GUEST_HANDLE(xen_memory_exchange_t);
140
141 /*
142 * Returns the maximum machine frame number of mapped RAM in this system.
143 * This command always succeeds (it never returns an error code).
144 * arg == NULL.
145 */
146 #define XENMEM_maximum_ram_page 2
147
148 /*
149 * Returns the current or maximum memory reservation, in pages, of the
150 * specified domain (may be DOMID_SELF). Returns -ve errcode on failure.
151 * arg == addr of domid_t.
152 */
153 #define XENMEM_current_reservation 3
154 #define XENMEM_maximum_reservation 4
155
156 /*
157 * Returns the maximum GPFN in use by the guest, or -ve errcode on failure.
158 */
159 #define XENMEM_maximum_gpfn 14
160
161 /*
162 * Returns a list of MFN bases of 2MB extents comprising the machine_to_phys
163 * mapping table. Architectures which do not have a m2p table do not implement
164 * this command.
165 * arg == addr of xen_machphys_mfn_list_t.
166 */
167 #define XENMEM_machphys_mfn_list 5
168 struct xen_machphys_mfn_list {
169 /*
170 * Size of the 'extent_start' array. Fewer entries will be filled if the
171 * machphys table is smaller than max_extents * 2MB.
172 */
173 unsigned int max_extents;
174
175 /*
176 * Pointer to buffer to fill with list of extent starts. If there are
177 * any large discontiguities in the machine address space, 2MB gaps in
178 * the machphys table will be represented by an MFN base of zero.
179 */
180 XEN_GUEST_HANDLE(xen_pfn_t) extent_start;
181
182 /*
183 * Number of extents written to the above array. This will be smaller
184 * than 'max_extents' if the machphys table is smaller than max_e * 2MB.
185 */
186 unsigned int nr_extents;
187 };
188 typedef struct xen_machphys_mfn_list xen_machphys_mfn_list_t;
189 DEFINE_XEN_GUEST_HANDLE(xen_machphys_mfn_list_t);
190
191 /*
192 * For a compat caller, this is identical to XENMEM_machphys_mfn_list.
193 *
194 * For a non compat caller, this functions similarly to
195 * XENMEM_machphys_mfn_list, but returns the mfns making up the compatibility
196 * m2p table.
197 */
198 #define XENMEM_machphys_compat_mfn_list 25
199
200 /*
201 * Returns the location in virtual address space of the machine_to_phys
202 * mapping table. Architectures which do not have a m2p table, or which do not
203 * map it by default into guest address space, do not implement this command.
204 * arg == addr of xen_machphys_mapping_t.
205 */
206 #define XENMEM_machphys_mapping 12
207 struct xen_machphys_mapping {
208 xen_ulong_t v_start, v_end; /* Start and end virtual addresses. */
209 xen_ulong_t max_mfn; /* Maximum MFN that can be looked up. */
210 };
211 typedef struct xen_machphys_mapping xen_machphys_mapping_t;
212 DEFINE_XEN_GUEST_HANDLE(xen_machphys_mapping_t);
213
214 /* Source mapping space. */
215 /* ` enum phys_map_space { */
216 #define XENMAPSPACE_shared_info 0 /* shared info page */
217 #define XENMAPSPACE_grant_table 1 /* grant table page */
218 #define XENMAPSPACE_gmfn 2 /* GMFN */
219 #define XENMAPSPACE_gmfn_range 3 /* GMFN range, XENMEM_add_to_physmap only. */
220 #define XENMAPSPACE_gmfn_foreign 4 /* GMFN from another dom,
221 * XENMEM_add_to_physmap_batch only. */
222 /* ` } */
223
224 /*
225 * Sets the GPFN at which a particular page appears in the specified guest's
226 * pseudophysical address space.
227 * arg == addr of xen_add_to_physmap_t.
228 */
229 #define XENMEM_add_to_physmap 7
230 struct xen_add_to_physmap {
231 /* Which domain to change the mapping for. */
232 domid_t domid;
233
234 /* Number of pages to go through for gmfn_range */
235 uint16_t size;
236
237 unsigned int space; /* => enum phys_map_space */
238
239 #define XENMAPIDX_grant_table_status 0x80000000
240
241 /* Index into space being mapped. */
242 xen_ulong_t idx;
243
244 /* GPFN in domid where the source mapping page should appear. */
245 xen_pfn_t gpfn;
246 };
247 typedef struct xen_add_to_physmap xen_add_to_physmap_t;
248 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_t);
249
250 /* A batched version of add_to_physmap. */
251 #define XENMEM_add_to_physmap_batch 23
252 struct xen_add_to_physmap_batch {
253 /* IN */
254 /* Which domain to change the mapping for. */
255 domid_t domid;
256 uint16_t space; /* => enum phys_map_space */
257
258 /* Number of pages to go through */
259 uint16_t size;
260 domid_t foreign_domid; /* IFF gmfn_foreign */
261
262 /* Indexes into space being mapped. */
263 XEN_GUEST_HANDLE(xen_ulong_t) idxs;
264
265 /* GPFN in domid where the source mapping page should appear. */
266 XEN_GUEST_HANDLE(xen_pfn_t) gpfns;
267
268 /* OUT */
269
270 /* Per index error code. */
271 XEN_GUEST_HANDLE(int) errs;
272 };
273 typedef struct xen_add_to_physmap_batch xen_add_to_physmap_batch_t;
274 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_batch_t);
275
276 #if __XEN_INTERFACE_VERSION__ < 0x00040400
277 #define XENMEM_add_to_physmap_range XENMEM_add_to_physmap_batch
278 #define xen_add_to_physmap_range xen_add_to_physmap_batch
279 typedef struct xen_add_to_physmap_batch xen_add_to_physmap_range_t;
280 DEFINE_XEN_GUEST_HANDLE(xen_add_to_physmap_range_t);
281 #endif
282
283 /*
284 * Unmaps the page appearing at a particular GPFN from the specified guest's
285 * pseudophysical address space.
286 * arg == addr of xen_remove_from_physmap_t.
287 */
288 #define XENMEM_remove_from_physmap 15
289 struct xen_remove_from_physmap {
290 /* Which domain to change the mapping for. */
291 domid_t domid;
292
293 /* GPFN of the current mapping of the page. */
294 xen_pfn_t gpfn;
295 };
296 typedef struct xen_remove_from_physmap xen_remove_from_physmap_t;
297 DEFINE_XEN_GUEST_HANDLE(xen_remove_from_physmap_t);
298
299 /*** REMOVED ***/
300 /*#define XENMEM_translate_gpfn_list 8*/
301
302 /*
303 * Returns the pseudo-physical memory map as it was when the domain
304 * was started (specified by XENMEM_set_memory_map).
305 * arg == addr of xen_memory_map_t.
306 */
307 #define XENMEM_memory_map 9
308 struct xen_memory_map {
309 /*
310 * On call the number of entries which can be stored in buffer. On
311 * return the number of entries which have been stored in
312 * buffer.
313 */
314 unsigned int nr_entries;
315
316 /*
317 * Entries in the buffer are in the same format as returned by the
318 * BIOS INT 0x15 EAX=0xE820 call.
319 */
320 XEN_GUEST_HANDLE(void) buffer;
321 };
322 typedef struct xen_memory_map xen_memory_map_t;
323 DEFINE_XEN_GUEST_HANDLE(xen_memory_map_t);
324
325 /*
326 * Returns the real physical memory map. Passes the same structure as
327 * XENMEM_memory_map.
328 * arg == addr of xen_memory_map_t.
329 */
330 #define XENMEM_machine_memory_map 10
331
332 /*
333 * Set the pseudo-physical memory map of a domain, as returned by
334 * XENMEM_memory_map.
335 * arg == addr of xen_foreign_memory_map_t.
336 */
337 #define XENMEM_set_memory_map 13
338 struct xen_foreign_memory_map {
339 domid_t domid;
340 struct xen_memory_map map;
341 };
342 typedef struct xen_foreign_memory_map xen_foreign_memory_map_t;
343 DEFINE_XEN_GUEST_HANDLE(xen_foreign_memory_map_t);
344
345 #define XENMEM_set_pod_target 16
346 #define XENMEM_get_pod_target 17
347 struct xen_pod_target {
348 /* IN */
349 uint64_t target_pages;
350 /* OUT */
351 uint64_t tot_pages;
352 uint64_t pod_cache_pages;
353 uint64_t pod_entries;
354 /* IN */
355 domid_t domid;
356 };
357 typedef struct xen_pod_target xen_pod_target_t;
358
359 #if defined(__XEN__) || defined(__XEN_TOOLS__)
360
361 #ifndef uint64_aligned_t
362 #define uint64_aligned_t uint64_t
363 #endif
364
365 /*
366 * Get the number of MFNs saved through memory sharing.
367 * The call never fails.
368 */
369 #define XENMEM_get_sharing_freed_pages 18
370 #define XENMEM_get_sharing_shared_pages 19
371
372 #define XENMEM_paging_op 20
373 #define XENMEM_paging_op_nominate 0
374 #define XENMEM_paging_op_evict 1
375 #define XENMEM_paging_op_prep 2
376
377 struct xen_mem_event_op {
378 uint8_t op; /* XENMEM_*_op_* */
379 domid_t domain;
380
381
382 /* PAGING_PREP IN: buffer to immediately fill page in */
383 uint64_aligned_t buffer;
384 /* Other OPs */
385 uint64_aligned_t gfn; /* IN: gfn of page being operated on */
386 };
387 typedef struct xen_mem_event_op xen_mem_event_op_t;
388 DEFINE_XEN_GUEST_HANDLE(xen_mem_event_op_t);
389
390 #define XENMEM_access_op 21
391 #define XENMEM_access_op_resume 0
392 #define XENMEM_access_op_set_access 1
393 #define XENMEM_access_op_get_access 2
394
395 typedef enum {
396 XENMEM_access_n,
397 XENMEM_access_r,
398 XENMEM_access_w,
399 XENMEM_access_rw,
400 XENMEM_access_x,
401 XENMEM_access_rx,
402 XENMEM_access_wx,
403 XENMEM_access_rwx,
404 /*
405 * Page starts off as r-x, but automatically
406 * change to r-w on a write
407 */
408 XENMEM_access_rx2rw,
409 /*
410 * Log access: starts off as n, automatically
411 * goes to rwx, generating an event without
412 * pausing the vcpu
413 */
414 XENMEM_access_n2rwx,
415 /* Take the domain default */
416 XENMEM_access_default
417 } xenmem_access_t;
418
419 struct xen_mem_access_op {
420 /* XENMEM_access_op_* */
421 uint8_t op;
422 /* xenmem_access_t */
423 uint8_t access;
424 domid_t domid;
425 /*
426 * Number of pages for set op
427 * Ignored on setting default access and other ops
428 */
429 uint32_t nr;
430 /*
431 * First pfn for set op
432 * pfn for get op
433 * ~0ull is used to set and get the default access for pages
434 */
435 uint64_aligned_t pfn;
436 };
437 typedef struct xen_mem_access_op xen_mem_access_op_t;
438 DEFINE_XEN_GUEST_HANDLE(xen_mem_access_op_t);
439
440 #define XENMEM_sharing_op 22
441 #define XENMEM_sharing_op_nominate_gfn 0
442 #define XENMEM_sharing_op_nominate_gref 1
443 #define XENMEM_sharing_op_share 2
444 #define XENMEM_sharing_op_resume 3
445 #define XENMEM_sharing_op_debug_gfn 4
446 #define XENMEM_sharing_op_debug_mfn 5
447 #define XENMEM_sharing_op_debug_gref 6
448 #define XENMEM_sharing_op_add_physmap 7
449 #define XENMEM_sharing_op_audit 8
450
451 #define XENMEM_SHARING_OP_S_HANDLE_INVALID (-10)
452 #define XENMEM_SHARING_OP_C_HANDLE_INVALID (-9)
453
454 /* The following allows sharing of grant refs. This is useful
455 * for sharing utilities sitting as "filters" in IO backends
456 * (e.g. memshr + blktap(2)). The IO backend is only exposed
457 * to grant references, and this allows sharing of the grefs */
458 #define XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG (1ULL << 62)
459
460 #define XENMEM_SHARING_OP_FIELD_MAKE_GREF(field, val) \
461 (field) = (XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG | val)
462 #define XENMEM_SHARING_OP_FIELD_IS_GREF(field) \
463 ((field) & XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG)
464 #define XENMEM_SHARING_OP_FIELD_GET_GREF(field) \
465 ((field) & (~XENMEM_SHARING_OP_FIELD_IS_GREF_FLAG))
466
467 struct xen_mem_sharing_op {
468 uint8_t op; /* XENMEM_sharing_op_* */
469 domid_t domain;
470
471 union {
472 struct mem_sharing_op_nominate { /* OP_NOMINATE_xxx */
473 union {
474 uint64_aligned_t gfn; /* IN: gfn to nominate */
475 uint32_t grant_ref; /* IN: grant ref to nominate */
476 } u;
477 uint64_aligned_t handle; /* OUT: the handle */
478 } nominate;
479 struct mem_sharing_op_share { /* OP_SHARE/ADD_PHYSMAP */
480 uint64_aligned_t source_gfn; /* IN: the gfn of the source page */
481 uint64_aligned_t source_handle; /* IN: handle to the source page */
482 uint64_aligned_t client_gfn; /* IN: the client gfn */
483 uint64_aligned_t client_handle; /* IN: handle to the client page */
484 domid_t client_domain; /* IN: the client domain id */
485 } share;
486 struct mem_sharing_op_debug { /* OP_DEBUG_xxx */
487 union {
488 uint64_aligned_t gfn; /* IN: gfn to debug */
489 uint64_aligned_t mfn; /* IN: mfn to debug */
490 uint32_t gref; /* IN: gref to debug */
491 } u;
492 } debug;
493 } u;
494 };
495 typedef struct xen_mem_sharing_op xen_mem_sharing_op_t;
496 DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
497
498 /*
499 * Attempt to stake a claim for a domain on a quantity of pages
500 * of system RAM, but _not_ assign specific pageframes. Only
501 * arithmetic is performed so the hypercall is very fast and need
502 * not be preemptible, thus sidestepping time-of-check-time-of-use
503 * races for memory allocation. Returns 0 if the hypervisor page
504 * allocator has atomically and successfully claimed the requested
505 * number of pages, else non-zero.
506 *
507 * Any domain may have only one active claim. When sufficient memory
508 * has been allocated to resolve the claim, the claim silently expires.
509 * Claiming zero pages effectively resets any outstanding claim and
510 * is always successful.
511 *
512 * Note that a valid claim may be staked even after memory has been
513 * allocated for a domain. In this case, the claim is not incremental,
514 * i.e. if the domain's tot_pages is 3, and a claim is staked for 10,
515 * only 7 additional pages are claimed.
516 *
517 * Caller must be privileged or the hypercall fails.
518 */
519 #define XENMEM_claim_pages 24
520
/*
 * XENMEM_claim_pages flags - there are no flags at this time.
 * The zero value is appropriate.
 */
525
526 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
527
528 /* Next available subop number is 26 */
529
530 #endif /* __XEN_PUBLIC_MEMORY_H__ */
531
/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */