qemu.git: linux-user/elfload.c
1 /* This is the Linux kernel elf-loading code, ported into user space */
2 #include "qemu/osdep.h"
3 #include <sys/param.h>
4
5 #include <sys/resource.h>
6 #include <sys/shm.h>
7
8 #include "qemu.h"
9 #include "disas/disas.h"
10 #include "qemu/path.h"
11 #include "qemu/queue.h"
12 #include "qemu/guest-random.h"
13 #include "qemu/units.h"
14 #include "qemu/selfmap.h"
15
16 #ifdef _ARCH_PPC64
17 #undef ARCH_DLINFO
18 #undef ELF_PLATFORM
19 #undef ELF_HWCAP
20 #undef ELF_HWCAP2
21 #undef ELF_CLASS
22 #undef ELF_DATA
23 #undef ELF_ARCH
24 #endif
25
26 #define ELF_OSABI ELFOSABI_SYSV
27
28 /* from personality.h */
29
30 /*
31 * Flags for bug emulation.
32 *
33 * These occupy the top three bytes.
34 */
35 enum {
36 ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */
37 FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to
38 descriptors (signal handling) */
39 MMAP_PAGE_ZERO = 0x0100000,
40 ADDR_COMPAT_LAYOUT = 0x0200000,
41 READ_IMPLIES_EXEC = 0x0400000,
42 ADDR_LIMIT_32BIT = 0x0800000,
43 SHORT_INODE = 0x1000000,
44 WHOLE_SECONDS = 0x2000000,
45 STICKY_TIMEOUTS = 0x4000000,
46 ADDR_LIMIT_3GB = 0x8000000,
47 };
48
49 /*
50 * Personality types.
51 *
52 * These go in the low byte. Avoid using the top bit, it will
53 * conflict with error returns.
54 */
55 enum {
56 PER_LINUX = 0x0000,
57 PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT,
58 PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS,
59 PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
60 PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
61 PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
62 PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
63 PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
64 PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS,
65 PER_BSD = 0x0006,
66 PER_SUNOS = 0x0006 | STICKY_TIMEOUTS,
67 PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
68 PER_LINUX32 = 0x0008,
69 PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB,
70 PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
71 PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
72 PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
73 PER_RISCOS = 0x000c,
74 PER_SOLARIS = 0x000d | STICKY_TIMEOUTS,
75 PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
76 PER_OSF4 = 0x000f, /* OSF/1 v4 */
77 PER_HPUX = 0x0010,
78 PER_MASK = 0x00ff,
79 };
80
81 /*
82 * Return the base personality without flags.
83 */
84 #define personality(pers) (pers & PER_MASK)
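/*
 * Illustrative example (editorial, not in the original source): the
 * macro strips the bug-emulation flags, so a value built with extra
 * flags still compares equal to its base type, e.g.
 *
 *     personality(PER_LINUX32_3GB) == PER_LINUX32
 *
 * because ADDR_LIMIT_3GB lives above the low byte covered by PER_MASK.
 */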
85
86 int info_is_fdpic(struct image_info *info)
87 {
88 return info->personality == PER_LINUX_FDPIC;
89 }
90
91 /* This flag is ineffective under Linux too; it should be deleted. */
92 #ifndef MAP_DENYWRITE
93 #define MAP_DENYWRITE 0
94 #endif
95
96 /* should probably go in elf.h */
97 #ifndef ELIBBAD
98 #define ELIBBAD 80
99 #endif
100
101 #ifdef TARGET_WORDS_BIGENDIAN
102 #define ELF_DATA ELFDATA2MSB
103 #else
104 #define ELF_DATA ELFDATA2LSB
105 #endif
106
107 #ifdef TARGET_ABI_MIPSN32
108 typedef abi_ullong target_elf_greg_t;
109 #define tswapreg(ptr) tswap64(ptr)
110 #else
111 typedef abi_ulong target_elf_greg_t;
112 #define tswapreg(ptr) tswapal(ptr)
113 #endif
114
115 #ifdef USE_UID16
116 typedef abi_ushort target_uid_t;
117 typedef abi_ushort target_gid_t;
118 #else
119 typedef abi_uint target_uid_t;
120 typedef abi_uint target_gid_t;
121 #endif
122 typedef abi_int target_pid_t;
123
124 #ifdef TARGET_I386
125
126 #define ELF_PLATFORM get_elf_platform()
127
128 static const char *get_elf_platform(void)
129 {
130 static char elf_platform[] = "i386";
131 int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL);
132 if (family > 6)
133 family = 6;
134 if (family >= 3)
135 elf_platform[1] = '0' + family;
136 return elf_platform;
137 }
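/*
 * Editorial note (not in the original): families 3..6 map to the
 * strings "i386".."i686", e.g. a CPU model reporting family 5 yields
 * "i586"; anything above 6 is clamped to "i686", and families below 3
 * leave the default "i386" untouched.
 */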
138
139 #define ELF_HWCAP get_elf_hwcap()
140
141 static uint32_t get_elf_hwcap(void)
142 {
143 X86CPU *cpu = X86_CPU(thread_cpu);
144
145 return cpu->env.features[FEAT_1_EDX];
146 }
147
148 #ifdef TARGET_X86_64
149 #define ELF_START_MMAP 0x2aaaaab000ULL
150
151 #define ELF_CLASS ELFCLASS64
152 #define ELF_ARCH EM_X86_64
153
154 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
155 {
156 regs->rax = 0;
157 regs->rsp = infop->start_stack;
158 regs->rip = infop->entry;
159 }
160
161 #define ELF_NREG 27
162 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
163
164 /*
165  * Note that ELF_NREG should be 29, as there should also be space for
166  * the TRAPNO and ERR "registers", but Linux doesn't dump
167 * those.
168 *
169 * See linux kernel: arch/x86/include/asm/elf.h
170 */
171 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
172 {
173 (*regs)[0] = env->regs[15];
174 (*regs)[1] = env->regs[14];
175 (*regs)[2] = env->regs[13];
176 (*regs)[3] = env->regs[12];
177 (*regs)[4] = env->regs[R_EBP];
178 (*regs)[5] = env->regs[R_EBX];
179 (*regs)[6] = env->regs[11];
180 (*regs)[7] = env->regs[10];
181 (*regs)[8] = env->regs[9];
182 (*regs)[9] = env->regs[8];
183 (*regs)[10] = env->regs[R_EAX];
184 (*regs)[11] = env->regs[R_ECX];
185 (*regs)[12] = env->regs[R_EDX];
186 (*regs)[13] = env->regs[R_ESI];
187 (*regs)[14] = env->regs[R_EDI];
188 (*regs)[15] = env->regs[R_EAX]; /* XXX */
189 (*regs)[16] = env->eip;
190 (*regs)[17] = env->segs[R_CS].selector & 0xffff;
191 (*regs)[18] = env->eflags;
192 (*regs)[19] = env->regs[R_ESP];
193 (*regs)[20] = env->segs[R_SS].selector & 0xffff;
194 (*regs)[21] = env->segs[R_FS].selector & 0xffff;
195 (*regs)[22] = env->segs[R_GS].selector & 0xffff;
196 (*regs)[23] = env->segs[R_DS].selector & 0xffff;
197 (*regs)[24] = env->segs[R_ES].selector & 0xffff;
198 (*regs)[25] = env->segs[R_FS].selector & 0xffff;
199 (*regs)[26] = env->segs[R_GS].selector & 0xffff;
200 }
201
202 #else
203
204 #define ELF_START_MMAP 0x80000000
205
206 /*
207 * This is used to ensure we don't load something for the wrong architecture.
208 */
209 #define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
210
211 /*
212 * These are used to set parameters in the core dumps.
213 */
214 #define ELF_CLASS ELFCLASS32
215 #define ELF_ARCH EM_386
216
217 static inline void init_thread(struct target_pt_regs *regs,
218 struct image_info *infop)
219 {
220 regs->esp = infop->start_stack;
221 regs->eip = infop->entry;
222
223 /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
224 starts %edx contains a pointer to a function which might be
225    registered using `atexit'.  This provides a means for the
226 dynamic linker to call DT_FINI functions for shared libraries
227 that have been loaded before the code runs.
228
229    A value of 0 means we have no such handler.  */
230 regs->edx = 0;
231 }
232
233 #define ELF_NREG 17
234 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
235
236 /*
237  * Note that ELF_NREG should be 19, as there should also be space for
238  * the TRAPNO and ERR "registers", but Linux doesn't dump
239 * those.
240 *
241 * See linux kernel: arch/x86/include/asm/elf.h
242 */
243 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
244 {
245 (*regs)[0] = env->regs[R_EBX];
246 (*regs)[1] = env->regs[R_ECX];
247 (*regs)[2] = env->regs[R_EDX];
248 (*regs)[3] = env->regs[R_ESI];
249 (*regs)[4] = env->regs[R_EDI];
250 (*regs)[5] = env->regs[R_EBP];
251 (*regs)[6] = env->regs[R_EAX];
252 (*regs)[7] = env->segs[R_DS].selector & 0xffff;
253 (*regs)[8] = env->segs[R_ES].selector & 0xffff;
254 (*regs)[9] = env->segs[R_FS].selector & 0xffff;
255 (*regs)[10] = env->segs[R_GS].selector & 0xffff;
256 (*regs)[11] = env->regs[R_EAX]; /* XXX */
257 (*regs)[12] = env->eip;
258 (*regs)[13] = env->segs[R_CS].selector & 0xffff;
259 (*regs)[14] = env->eflags;
260 (*regs)[15] = env->regs[R_ESP];
261 (*regs)[16] = env->segs[R_SS].selector & 0xffff;
262 }
263 #endif
264
265 #define USE_ELF_CORE_DUMP
266 #define ELF_EXEC_PAGESIZE 4096
267
268 #endif
269
270 #ifdef TARGET_ARM
271
272 #ifndef TARGET_AARCH64
273 /* 32 bit ARM definitions */
274
275 #define ELF_START_MMAP 0x80000000
276
277 #define ELF_ARCH EM_ARM
278 #define ELF_CLASS ELFCLASS32
279
280 static inline void init_thread(struct target_pt_regs *regs,
281 struct image_info *infop)
282 {
283 abi_long stack = infop->start_stack;
284 memset(regs, 0, sizeof(*regs));
285
286 regs->uregs[16] = ARM_CPU_MODE_USR;
287 if (infop->entry & 1) {
288 regs->uregs[16] |= CPSR_T;
289 }
290 regs->uregs[15] = infop->entry & 0xfffffffe;
291 regs->uregs[13] = infop->start_stack;
292     /* FIXME - what to do on failure of get_user()? */
293     get_user_ual(regs->uregs[2], stack + 8); /* envp */
294     get_user_ual(regs->uregs[1], stack + 4); /* argv */
295     /* XXX: it seems that r0 is zeroed afterwards! */
296     regs->uregs[0] = 0;
297     /* For uClinux PIC binaries.  */
298     /* XXX: Linux does this only on ARM with no MMU (do we care?) */
299 regs->uregs[10] = infop->start_data;
300
301 /* Support ARM FDPIC. */
302 if (info_is_fdpic(infop)) {
303 /* As described in the ABI document, r7 points to the loadmap info
304 * prepared by the kernel. If an interpreter is needed, r8 points
305 * to the interpreter loadmap and r9 points to the interpreter
306 * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and
307 * r9 points to the main program PT_DYNAMIC info.
308 */
309 regs->uregs[7] = infop->loadmap_addr;
310 if (infop->interpreter_loadmap_addr) {
311 /* Executable is dynamically loaded. */
312 regs->uregs[8] = infop->interpreter_loadmap_addr;
313 regs->uregs[9] = infop->interpreter_pt_dynamic_addr;
314 } else {
315 regs->uregs[8] = 0;
316 regs->uregs[9] = infop->pt_dynamic_addr;
317 }
318 }
319 }
320
321 #define ELF_NREG 18
322 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
323
324 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
325 {
326 (*regs)[0] = tswapreg(env->regs[0]);
327 (*regs)[1] = tswapreg(env->regs[1]);
328 (*regs)[2] = tswapreg(env->regs[2]);
329 (*regs)[3] = tswapreg(env->regs[3]);
330 (*regs)[4] = tswapreg(env->regs[4]);
331 (*regs)[5] = tswapreg(env->regs[5]);
332 (*regs)[6] = tswapreg(env->regs[6]);
333 (*regs)[7] = tswapreg(env->regs[7]);
334 (*regs)[8] = tswapreg(env->regs[8]);
335 (*regs)[9] = tswapreg(env->regs[9]);
336 (*regs)[10] = tswapreg(env->regs[10]);
337 (*regs)[11] = tswapreg(env->regs[11]);
338 (*regs)[12] = tswapreg(env->regs[12]);
339 (*regs)[13] = tswapreg(env->regs[13]);
340 (*regs)[14] = tswapreg(env->regs[14]);
341 (*regs)[15] = tswapreg(env->regs[15]);
342
343 (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env));
344 (*regs)[17] = tswapreg(env->regs[0]); /* XXX */
345 }
346
347 #define USE_ELF_CORE_DUMP
348 #define ELF_EXEC_PAGESIZE 4096
349
350 enum
351 {
352 ARM_HWCAP_ARM_SWP = 1 << 0,
353 ARM_HWCAP_ARM_HALF = 1 << 1,
354 ARM_HWCAP_ARM_THUMB = 1 << 2,
355 ARM_HWCAP_ARM_26BIT = 1 << 3,
356 ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
357 ARM_HWCAP_ARM_FPA = 1 << 5,
358 ARM_HWCAP_ARM_VFP = 1 << 6,
359 ARM_HWCAP_ARM_EDSP = 1 << 7,
360 ARM_HWCAP_ARM_JAVA = 1 << 8,
361 ARM_HWCAP_ARM_IWMMXT = 1 << 9,
362 ARM_HWCAP_ARM_CRUNCH = 1 << 10,
363 ARM_HWCAP_ARM_THUMBEE = 1 << 11,
364 ARM_HWCAP_ARM_NEON = 1 << 12,
365 ARM_HWCAP_ARM_VFPv3 = 1 << 13,
366 ARM_HWCAP_ARM_VFPv3D16 = 1 << 14,
367 ARM_HWCAP_ARM_TLS = 1 << 15,
368 ARM_HWCAP_ARM_VFPv4 = 1 << 16,
369 ARM_HWCAP_ARM_IDIVA = 1 << 17,
370 ARM_HWCAP_ARM_IDIVT = 1 << 18,
371 ARM_HWCAP_ARM_VFPD32 = 1 << 19,
372 ARM_HWCAP_ARM_LPAE = 1 << 20,
373 ARM_HWCAP_ARM_EVTSTRM = 1 << 21,
374 };
375
376 enum {
377 ARM_HWCAP2_ARM_AES = 1 << 0,
378 ARM_HWCAP2_ARM_PMULL = 1 << 1,
379 ARM_HWCAP2_ARM_SHA1 = 1 << 2,
380 ARM_HWCAP2_ARM_SHA2 = 1 << 3,
381 ARM_HWCAP2_ARM_CRC32 = 1 << 4,
382 };
383
384 /* The commpage only exists for 32 bit kernels */
385
386 #define ARM_COMMPAGE (intptr_t)0xffff0f00u
387
388 static bool init_guest_commpage(void)
389 {
390 void *want = g2h(ARM_COMMPAGE & -qemu_host_page_size);
391 void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
392 MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
393
394 if (addr == MAP_FAILED) {
395 perror("Allocating guest commpage");
396 exit(EXIT_FAILURE);
397 }
398 if (addr != want) {
399 return false;
400 }
401
402 /* Set kernel helper versions; rest of page is 0. */
403 __put_user(5, (uint32_t *)g2h(0xffff0ffcu));
404
405 if (mprotect(addr, qemu_host_page_size, PROT_READ)) {
406 perror("Protecting guest commpage");
407 exit(EXIT_FAILURE);
408 }
409 return true;
410 }
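/*
 * Sketch (editorial assumption, not part of this file): a 32-bit ARM
 * guest discovers the kernel helpers through the version word written
 * above, e.g.
 *
 *     unsigned int vers = *(unsigned int *)0xffff0ffcu;
 *     if (vers >= 2) {
 *         // __kuser_cmpxchg at 0xffff0fc0 is available
 *     }
 *
 * which is why the page must remain readable after the mprotect() call.
 */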
411
412 #define ELF_HWCAP get_elf_hwcap()
413 #define ELF_HWCAP2 get_elf_hwcap2()
414
415 static uint32_t get_elf_hwcap(void)
416 {
417 ARMCPU *cpu = ARM_CPU(thread_cpu);
418 uint32_t hwcaps = 0;
419
420 hwcaps |= ARM_HWCAP_ARM_SWP;
421 hwcaps |= ARM_HWCAP_ARM_HALF;
422 hwcaps |= ARM_HWCAP_ARM_THUMB;
423 hwcaps |= ARM_HWCAP_ARM_FAST_MULT;
424
425 /* probe for the extra features */
426 #define GET_FEATURE(feat, hwcap) \
427 do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0)
428
429 #define GET_FEATURE_ID(feat, hwcap) \
430 do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
431
432 /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */
433 GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP);
434 GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
435 GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
436 GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
437 GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS);
438 GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE);
439 GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA);
440 GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT);
441 GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP);
442
443 if (cpu_isar_feature(aa32_fpsp_v3, cpu) ||
444 cpu_isar_feature(aa32_fpdp_v3, cpu)) {
445 hwcaps |= ARM_HWCAP_ARM_VFPv3;
446 if (cpu_isar_feature(aa32_simd_r32, cpu)) {
447 hwcaps |= ARM_HWCAP_ARM_VFPD32;
448 } else {
449 hwcaps |= ARM_HWCAP_ARM_VFPv3D16;
450 }
451 }
452 GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4);
453
454 return hwcaps;
455 }
456
457 static uint32_t get_elf_hwcap2(void)
458 {
459 ARMCPU *cpu = ARM_CPU(thread_cpu);
460 uint32_t hwcaps = 0;
461
462 GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES);
463 GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL);
464 GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1);
465 GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2);
466 GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32);
467 return hwcaps;
468 }
469
470 #undef GET_FEATURE
471 #undef GET_FEATURE_ID
472
473 #define ELF_PLATFORM get_elf_platform()
474
475 static const char *get_elf_platform(void)
476 {
477 CPUARMState *env = thread_cpu->env_ptr;
478
479 #ifdef TARGET_WORDS_BIGENDIAN
480 # define END "b"
481 #else
482 # define END "l"
483 #endif
484
485 if (arm_feature(env, ARM_FEATURE_V8)) {
486 return "v8" END;
487 } else if (arm_feature(env, ARM_FEATURE_V7)) {
488 if (arm_feature(env, ARM_FEATURE_M)) {
489 return "v7m" END;
490 } else {
491 return "v7" END;
492 }
493 } else if (arm_feature(env, ARM_FEATURE_V6)) {
494 return "v6" END;
495 } else if (arm_feature(env, ARM_FEATURE_V5)) {
496 return "v5" END;
497 } else {
498 return "v4" END;
499 }
500
501 #undef END
502 }
503
504 #else
505 /* 64 bit ARM definitions */
506 #define ELF_START_MMAP 0x80000000
507
508 #define ELF_ARCH EM_AARCH64
509 #define ELF_CLASS ELFCLASS64
510 #ifdef TARGET_WORDS_BIGENDIAN
511 # define ELF_PLATFORM "aarch64_be"
512 #else
513 # define ELF_PLATFORM "aarch64"
514 #endif
515
516 static inline void init_thread(struct target_pt_regs *regs,
517 struct image_info *infop)
518 {
519 abi_long stack = infop->start_stack;
520 memset(regs, 0, sizeof(*regs));
521
522 regs->pc = infop->entry & ~0x3ULL;
523 regs->sp = stack;
524 }
525
526 #define ELF_NREG 34
527 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
528
529 static void elf_core_copy_regs(target_elf_gregset_t *regs,
530 const CPUARMState *env)
531 {
532 int i;
533
534 for (i = 0; i < 32; i++) {
535 (*regs)[i] = tswapreg(env->xregs[i]);
536 }
537 (*regs)[32] = tswapreg(env->pc);
538 (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env));
539 }
540
541 #define USE_ELF_CORE_DUMP
542 #define ELF_EXEC_PAGESIZE 4096
543
544 enum {
545 ARM_HWCAP_A64_FP = 1 << 0,
546 ARM_HWCAP_A64_ASIMD = 1 << 1,
547 ARM_HWCAP_A64_EVTSTRM = 1 << 2,
548 ARM_HWCAP_A64_AES = 1 << 3,
549 ARM_HWCAP_A64_PMULL = 1 << 4,
550 ARM_HWCAP_A64_SHA1 = 1 << 5,
551 ARM_HWCAP_A64_SHA2 = 1 << 6,
552 ARM_HWCAP_A64_CRC32 = 1 << 7,
553 ARM_HWCAP_A64_ATOMICS = 1 << 8,
554 ARM_HWCAP_A64_FPHP = 1 << 9,
555 ARM_HWCAP_A64_ASIMDHP = 1 << 10,
556 ARM_HWCAP_A64_CPUID = 1 << 11,
557 ARM_HWCAP_A64_ASIMDRDM = 1 << 12,
558 ARM_HWCAP_A64_JSCVT = 1 << 13,
559 ARM_HWCAP_A64_FCMA = 1 << 14,
560 ARM_HWCAP_A64_LRCPC = 1 << 15,
561 ARM_HWCAP_A64_DCPOP = 1 << 16,
562 ARM_HWCAP_A64_SHA3 = 1 << 17,
563 ARM_HWCAP_A64_SM3 = 1 << 18,
564 ARM_HWCAP_A64_SM4 = 1 << 19,
565 ARM_HWCAP_A64_ASIMDDP = 1 << 20,
566 ARM_HWCAP_A64_SHA512 = 1 << 21,
567 ARM_HWCAP_A64_SVE = 1 << 22,
568 ARM_HWCAP_A64_ASIMDFHM = 1 << 23,
569 ARM_HWCAP_A64_DIT = 1 << 24,
570 ARM_HWCAP_A64_USCAT = 1 << 25,
571 ARM_HWCAP_A64_ILRCPC = 1 << 26,
572 ARM_HWCAP_A64_FLAGM = 1 << 27,
573 ARM_HWCAP_A64_SSBS = 1 << 28,
574 ARM_HWCAP_A64_SB = 1 << 29,
575 ARM_HWCAP_A64_PACA = 1 << 30,
576 ARM_HWCAP_A64_PACG = 1UL << 31,
577
578 ARM_HWCAP2_A64_DCPODP = 1 << 0,
579 ARM_HWCAP2_A64_SVE2 = 1 << 1,
580 ARM_HWCAP2_A64_SVEAES = 1 << 2,
581 ARM_HWCAP2_A64_SVEPMULL = 1 << 3,
582 ARM_HWCAP2_A64_SVEBITPERM = 1 << 4,
583 ARM_HWCAP2_A64_SVESHA3 = 1 << 5,
584 ARM_HWCAP2_A64_SVESM4 = 1 << 6,
585 ARM_HWCAP2_A64_FLAGM2 = 1 << 7,
586 ARM_HWCAP2_A64_FRINT = 1 << 8,
587 };
588
589 #define ELF_HWCAP get_elf_hwcap()
590 #define ELF_HWCAP2 get_elf_hwcap2()
591
592 #define GET_FEATURE_ID(feat, hwcap) \
593 do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0)
594
595 static uint32_t get_elf_hwcap(void)
596 {
597 ARMCPU *cpu = ARM_CPU(thread_cpu);
598 uint32_t hwcaps = 0;
599
600 hwcaps |= ARM_HWCAP_A64_FP;
601 hwcaps |= ARM_HWCAP_A64_ASIMD;
602 hwcaps |= ARM_HWCAP_A64_CPUID;
603
604 /* probe for the extra features */
605
606 GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES);
607 GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL);
608 GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1);
609 GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2);
610 GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512);
611 GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32);
612 GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3);
613 GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3);
614 GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4);
615 GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP);
616 GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS);
617 GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM);
618 GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP);
619 GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA);
620 GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE);
621 GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG);
622 GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM);
623 GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT);
624 GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB);
625 GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM);
626 GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP);
627 GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC);
628 GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC);
629
630 return hwcaps;
631 }
632
633 static uint32_t get_elf_hwcap2(void)
634 {
635 ARMCPU *cpu = ARM_CPU(thread_cpu);
636 uint32_t hwcaps = 0;
637
638 GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP);
639 GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2);
640 GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT);
641
642 return hwcaps;
643 }
644
645 #undef GET_FEATURE_ID
646
647 #endif /* not TARGET_AARCH64 */
648 #endif /* TARGET_ARM */
649
650 #ifdef TARGET_SPARC
651 #ifdef TARGET_SPARC64
652
653 #define ELF_START_MMAP 0x80000000
654 #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
655 | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
656 #ifndef TARGET_ABI32
657 #define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
658 #else
659 #define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
660 #endif
661
662 #define ELF_CLASS ELFCLASS64
663 #define ELF_ARCH EM_SPARCV9
664
665 #define STACK_BIAS 2047
666
667 static inline void init_thread(struct target_pt_regs *regs,
668 struct image_info *infop)
669 {
670 #ifndef TARGET_ABI32
671 regs->tstate = 0;
672 #endif
673 regs->pc = infop->entry;
674 regs->npc = regs->pc + 4;
675 regs->y = 0;
676 #ifdef TARGET_ABI32
677 regs->u_regs[14] = infop->start_stack - 16 * 4;
678 #else
679 if (personality(infop->personality) == PER_LINUX32)
680 regs->u_regs[14] = infop->start_stack - 16 * 4;
681 else
682 regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
683 #endif
684 }
685
686 #else
687 #define ELF_START_MMAP 0x80000000
688 #define ELF_HWCAP (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
689 | HWCAP_SPARC_MULDIV)
690
691 #define ELF_CLASS ELFCLASS32
692 #define ELF_ARCH EM_SPARC
693
694 static inline void init_thread(struct target_pt_regs *regs,
695 struct image_info *infop)
696 {
697 regs->psr = 0;
698 regs->pc = infop->entry;
699 regs->npc = regs->pc + 4;
700 regs->y = 0;
701 regs->u_regs[14] = infop->start_stack - 16 * 4;
702 }
703
704 #endif
705 #endif
706
707 #ifdef TARGET_PPC
708
709 #define ELF_MACHINE PPC_ELF_MACHINE
710 #define ELF_START_MMAP 0x80000000
711
712 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
713
714 #define elf_check_arch(x) ( (x) == EM_PPC64 )
715
716 #define ELF_CLASS ELFCLASS64
717
718 #else
719
720 #define ELF_CLASS ELFCLASS32
721
722 #endif
723
724 #define ELF_ARCH EM_PPC
725
726 /* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
727 See arch/powerpc/include/asm/cputable.h. */
728 enum {
729 QEMU_PPC_FEATURE_32 = 0x80000000,
730 QEMU_PPC_FEATURE_64 = 0x40000000,
731 QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
732 QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
733 QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
734 QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
735 QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
736 QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
737 QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
738 QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
739 QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
740 QEMU_PPC_FEATURE_NO_TB = 0x00100000,
741 QEMU_PPC_FEATURE_POWER4 = 0x00080000,
742 QEMU_PPC_FEATURE_POWER5 = 0x00040000,
743 QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
744 QEMU_PPC_FEATURE_CELL = 0x00010000,
745 QEMU_PPC_FEATURE_BOOKE = 0x00008000,
746 QEMU_PPC_FEATURE_SMT = 0x00004000,
747 QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
748 QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
749 QEMU_PPC_FEATURE_PA6T = 0x00000800,
750 QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
751 QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
752 QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
753 QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
754 QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
755
756 QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
757 QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
758
759 /* Feature definitions in AT_HWCAP2. */
760 QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */
761 QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */
762 QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */
763 QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */
764 QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */
765 QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */
766 QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000,
767 QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000,
768 QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */
769 QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */
770 QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */
771 QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */
772 QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */
773 };
774
775 #define ELF_HWCAP get_elf_hwcap()
776
777 static uint32_t get_elf_hwcap(void)
778 {
779 PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
780 uint32_t features = 0;
781
782 /* We don't have to be terribly complete here; the high points are
783 Altivec/FP/SPE support. Anything else is just a bonus. */
784 #define GET_FEATURE(flag, feature) \
785 do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
786 #define GET_FEATURE2(flags, feature) \
787 do { \
788 if ((cpu->env.insns_flags2 & flags) == flags) { \
789 features |= feature; \
790 } \
791 } while (0)
792 GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
793 GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
794 GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
795 GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
796 GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
797 GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
798 GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
799 GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
800 GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP);
801 GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX);
802 GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 |
803 PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206),
804 QEMU_PPC_FEATURE_ARCH_2_06);
805 #undef GET_FEATURE
806 #undef GET_FEATURE2
807
808 return features;
809 }
810
811 #define ELF_HWCAP2 get_elf_hwcap2()
812
813 static uint32_t get_elf_hwcap2(void)
814 {
815 PowerPCCPU *cpu = POWERPC_CPU(thread_cpu);
816 uint32_t features = 0;
817
818 #define GET_FEATURE(flag, feature) \
819 do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0)
820 #define GET_FEATURE2(flag, feature) \
821 do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0)
822
823 GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL);
824 GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR);
825 GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 |
826 PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 |
827 QEMU_PPC_FEATURE2_VEC_CRYPTO);
828 GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 |
829 QEMU_PPC_FEATURE2_DARN);
830
831 #undef GET_FEATURE
832 #undef GET_FEATURE2
833
834 return features;
835 }
836
837 /*
838 * The requirements here are:
839 * - keep the final alignment of sp (sp & 0xf)
840 * - make sure the 32-bit value at the first 16 byte aligned position of
841 * AUXV is greater than 16 for glibc compatibility.
842 * AT_IGNOREPPC is used for that.
843 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
844 * even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
845 */
846 #define DLINFO_ARCH_ITEMS 5
847 #define ARCH_DLINFO \
848 do { \
849 PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); \
850 /* \
851 * Handle glibc compatibility: these magic entries must \
852 * be at the lowest addresses in the final auxv. \
853 */ \
854 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
855 NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \
856 NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \
857 NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \
858 NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
859 } while (0)
860
861 static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
862 {
863 _regs->gpr[1] = infop->start_stack;
864 #if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
865 if (get_ppc64_abi(infop) < 2) {
866 uint64_t val;
867 get_user_u64(val, infop->entry + 8);
868 _regs->gpr[2] = val + infop->load_bias;
869 get_user_u64(val, infop->entry);
870 infop->entry = val + infop->load_bias;
871 } else {
872 _regs->gpr[12] = infop->entry; /* r12 set to global entry address */
873 }
874 #endif
875 _regs->nip = infop->entry;
876 }
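/*
 * Editorial note: in the ELFv1 (ABI < 2) branch above, e_entry points
 * at a function descriptor { entry, TOC, env }, so the code loads the
 * real entry point from offset 0 and the initial r2 (TOC pointer) from
 * offset 8; the ELFv2 branch instead passes the global entry point in
 * r12 as that ABI requires.
 */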
877
878 /* See linux kernel: arch/powerpc/include/asm/elf.h. */
879 #define ELF_NREG 48
880 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
881
882 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
883 {
884 int i;
885 target_ulong ccr = 0;
886
887 for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
888 (*regs)[i] = tswapreg(env->gpr[i]);
889 }
890
891 (*regs)[32] = tswapreg(env->nip);
892 (*regs)[33] = tswapreg(env->msr);
893 (*regs)[35] = tswapreg(env->ctr);
894 (*regs)[36] = tswapreg(env->lr);
895 (*regs)[37] = tswapreg(env->xer);
896
897 for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
898 ccr |= env->crf[i] << (32 - ((i + 1) * 4));
899 }
900 (*regs)[38] = tswapreg(ccr);
901 }
902
903 #define USE_ELF_CORE_DUMP
904 #define ELF_EXEC_PAGESIZE 4096
905
906 #endif
907
908 #ifdef TARGET_MIPS
909
910 #define ELF_START_MMAP 0x80000000
911
912 #ifdef TARGET_MIPS64
913 #define ELF_CLASS ELFCLASS64
914 #else
915 #define ELF_CLASS ELFCLASS32
916 #endif
917 #define ELF_ARCH EM_MIPS
918
919 #define elf_check_arch(x) ((x) == EM_MIPS || (x) == EM_NANOMIPS)
920
921 #ifdef TARGET_ABI_MIPSN32
922 #define elf_check_abi(x) ((x) & EF_MIPS_ABI2)
923 #else
924 #define elf_check_abi(x) (!((x) & EF_MIPS_ABI2))
925 #endif
926
927 static inline void init_thread(struct target_pt_regs *regs,
928 struct image_info *infop)
929 {
930 regs->cp0_status = 2 << CP0St_KSU;
931 regs->cp0_epc = infop->entry;
932 regs->regs[29] = infop->start_stack;
933 }
934
935 /* See linux kernel: arch/mips/include/asm/elf.h. */
936 #define ELF_NREG 45
937 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
938
939 /* See linux kernel: arch/mips/include/asm/reg.h. */
940 enum {
941 #ifdef TARGET_MIPS64
942 TARGET_EF_R0 = 0,
943 #else
944 TARGET_EF_R0 = 6,
945 #endif
946 TARGET_EF_R26 = TARGET_EF_R0 + 26,
947 TARGET_EF_R27 = TARGET_EF_R0 + 27,
948 TARGET_EF_LO = TARGET_EF_R0 + 32,
949 TARGET_EF_HI = TARGET_EF_R0 + 33,
950 TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
951 TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
952 TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
953 TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
954 };
955
956 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
957 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
958 {
959 int i;
960
961 for (i = 0; i < TARGET_EF_R0; i++) {
962 (*regs)[i] = 0;
963 }
964 (*regs)[TARGET_EF_R0] = 0;
965
966 for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
967 (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]);
968 }
969
970 (*regs)[TARGET_EF_R26] = 0;
971 (*regs)[TARGET_EF_R27] = 0;
972 (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]);
973 (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]);
974 (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC);
975 (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr);
976 (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status);
977 (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause);
978 }
979
980 #define USE_ELF_CORE_DUMP
981 #define ELF_EXEC_PAGESIZE 4096
982
983 /* See arch/mips/include/uapi/asm/hwcap.h. */
984 enum {
985 HWCAP_MIPS_R6 = (1 << 0),
986 HWCAP_MIPS_MSA = (1 << 1),
987 };
988
989 #define ELF_HWCAP get_elf_hwcap()
990
991 static uint32_t get_elf_hwcap(void)
992 {
993 MIPSCPU *cpu = MIPS_CPU(thread_cpu);
994 uint32_t hwcaps = 0;
995
996 #define GET_FEATURE(flag, hwcap) \
997 do { if (cpu->env.insn_flags & (flag)) { hwcaps |= hwcap; } } while (0)
998
999 GET_FEATURE(ISA_MIPS32R6 | ISA_MIPS64R6, HWCAP_MIPS_R6);
1000 GET_FEATURE(ASE_MSA, HWCAP_MIPS_MSA);
1001
1002 #undef GET_FEATURE
1003
1004 return hwcaps;
1005 }
1006
1007 #endif /* TARGET_MIPS */
1008
1009 #ifdef TARGET_MICROBLAZE
1010
1011 #define ELF_START_MMAP 0x80000000
1012
1013 #define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
1014
1015 #define ELF_CLASS ELFCLASS32
1016 #define ELF_ARCH EM_MICROBLAZE
1017
1018 static inline void init_thread(struct target_pt_regs *regs,
1019 struct image_info *infop)
1020 {
1021 regs->pc = infop->entry;
1022 regs->r1 = infop->start_stack;
1023
1024 }
1025
1026 #define ELF_EXEC_PAGESIZE 4096
1027
1028 #define USE_ELF_CORE_DUMP
1029 #define ELF_NREG 38
1030 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1031
1032 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
1033 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
1034 {
1035 int i, pos = 0;
1036
1037 for (i = 0; i < 32; i++) {
1038 (*regs)[pos++] = tswapreg(env->regs[i]);
1039 }
1040
1041 (*regs)[pos++] = tswapreg(env->pc);
1042 (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env));
1043 (*regs)[pos++] = 0;
1044 (*regs)[pos++] = tswapreg(env->ear);
1045 (*regs)[pos++] = 0;
1046 (*regs)[pos++] = tswapreg(env->esr);
1047 }
1048
1049 #endif /* TARGET_MICROBLAZE */
1050
1051 #ifdef TARGET_NIOS2
1052
1053 #define ELF_START_MMAP 0x80000000
1054
1055 #define elf_check_arch(x) ((x) == EM_ALTERA_NIOS2)
1056
1057 #define ELF_CLASS ELFCLASS32
1058 #define ELF_ARCH EM_ALTERA_NIOS2
1059
1060 static void init_thread(struct target_pt_regs *regs, struct image_info *infop)
1061 {
1062 regs->ea = infop->entry;
1063 regs->sp = infop->start_stack;
1064 regs->estatus = 0x3;
1065 }
1066
1067 #define ELF_EXEC_PAGESIZE 4096
1068
1069 #define USE_ELF_CORE_DUMP
1070 #define ELF_NREG 49
1071 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1072
1073 /* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */
1074 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1075 const CPUNios2State *env)
1076 {
1077 int i;
1078
1079 (*regs)[0] = -1;
1080 for (i = 1; i < 8; i++) /* r0-r7 */
1081 (*regs)[i] = tswapreg(env->regs[i + 7]);
1082
1083 for (i = 8; i < 16; i++) /* r8-r15 */
1084 (*regs)[i] = tswapreg(env->regs[i - 8]);
1085
1086 for (i = 16; i < 24; i++) /* r16-r23 */
1087 (*regs)[i] = tswapreg(env->regs[i + 7]);
1088 (*regs)[24] = -1; /* R_ET */
1089 (*regs)[25] = -1; /* R_BT */
1090 (*regs)[26] = tswapreg(env->regs[R_GP]);
1091 (*regs)[27] = tswapreg(env->regs[R_SP]);
1092 (*regs)[28] = tswapreg(env->regs[R_FP]);
1093 (*regs)[29] = tswapreg(env->regs[R_EA]);
1094 (*regs)[30] = -1; /* R_SSTATUS */
1095 (*regs)[31] = tswapreg(env->regs[R_RA]);
1096
1097 (*regs)[32] = tswapreg(env->regs[R_PC]);
1098
1099 (*regs)[33] = -1; /* R_STATUS */
1100 (*regs)[34] = tswapreg(env->regs[CR_ESTATUS]);
1101
1102 for (i = 35; i < 49; i++) /* ... */
1103 (*regs)[i] = -1;
1104 }
1105
1106 #endif /* TARGET_NIOS2 */
1107
1108 #ifdef TARGET_OPENRISC
1109
1110 #define ELF_START_MMAP 0x08000000
1111
1112 #define ELF_ARCH EM_OPENRISC
1113 #define ELF_CLASS ELFCLASS32
1114 #define ELF_DATA ELFDATA2MSB
1115
1116 static inline void init_thread(struct target_pt_regs *regs,
1117 struct image_info *infop)
1118 {
1119 regs->pc = infop->entry;
1120 regs->gpr[1] = infop->start_stack;
1121 }
1122
1123 #define USE_ELF_CORE_DUMP
1124 #define ELF_EXEC_PAGESIZE 8192
1125
1126 /* See linux kernel arch/openrisc/include/asm/elf.h. */
1127 #define ELF_NREG 34 /* gprs and pc, sr */
1128 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1129
1130 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1131 const CPUOpenRISCState *env)
1132 {
1133 int i;
1134
1135 for (i = 0; i < 32; i++) {
1136 (*regs)[i] = tswapreg(cpu_get_gpr(env, i));
1137 }
1138 (*regs)[32] = tswapreg(env->pc);
1139 (*regs)[33] = tswapreg(cpu_get_sr(env));
1140 }
1141 #define ELF_HWCAP 0
1142 #define ELF_PLATFORM NULL
1143
1144 #endif /* TARGET_OPENRISC */
1145
1146 #ifdef TARGET_SH4
1147
1148 #define ELF_START_MMAP 0x80000000
1149
1150 #define ELF_CLASS ELFCLASS32
1151 #define ELF_ARCH EM_SH
1152
1153 static inline void init_thread(struct target_pt_regs *regs,
1154 struct image_info *infop)
1155 {
1156 /* Check other registers XXXXX */
1157 regs->pc = infop->entry;
1158 regs->regs[15] = infop->start_stack;
1159 }
1160
1161 /* See linux kernel: arch/sh/include/asm/elf.h. */
1162 #define ELF_NREG 23
1163 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1164
1165 /* See linux kernel: arch/sh/include/asm/ptrace.h. */
1166 enum {
1167 TARGET_REG_PC = 16,
1168 TARGET_REG_PR = 17,
1169 TARGET_REG_SR = 18,
1170 TARGET_REG_GBR = 19,
1171 TARGET_REG_MACH = 20,
1172 TARGET_REG_MACL = 21,
1173 TARGET_REG_SYSCALL = 22
1174 };
1175
1176 static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
1177 const CPUSH4State *env)
1178 {
1179 int i;
1180
1181 for (i = 0; i < 16; i++) {
1182 (*regs)[i] = tswapreg(env->gregs[i]);
1183 }
1184
1185 (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
1186 (*regs)[TARGET_REG_PR] = tswapreg(env->pr);
1187 (*regs)[TARGET_REG_SR] = tswapreg(env->sr);
1188 (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr);
1189 (*regs)[TARGET_REG_MACH] = tswapreg(env->mach);
1190 (*regs)[TARGET_REG_MACL] = tswapreg(env->macl);
1191 (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
1192 }
1193
1194 #define USE_ELF_CORE_DUMP
1195 #define ELF_EXEC_PAGESIZE 4096
1196
1197 enum {
1198 SH_CPU_HAS_FPU = 0x0001, /* Hardware FPU support */
1199 SH_CPU_HAS_P2_FLUSH_BUG = 0x0002, /* Need to flush the cache in P2 area */
1200 SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */
1201 SH_CPU_HAS_DSP = 0x0008, /* SH-DSP: DSP support */
1202 SH_CPU_HAS_PERF_COUNTER = 0x0010, /* Hardware performance counters */
1203 SH_CPU_HAS_PTEA = 0x0020, /* PTEA register */
1204 SH_CPU_HAS_LLSC = 0x0040, /* movli.l/movco.l */
1205 SH_CPU_HAS_L2_CACHE = 0x0080, /* Secondary cache / URAM */
1206 SH_CPU_HAS_OP32 = 0x0100, /* 32-bit instruction support */
1207 SH_CPU_HAS_PTEAEX = 0x0200, /* PTE ASID Extension support */
1208 };
1209
1210 #define ELF_HWCAP get_elf_hwcap()
1211
1212 static uint32_t get_elf_hwcap(void)
1213 {
1214 SuperHCPU *cpu = SUPERH_CPU(thread_cpu);
1215 uint32_t hwcap = 0;
1216
1217 hwcap |= SH_CPU_HAS_FPU;
1218
1219 if (cpu->env.features & SH_FEATURE_SH4A) {
1220 hwcap |= SH_CPU_HAS_LLSC;
1221 }
1222
1223 return hwcap;
1224 }
1225
1226 #endif
1227
1228 #ifdef TARGET_CRIS
1229
1230 #define ELF_START_MMAP 0x80000000
1231
1232 #define ELF_CLASS ELFCLASS32
1233 #define ELF_ARCH EM_CRIS
1234
1235 static inline void init_thread(struct target_pt_regs *regs,
1236 struct image_info *infop)
1237 {
1238 regs->erp = infop->entry;
1239 }
1240
1241 #define ELF_EXEC_PAGESIZE 8192
1242
1243 #endif
1244
1245 #ifdef TARGET_M68K
1246
1247 #define ELF_START_MMAP 0x80000000
1248
1249 #define ELF_CLASS ELFCLASS32
1250 #define ELF_ARCH EM_68K
1251
1252 /* ??? Does this need to do anything?
1253 #define ELF_PLAT_INIT(_r) */
1254
1255 static inline void init_thread(struct target_pt_regs *regs,
1256 struct image_info *infop)
1257 {
1258 regs->usp = infop->start_stack;
1259 regs->sr = 0;
1260 regs->pc = infop->entry;
1261 }
1262
1263 /* See linux kernel: arch/m68k/include/asm/elf.h. */
1264 #define ELF_NREG 20
1265 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1266
1267 static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
1268 {
1269 (*regs)[0] = tswapreg(env->dregs[1]);
1270 (*regs)[1] = tswapreg(env->dregs[2]);
1271 (*regs)[2] = tswapreg(env->dregs[3]);
1272 (*regs)[3] = tswapreg(env->dregs[4]);
1273 (*regs)[4] = tswapreg(env->dregs[5]);
1274 (*regs)[5] = tswapreg(env->dregs[6]);
1275 (*regs)[6] = tswapreg(env->dregs[7]);
1276 (*regs)[7] = tswapreg(env->aregs[0]);
1277 (*regs)[8] = tswapreg(env->aregs[1]);
1278 (*regs)[9] = tswapreg(env->aregs[2]);
1279 (*regs)[10] = tswapreg(env->aregs[3]);
1280 (*regs)[11] = tswapreg(env->aregs[4]);
1281 (*regs)[12] = tswapreg(env->aregs[5]);
1282 (*regs)[13] = tswapreg(env->aregs[6]);
1283 (*regs)[14] = tswapreg(env->dregs[0]);
1284 (*regs)[15] = tswapreg(env->aregs[7]);
1285 (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */
1286 (*regs)[17] = tswapreg(env->sr);
1287 (*regs)[18] = tswapreg(env->pc);
1288 (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */
1289 }
1290
1291 #define USE_ELF_CORE_DUMP
1292 #define ELF_EXEC_PAGESIZE 8192
1293
1294 #endif
1295
1296 #ifdef TARGET_ALPHA
1297
1298 #define ELF_START_MMAP (0x30000000000ULL)
1299
1300 #define ELF_CLASS ELFCLASS64
1301 #define ELF_ARCH EM_ALPHA
1302
1303 static inline void init_thread(struct target_pt_regs *regs,
1304 struct image_info *infop)
1305 {
1306 regs->pc = infop->entry;
1307 regs->ps = 8;
1308 regs->usp = infop->start_stack;
1309 }
1310
1311 #define ELF_EXEC_PAGESIZE 8192
1312
1313 #endif /* TARGET_ALPHA */
1314
1315 #ifdef TARGET_S390X
1316
1317 #define ELF_START_MMAP (0x20000000000ULL)
1318
1319 #define ELF_CLASS ELFCLASS64
1320 #define ELF_DATA ELFDATA2MSB
1321 #define ELF_ARCH EM_S390
1322
1323 #include "elf.h"
1324
1325 #define ELF_HWCAP get_elf_hwcap()
1326
1327 #define GET_FEATURE(_feat, _hwcap) \
1328 do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0)
1329
1330 static uint32_t get_elf_hwcap(void)
1331 {
1332 /*
1333 * Let's assume we always have esan3 and zarch.
1334 * 31-bit processes can use 64-bit registers (high gprs).
1335 */
1336 uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS;
1337
1338 GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE);
1339 GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA);
1340 GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP);
1341 GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM);
1342 if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) &&
1343 s390_has_feat(S390_FEAT_ETF3_ENH)) {
1344 hwcap |= HWCAP_S390_ETF3EH;
1345 }
1346 GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS);
1347
1348 return hwcap;
1349 }
1350
1351 static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
1352 {
1353 regs->psw.addr = infop->entry;
1354 regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
1355 regs->gprs[15] = infop->start_stack;
1356 }
1357
1358 #endif /* TARGET_S390X */
1359
1360 #ifdef TARGET_TILEGX
1361
1362 /* 42 bits of address space are actually used, half of it for user mode */
1363 #define ELF_START_MMAP (0x00000020000000000ULL)
1364
1365 #define elf_check_arch(x) ((x) == EM_TILEGX)
1366
1367 #define ELF_CLASS ELFCLASS64
1368 #define ELF_DATA ELFDATA2LSB
1369 #define ELF_ARCH EM_TILEGX
1370
1371 static inline void init_thread(struct target_pt_regs *regs,
1372 struct image_info *infop)
1373 {
1374 regs->pc = infop->entry;
1375 regs->sp = infop->start_stack;
1376
1377 }
1378
1379 #define ELF_EXEC_PAGESIZE 65536 /* TILE-Gx page size is 64KB */
1380
1381 #endif /* TARGET_TILEGX */
1382
1383 #ifdef TARGET_RISCV
1384
1385 #define ELF_START_MMAP 0x80000000
1386 #define ELF_ARCH EM_RISCV
1387
1388 #ifdef TARGET_RISCV32
1389 #define ELF_CLASS ELFCLASS32
1390 #else
1391 #define ELF_CLASS ELFCLASS64
1392 #endif
1393
1394 static inline void init_thread(struct target_pt_regs *regs,
1395 struct image_info *infop)
1396 {
1397 regs->sepc = infop->entry;
1398 regs->sp = infop->start_stack;
1399 }
1400
1401 #define ELF_EXEC_PAGESIZE 4096
1402
1403 #endif /* TARGET_RISCV */
1404
1405 #ifdef TARGET_HPPA
1406
1407 #define ELF_START_MMAP 0x80000000
1408 #define ELF_CLASS ELFCLASS32
1409 #define ELF_ARCH EM_PARISC
1410 #define ELF_PLATFORM "PARISC"
1411 #define STACK_GROWS_DOWN 0
1412 #define STACK_ALIGNMENT 64
1413
1414 static inline void init_thread(struct target_pt_regs *regs,
1415 struct image_info *infop)
1416 {
1417 regs->iaoq[0] = infop->entry;
1418 regs->iaoq[1] = infop->entry + 4;
1419 regs->gr[23] = 0;
1420 regs->gr[24] = infop->arg_start;
1421 regs->gr[25] = (infop->arg_end - infop->arg_start) / sizeof(abi_ulong);
1422 /* The top-of-stack contains a linkage buffer. */
1423 regs->gr[30] = infop->start_stack + 64;
1424 regs->gr[31] = infop->entry;
1425 }
1426
1427 #endif /* TARGET_HPPA */
1428
1429 #ifdef TARGET_XTENSA
1430
1431 #define ELF_START_MMAP 0x20000000
1432
1433 #define ELF_CLASS ELFCLASS32
1434 #define ELF_ARCH EM_XTENSA
1435
1436 static inline void init_thread(struct target_pt_regs *regs,
1437 struct image_info *infop)
1438 {
1439 regs->windowbase = 0;
1440 regs->windowstart = 1;
1441 regs->areg[1] = infop->start_stack;
1442 regs->pc = infop->entry;
1443 }
1444
1445 /* See linux kernel: arch/xtensa/include/asm/elf.h. */
1446 #define ELF_NREG 128
1447 typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
1448
1449 enum {
1450 TARGET_REG_PC,
1451 TARGET_REG_PS,
1452 TARGET_REG_LBEG,
1453 TARGET_REG_LEND,
1454 TARGET_REG_LCOUNT,
1455 TARGET_REG_SAR,
1456 TARGET_REG_WINDOWSTART,
1457 TARGET_REG_WINDOWBASE,
1458 TARGET_REG_THREADPTR,
1459 TARGET_REG_AR0 = 64,
1460 };
1461
1462 static void elf_core_copy_regs(target_elf_gregset_t *regs,
1463 const CPUXtensaState *env)
1464 {
1465 unsigned i;
1466
1467 (*regs)[TARGET_REG_PC] = tswapreg(env->pc);
1468 (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM);
1469 (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]);
1470 (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]);
1471 (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]);
1472 (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]);
1473 (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]);
1474 (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]);
1475 (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]);
1476 xtensa_sync_phys_from_window((CPUXtensaState *)env);
1477 for (i = 0; i < env->config->nareg; ++i) {
1478 (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]);
1479 }
1480 }
1481
1482 #define USE_ELF_CORE_DUMP
1483 #define ELF_EXEC_PAGESIZE 4096
1484
1485 #endif /* TARGET_XTENSA */
1486
1487 #ifndef ELF_PLATFORM
1488 #define ELF_PLATFORM (NULL)
1489 #endif
1490
1491 #ifndef ELF_MACHINE
1492 #define ELF_MACHINE ELF_ARCH
1493 #endif
1494
1495 #ifndef elf_check_arch
1496 #define elf_check_arch(x) ((x) == ELF_ARCH)
1497 #endif
1498
1499 #ifndef elf_check_abi
1500 #define elf_check_abi(x) (1)
1501 #endif
1502
1503 #ifndef ELF_HWCAP
1504 #define ELF_HWCAP 0
1505 #endif
1506
1507 #ifndef STACK_GROWS_DOWN
1508 #define STACK_GROWS_DOWN 1
1509 #endif
1510
1511 #ifndef STACK_ALIGNMENT
1512 #define STACK_ALIGNMENT 16
1513 #endif
1514
1515 #ifdef TARGET_ABI32
1516 #undef ELF_CLASS
1517 #define ELF_CLASS ELFCLASS32
1518 #undef bswaptls
1519 #define bswaptls(ptr) bswap32s(ptr)
1520 #endif
1521
1522 #include "elf.h"
1523
1524 struct exec
1525 {
1526 unsigned int a_info; /* Use macros N_MAGIC, etc for access */
1527 unsigned int a_text; /* length of text, in bytes */
1528 unsigned int a_data; /* length of data, in bytes */
1529 unsigned int a_bss; /* length of uninitialized data area, in bytes */
1530 unsigned int a_syms; /* length of symbol table data in file, in bytes */
1531 unsigned int a_entry; /* start address */
1532 unsigned int a_trsize; /* length of relocation info for text, in bytes */
1533 unsigned int a_drsize; /* length of relocation info for data, in bytes */
1534 };
1535
1536
1537 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
1538 #define OMAGIC 0407
1539 #define NMAGIC 0410
1540 #define ZMAGIC 0413
1541 #define QMAGIC 0314
1542
1543 /* Necessary parameters */
1544 #define TARGET_ELF_EXEC_PAGESIZE \
1545 (((eppnt->p_align & ~qemu_host_page_mask) != 0) ? \
1546 TARGET_PAGE_SIZE : MAX(qemu_host_page_size, TARGET_PAGE_SIZE))
1547 #define TARGET_ELF_PAGELENGTH(_v) ROUND_UP((_v), TARGET_ELF_EXEC_PAGESIZE)
1548 #define TARGET_ELF_PAGESTART(_v) ((_v) & \
1549 ~(abi_ulong)(TARGET_ELF_EXEC_PAGESIZE-1))
1550 #define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
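/*
 * Worked example (editorial, assuming TARGET_ELF_EXEC_PAGESIZE is
 * 0x1000): for _v = 0x10234,
 *
 *     TARGET_ELF_PAGESTART(_v)  == 0x10000
 *     TARGET_ELF_PAGEOFFSET(_v) == 0x234
 *     TARGET_ELF_PAGELENGTH(_v) == 0x11000
 *
 * the usual round-down / remainder / round-up trio for mapping a
 * segment whose p_vaddr is not page aligned.
 */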
1551
1552 #define DLINFO_ITEMS 16
1553
1554 static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
1555 {
1556 memcpy(to, from, n);
1557 }
1558
1559 #ifdef BSWAP_NEEDED
1560 static void bswap_ehdr(struct elfhdr *ehdr)
1561 {
1562 bswap16s(&ehdr->e_type); /* Object file type */
1563 bswap16s(&ehdr->e_machine); /* Architecture */
1564 bswap32s(&ehdr->e_version); /* Object file version */
1565 bswaptls(&ehdr->e_entry); /* Entry point virtual address */
1566 bswaptls(&ehdr->e_phoff); /* Program header table file offset */
1567 bswaptls(&ehdr->e_shoff); /* Section header table file offset */
1568 bswap32s(&ehdr->e_flags); /* Processor-specific flags */
1569 bswap16s(&ehdr->e_ehsize); /* ELF header size in bytes */
1570 bswap16s(&ehdr->e_phentsize); /* Program header table entry size */
1571 bswap16s(&ehdr->e_phnum); /* Program header table entry count */
1572 bswap16s(&ehdr->e_shentsize); /* Section header table entry size */
1573 bswap16s(&ehdr->e_shnum); /* Section header table entry count */
1574 bswap16s(&ehdr->e_shstrndx); /* Section header string table index */
1575 }
1576
1577 static void bswap_phdr(struct elf_phdr *phdr, int phnum)
1578 {
1579 int i;
1580 for (i = 0; i < phnum; ++i, ++phdr) {
1581 bswap32s(&phdr->p_type); /* Segment type */
1582 bswap32s(&phdr->p_flags); /* Segment flags */
1583 bswaptls(&phdr->p_offset); /* Segment file offset */
1584 bswaptls(&phdr->p_vaddr); /* Segment virtual address */
1585 bswaptls(&phdr->p_paddr); /* Segment physical address */
1586 bswaptls(&phdr->p_filesz); /* Segment size in file */
1587 bswaptls(&phdr->p_memsz); /* Segment size in memory */
1588 bswaptls(&phdr->p_align); /* Segment alignment */
1589 }
1590 }
1591
1592 static void bswap_shdr(struct elf_shdr *shdr, int shnum)
1593 {
1594 int i;
1595 for (i = 0; i < shnum; ++i, ++shdr) {
1596 bswap32s(&shdr->sh_name);
1597 bswap32s(&shdr->sh_type);
1598 bswaptls(&shdr->sh_flags);
1599 bswaptls(&shdr->sh_addr);
1600 bswaptls(&shdr->sh_offset);
1601 bswaptls(&shdr->sh_size);
1602 bswap32s(&shdr->sh_link);
1603 bswap32s(&shdr->sh_info);
1604 bswaptls(&shdr->sh_addralign);
1605 bswaptls(&shdr->sh_entsize);
1606 }
1607 }
1608
1609 static void bswap_sym(struct elf_sym *sym)
1610 {
1611 bswap32s(&sym->st_name);
1612 bswaptls(&sym->st_value);
1613 bswaptls(&sym->st_size);
1614 bswap16s(&sym->st_shndx);
1615 }
1616
1617 #ifdef TARGET_MIPS
1618 static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags)
1619 {
1620 bswap16s(&abiflags->version);
1621 bswap32s(&abiflags->ases);
1622 bswap32s(&abiflags->isa_ext);
1623 bswap32s(&abiflags->flags1);
1624 bswap32s(&abiflags->flags2);
1625 }
1626 #endif
1627 #else
1628 static inline void bswap_ehdr(struct elfhdr *ehdr) { }
1629 static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
1630 static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
1631 static inline void bswap_sym(struct elf_sym *sym) { }
1632 #ifdef TARGET_MIPS
1633 static inline void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) { }
1634 #endif
1635 #endif
1636
1637 #ifdef USE_ELF_CORE_DUMP
1638 static int elf_core_dump(int, const CPUArchState *);
1639 #endif /* USE_ELF_CORE_DUMP */
1640 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
1641
1642 /* Verify the portions of EHDR within E_IDENT for the target.
1643 This can be performed before bswapping the entire header. */
1644 static bool elf_check_ident(struct elfhdr *ehdr)
1645 {
1646 return (ehdr->e_ident[EI_MAG0] == ELFMAG0
1647 && ehdr->e_ident[EI_MAG1] == ELFMAG1
1648 && ehdr->e_ident[EI_MAG2] == ELFMAG2
1649 && ehdr->e_ident[EI_MAG3] == ELFMAG3
1650 && ehdr->e_ident[EI_CLASS] == ELF_CLASS
1651 && ehdr->e_ident[EI_DATA] == ELF_DATA
1652 && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
1653 }
1654
1655 /* Verify the portions of EHDR outside of E_IDENT for the target.
1656 This has to wait until after bswapping the header. */
1657 static bool elf_check_ehdr(struct elfhdr *ehdr)
1658 {
1659 return (elf_check_arch(ehdr->e_machine)
1660 && elf_check_abi(ehdr->e_flags)
1661 && ehdr->e_ehsize == sizeof(struct elfhdr)
1662 && ehdr->e_phentsize == sizeof(struct elf_phdr)
1663 && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
1664 }
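/*
 * Editorial sketch of the intended use (the real loader below performs
 * these steps with full error handling):
 *
 *     struct elfhdr ehdr;
 *     if (pread(fd, &ehdr, sizeof(ehdr), 0) == sizeof(ehdr)
 *         && elf_check_ident(&ehdr)) {
 *         bswap_ehdr(&ehdr);            // no-op unless BSWAP_NEEDED
 *         if (elf_check_ehdr(&ehdr)) {
 *             // header is usable for this target
 *         }
 *     }
 */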
1665
1666 /*
1667  * 'copy_elf_strings()' copies argument/environment strings from user
1668  * memory to free pages in kernel memory.  These are in a format ready
1669 * to be put directly into the top of new user memory.
1670 *
1671 */
1672 static abi_ulong copy_elf_strings(int argc, char **argv, char *scratch,
1673 abi_ulong p, abi_ulong stack_limit)
1674 {
1675 char *tmp;
1676 int len, i;
1677 abi_ulong top = p;
1678
1679 if (!p) {
1680 return 0; /* bullet-proofing */
1681 }
1682
1683 if (STACK_GROWS_DOWN) {
1684 int offset = ((p - 1) % TARGET_PAGE_SIZE) + 1;
1685 for (i = argc - 1; i >= 0; --i) {
1686 tmp = argv[i];
1687 if (!tmp) {
1688                 fprintf(stderr, "VFS: argc is wrong\n");
1689 exit(-1);
1690 }
1691 len = strlen(tmp) + 1;
1692 tmp += len;
1693
1694 if (len > (p - stack_limit)) {
1695 return 0;
1696 }
1697 while (len) {
1698 int bytes_to_copy = (len > offset) ? offset : len;
1699 tmp -= bytes_to_copy;
1700 p -= bytes_to_copy;
1701 offset -= bytes_to_copy;
1702 len -= bytes_to_copy;
1703
1704 memcpy_fromfs(scratch + offset, tmp, bytes_to_copy);
1705
1706 if (offset == 0) {
1707 memcpy_to_target(p, scratch, top - p);
1708 top = p;
1709 offset = TARGET_PAGE_SIZE;
1710 }
1711 }
1712 }
1713 if (p != top) {
1714 memcpy_to_target(p, scratch + offset, top - p);
1715 }
1716 } else {
1717 int remaining = TARGET_PAGE_SIZE - (p % TARGET_PAGE_SIZE);
1718 for (i = 0; i < argc; ++i) {
1719 tmp = argv[i];
1720 if (!tmp) {
1721                 fprintf(stderr, "VFS: argc is wrong\n");
1722 exit(-1);
1723 }
1724 len = strlen(tmp) + 1;
1725 if (len > (stack_limit - p)) {
1726 return 0;
1727 }
1728 while (len) {
1729 int bytes_to_copy = (len > remaining) ? remaining : len;
1730
1731 memcpy_fromfs(scratch + (p - top), tmp, bytes_to_copy);
1732
1733 tmp += bytes_to_copy;
1734 remaining -= bytes_to_copy;
1735 p += bytes_to_copy;
1736 len -= bytes_to_copy;
1737
1738 if (remaining == 0) {
1739 memcpy_to_target(top, scratch, p - top);
1740 top = p;
1741 remaining = TARGET_PAGE_SIZE;
1742 }
1743 }
1744 }
1745 if (p != top) {
1746 memcpy_to_target(top, scratch, p - top);
1747 }
1748 }
1749
1750 return p;
1751 }
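/*
 * Typical call sequence (editorial sketch, simplified from the loader
 * later in this file): with a downward-growing stack, strings are
 * packed downwards from the stack limit,
 *
 *     p = copy_elf_strings(1, &bprm->filename, scratch, p, limit);
 *     p = copy_elf_strings(bprm->envc, bprm->envp, scratch, p, limit);
 *     p = copy_elf_strings(bprm->argc, bprm->argv, scratch, p, limit);
 *
 * and a return value of 0 signals that the strings did not fit.
 */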
1752
1753 /* Older linux kernels provide up to MAX_ARG_PAGES (default: 32) of
1754 * argument/environment space. Newer kernels (>2.6.33) allow more,
1755 * dependent on stack size, but guarantee at least 32 pages for
1756 * backwards compatibility.
1757 */
1758 #define STACK_LOWER_LIMIT (32 * TARGET_PAGE_SIZE)
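/* Editorial note: with 4 KiB target pages this guarantees 128 KiB. */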
1759
1760 static abi_ulong setup_arg_pages(struct linux_binprm *bprm,
1761 struct image_info *info)
1762 {
1763 abi_ulong size, error, guard;
1764
1765 size = guest_stack_size;
1766 if (size < STACK_LOWER_LIMIT) {
1767 size = STACK_LOWER_LIMIT;
1768 }
1769 guard = TARGET_PAGE_SIZE;
1770 if (guard < qemu_real_host_page_size) {
1771 guard = qemu_real_host_page_size;
1772 }
1773
1774 error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1775 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1776 if (error == -1) {
1777 perror("mmap stack");
1778 exit(-1);
1779 }
1780
1781 /* We reserve one extra page at the top of the stack as guard. */
1782 if (STACK_GROWS_DOWN) {
1783 target_mprotect(error, guard, PROT_NONE);
1784 info->stack_limit = error + guard;
1785 return info->stack_limit + size - sizeof(void *);
1786 } else {
1787 target_mprotect(error + size, guard, PROT_NONE);
1788 info->stack_limit = error + size;
1789 return error;
1790 }
1791 }
1792
1793 /* Map and zero the bss. We need to explicitly zero any fractional pages
1794 after the data section (i.e. bss). */
1795 static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1796 {
1797 uintptr_t host_start, host_map_start, host_end;
1798
1799 last_bss = TARGET_PAGE_ALIGN(last_bss);
1800
1801 /* ??? There is confusion between qemu_real_host_page_size and
1802 qemu_host_page_size here and elsewhere in target_mmap, which
1803 may lead to the end of the data section mapping from the file
1804 not being mapped. At least there was an explicit test and
1805 comment for that here, suggesting that "the file size must
1806 be known". The comment probably pre-dates the introduction
1807 of the fstat system call in target_mmap which does in fact
1808 find out the size. What isn't clear is if the workaround
1809 here is still actually needed. For now, continue with it,
1810 but merge it with the "normal" mmap that would allocate the bss. */
1811
1812 host_start = (uintptr_t) g2h(elf_bss);
1813 host_end = (uintptr_t) g2h(last_bss);
1814 host_map_start = REAL_HOST_PAGE_ALIGN(host_start);
1815
1816 if (host_map_start < host_end) {
1817 void *p = mmap((void *)host_map_start, host_end - host_map_start,
1818 prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1819 if (p == MAP_FAILED) {
1820 perror("cannot mmap brk");
1821 exit(-1);
1822 }
1823 }
1824
1825 /* Ensure that the bss page(s) are valid */
1826 if ((page_get_flags(last_bss-1) & prot) != prot) {
1827 page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot | PAGE_VALID);
1828 }
1829
1830 if (host_start < host_map_start) {
1831 memset((void *)host_start, 0, host_map_start - host_start);
1832 }
1833 }
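/*
 * Example (editorial, assuming 4 KiB pages on both host and target):
 * with elf_bss = 0x21340 and last_bss = 0x23000, the partially mapped
 * page [0x21340, 0x22000) is zeroed by hand with memset() while
 * [0x22000, 0x23000) is mapped as fresh anonymous (zeroed) memory.
 */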
1834
1835 #ifdef TARGET_ARM
1836 static int elf_is_fdpic(struct elfhdr *exec)
1837 {
1838 return exec->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC;
1839 }
1840 #else
1841 /* Default implementation, always false. */
1842 static int elf_is_fdpic(struct elfhdr *exec)
1843 {
1844 return 0;
1845 }
1846 #endif
1847
1848 static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
1849 {
1850 uint16_t n;
1851 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
1852
1853 /* elf32_fdpic_loadseg */
1854 n = info->nsegs;
1855 while (n--) {
1856 sp -= 12;
1857 put_user_u32(loadsegs[n].addr, sp+0);
1858 put_user_u32(loadsegs[n].p_vaddr, sp+4);
1859 put_user_u32(loadsegs[n].p_memsz, sp+8);
1860 }
1861
1862 /* elf32_fdpic_loadmap */
1863 sp -= 4;
1864 put_user_u16(0, sp+0); /* version */
1865 put_user_u16(info->nsegs, sp+2); /* nsegs */
1866
1867 info->personality = PER_LINUX_FDPIC;
1868 info->loadmap_addr = sp;
1869
1870 return sp;
1871 }
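
/*
 * Sketch of the guest stack layout produced above (the stack grows
 * toward lower addresses; sp is the value returned):
 *
 *   sp -> +------------------------+  <- info->loadmap_addr
 *         | version (u16) == 0     |
 *         | nsegs   (u16)          |
 *         +------------------------+
 *         | seg[0].addr      (u32) |
 *         | seg[0].p_vaddr   (u32) |
 *         | seg[0].p_memsz   (u32) |
 *         +------------------------+
 *         | ...    seg[nsegs-1]    |
 *         +------------------------+  <- sp on entry
 *
 * This matches the elf32_fdpic_loadmap layout: a 4-byte header
 * immediately followed by the load segment array.
 */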
1872
1873 static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1874 struct elfhdr *exec,
1875 struct image_info *info,
1876 struct image_info *interp_info)
1877 {
1878 abi_ulong sp;
1879 abi_ulong u_argc, u_argv, u_envp, u_auxv;
1880 int size;
1881 int i;
1882 abi_ulong u_rand_bytes;
1883 uint8_t k_rand_bytes[16];
1884 abi_ulong u_platform;
1885 const char *k_platform;
1886 const int n = sizeof(elf_addr_t);
1887
1888 sp = p;
1889
1890 /* Needs to be before we load the env/argc/... */
1891 if (elf_is_fdpic(exec)) {
1892 /* Need 4 byte alignment for these structs */
1893 sp &= ~3;
1894 sp = loader_build_fdpic_loadmap(info, sp);
1895 info->other_info = interp_info;
1896 if (interp_info) {
1897 interp_info->other_info = info;
1898 sp = loader_build_fdpic_loadmap(interp_info, sp);
1899 info->interpreter_loadmap_addr = interp_info->loadmap_addr;
1900 info->interpreter_pt_dynamic_addr = interp_info->pt_dynamic_addr;
1901 } else {
1902 info->interpreter_loadmap_addr = 0;
1903 info->interpreter_pt_dynamic_addr = 0;
1904 }
1905 }
1906
1907 u_platform = 0;
1908 k_platform = ELF_PLATFORM;
1909 if (k_platform) {
1910 size_t len = strlen(k_platform) + 1;
1911 if (STACK_GROWS_DOWN) {
1912 sp -= (len + n - 1) & ~(n - 1);
1913 u_platform = sp;
1914 /* FIXME - check return value of memcpy_to_target() for failure */
1915 memcpy_to_target(sp, k_platform, len);
1916 } else {
1917 memcpy_to_target(sp, k_platform, len);
1918 u_platform = sp;
1919 sp += len + 1;
1920 }
1921 }
1922
1923 /* Provide 16 byte alignment for the PRNG, and basic alignment for
1924 * the argv and envp pointers.
1925 */
1926 if (STACK_GROWS_DOWN) {
1927 sp = QEMU_ALIGN_DOWN(sp, 16);
1928 } else {
1929 sp = QEMU_ALIGN_UP(sp, 16);
1930 }
1931
1932 /*
1933 * Generate 16 random bytes for userspace PRNG seeding.
1934 */
1935 qemu_guest_getrandom_nofail(k_rand_bytes, sizeof(k_rand_bytes));
1936 if (STACK_GROWS_DOWN) {
1937 sp -= 16;
1938 u_rand_bytes = sp;
1939 /* FIXME - check return value of memcpy_to_target() for failure */
1940 memcpy_to_target(sp, k_rand_bytes, 16);
1941 } else {
1942 memcpy_to_target(sp, k_rand_bytes, 16);
1943 u_rand_bytes = sp;
1944 sp += 16;
1945 }
1946
1947 size = (DLINFO_ITEMS + 1) * 2;
1948 if (k_platform)
1949 size += 2;
1950 #ifdef DLINFO_ARCH_ITEMS
1951 size += DLINFO_ARCH_ITEMS * 2;
1952 #endif
1953 #ifdef ELF_HWCAP2
1954 size += 2;
1955 #endif
1956 info->auxv_len = size * n;
1957
1958 size += envc + argc + 2;
1959 size += 1; /* argc itself */
1960 size *= n;
1961
1962 /* Allocate space and finalize stack alignment for entry now. */
1963 if (STACK_GROWS_DOWN) {
1964 u_argc = QEMU_ALIGN_DOWN(sp - size, STACK_ALIGNMENT);
1965 sp = u_argc;
1966 } else {
1967 u_argc = sp;
1968 sp = QEMU_ALIGN_UP(sp + size, STACK_ALIGNMENT);
1969 }
1970
1971 u_argv = u_argc + n;
1972 u_envp = u_argv + (argc + 1) * n;
1973 u_auxv = u_envp + (envc + 1) * n;
1974 info->saved_auxv = u_auxv;
1975 info->arg_start = u_argv;
1976 info->arg_end = u_argv + argc * n;
1977
1978 /* This is correct because Linux defines
1979 * elf_addr_t as Elf32_Off / Elf64_Off
1980 */
1981 #define NEW_AUX_ENT(id, val) do { \
1982 put_user_ual(id, u_auxv); u_auxv += n; \
1983 put_user_ual(val, u_auxv); u_auxv += n; \
1984 } while(0)
1985
1986 #ifdef ARCH_DLINFO
1987 /*
1988 * ARCH_DLINFO must come first so platform specific code can enforce
1989 * special alignment requirements on the AUXV if necessary (eg. PPC).
1990 */
1991 ARCH_DLINFO;
1992 #endif
1993 /* There must be exactly DLINFO_ITEMS entries here, or the assert
1994 * on info->auxv_len will trigger.
1995 */
1996 NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
1997 NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1998 NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1999 if ((info->alignment & ~qemu_host_page_mask) != 0) {
2000 /* Target doesn't support host page size alignment */
2001 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
2002 } else {
2003 NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(MAX(TARGET_PAGE_SIZE,
2004 qemu_host_page_size)));
2005 }
2006 NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
2007 NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
2008 NEW_AUX_ENT(AT_ENTRY, info->entry);
2009 NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
2010 NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
2011 NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
2012 NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
2013 NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
2014 NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
2015 NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
2016 NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE));
2017 NEW_AUX_ENT(AT_EXECFN, info->file_string);
2018
2019 #ifdef ELF_HWCAP2
2020 NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2);
2021 #endif
2022
2023 if (u_platform) {
2024 NEW_AUX_ENT(AT_PLATFORM, u_platform);
2025 }
2026     NEW_AUX_ENT(AT_NULL, 0);
2027 #undef NEW_AUX_ENT
2028
2029 /* Check that our initial calculation of the auxv length matches how much
2030 * we actually put into it.
2031 */
2032 assert(info->auxv_len == u_auxv - info->saved_auxv);
2033
2034 put_user_ual(argc, u_argc);
2035
2036 p = info->arg_strings;
2037 for (i = 0; i < argc; ++i) {
2038 put_user_ual(p, u_argv);
2039 u_argv += n;
2040 p += target_strlen(p) + 1;
2041 }
2042 put_user_ual(0, u_argv);
2043
2044 p = info->env_strings;
2045 for (i = 0; i < envc; ++i) {
2046 put_user_ual(p, u_envp);
2047 u_envp += n;
2048 p += target_strlen(p) + 1;
2049 }
2050 put_user_ual(0, u_envp);
2051
2052 return sp;
2053 }
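
/*
 * Illustration (guest-side, not part of QEMU itself): a program
 * running under this loader can retrieve the auxv entries written
 * above with getauxval(3).  A minimal sketch:
 *
 *   #include <sys/auxv.h>
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *       const unsigned char *rnd =
 *           (const unsigned char *)getauxval(AT_RANDOM);
 *       printf("page size: %lu\n", getauxval(AT_PAGESZ));
 *       if (rnd) {
 *           printf("first AT_RANDOM byte: %02x\n", rnd[0]);
 *       }
 *       return 0;
 *   }
 */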
2054
2055 #ifndef ARM_COMMPAGE
2056 #define ARM_COMMPAGE 0
2057 #define init_guest_commpage() true
2058 #endif
2059
2060 static void pgb_fail_in_use(const char *image_name)
2061 {
2062 error_report("%s: requires virtual address space that is in use "
2063 "(omit the -B option or choose a different value)",
2064 image_name);
2065 exit(EXIT_FAILURE);
2066 }
2067
2068 static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
2069 abi_ulong guest_hiaddr, long align)
2070 {
2071 const int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
2072 void *addr, *test;
2073
2074 if (!QEMU_IS_ALIGNED(guest_base, align)) {
2075 fprintf(stderr, "Requested guest base 0x%lx does not satisfy "
2076 "host minimum alignment (0x%lx)\n",
2077 guest_base, align);
2078 exit(EXIT_FAILURE);
2079 }
2080
2081 /* Sanity check the guest binary. */
2082 if (reserved_va) {
2083 if (guest_hiaddr > reserved_va) {
2084 error_report("%s: requires more than reserved virtual "
2085 "address space (0x%" PRIx64 " > 0x%lx)",
2086 image_name, (uint64_t)guest_hiaddr, reserved_va);
2087 exit(EXIT_FAILURE);
2088 }
2089 } else {
2090 #if HOST_LONG_BITS < TARGET_ABI_BITS
2091 if ((guest_hiaddr - guest_base) > ~(uintptr_t)0) {
2092 error_report("%s: requires more virtual address space "
2093 "than the host can provide (0x%" PRIx64 ")",
2094 image_name, (uint64_t)guest_hiaddr - guest_base);
2095 exit(EXIT_FAILURE);
2096 }
2097 #endif
2098 }
2099
2100 /*
2101 * Expand the allocation to the entire reserved_va.
2102 * Exclude the mmap_min_addr hole.
2103 */
2104 if (reserved_va) {
2105 guest_loaddr = (guest_base >= mmap_min_addr ? 0
2106 : mmap_min_addr - guest_base);
2107 guest_hiaddr = reserved_va;
2108 }
2109
2110 /* Reserve the address space for the binary, or reserved_va. */
2111 test = g2h(guest_loaddr);
2112 addr = mmap(test, guest_hiaddr - guest_loaddr, PROT_NONE, flags, -1, 0);
2113 if (test != addr) {
2114 pgb_fail_in_use(image_name);
2115 }
2116 }
2117
2118 /**
2119 * pgd_find_hole_fallback: potential mmap address
2120 * @guest_size: size of available space
2121 * @brk: location of break
2122 * @align: memory alignment
2123 *
2124 * This is a fallback method for finding a hole in the host address
2125 * space if we don't have the benefit of being able to access
2126  * /proc/self/maps.  It can potentially take a very long time, as we can
2127  * only blindly iterate up the host address space, checking whether each
2128  * candidate allocation would work.
2129 */
2130 static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk,
2131 long align, uintptr_t offset)
2132 {
2133 uintptr_t base;
2134
2135 /* Start (aligned) at the bottom and work our way up */
2136 base = ROUND_UP(mmap_min_addr, align);
2137
2138 while (true) {
2139 uintptr_t align_start, end;
2140 align_start = ROUND_UP(base, align);
2141 end = align_start + guest_size + offset;
2142
2143 /* if brk is anywhere in the range give ourselves some room to grow. */
2144 if (align_start <= brk && brk < end) {
2145 base = brk + (16 * MiB);
2146 continue;
2147 } else if (align_start + guest_size < align_start) {
2148 /* we have run out of space */
2149 return -1;
2150 } else {
2151 int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE |
2152 MAP_FIXED_NOREPLACE;
2153 void * mmap_start = mmap((void *) align_start, guest_size,
2154 PROT_NONE, flags, -1, 0);
2155 if (mmap_start != MAP_FAILED) {
2156                 munmap(mmap_start, guest_size); /* release the probe mapping */
2157 if (MAP_FIXED_NOREPLACE || mmap_start == (void *) align_start) {
2158 return (uintptr_t) mmap_start + offset;
2159 }
2160 }
2161 base += qemu_host_page_size;
2162 }
2163 }
2164 }
2165
2166 /* Return value for guest_base, or -1 if no hole found. */
2167 static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size,
2168 long align, uintptr_t offset)
2169 {
2170 GSList *maps, *iter;
2171 uintptr_t this_start, this_end, next_start, brk;
2172 intptr_t ret = -1;
2173
2174 assert(QEMU_IS_ALIGNED(guest_loaddr, align));
2175
2176 maps = read_self_maps();
2177
2178 /* Read brk after we've read the maps, which will malloc. */
2179 brk = (uintptr_t)sbrk(0);
2180
2181 if (!maps) {
2182 return pgd_find_hole_fallback(guest_size, brk, align, offset);
2183 }
2184
2185 /* The first hole is before the first map entry. */
2186 this_start = mmap_min_addr;
2187
2188 for (iter = maps; iter;
2189 this_start = next_start, iter = g_slist_next(iter)) {
2190 uintptr_t align_start, hole_size;
2191
2192 this_end = ((MapInfo *)iter->data)->start;
2193 next_start = ((MapInfo *)iter->data)->end;
2194 align_start = ROUND_UP(this_start + offset, align);
2195
2196 /* Skip holes that are too small. */
2197 if (align_start >= this_end) {
2198 continue;
2199 }
2200 hole_size = this_end - align_start;
2201 if (hole_size < guest_size) {
2202 continue;
2203 }
2204
2205 /* If this hole contains brk, give ourselves some room to grow. */
2206 if (this_start <= brk && brk < this_end) {
2207 hole_size -= guest_size;
2208 if (sizeof(uintptr_t) == 8 && hole_size >= 1 * GiB) {
2209 align_start += 1 * GiB;
2210 } else if (hole_size >= 16 * MiB) {
2211 align_start += 16 * MiB;
2212 } else {
2213 align_start = (this_end - guest_size) & -align;
2214 if (align_start < this_start) {
2215 continue;
2216 }
2217 }
2218 }
2219
2220 /* Record the lowest successful match. */
2221 if (ret < 0) {
2222 ret = align_start - guest_loaddr;
2223 }
2224 /* If this hole contains the identity map, select it. */
2225 if (align_start <= guest_loaddr &&
2226 guest_loaddr + guest_size <= this_end) {
2227 ret = 0;
2228 }
2229 /* If this hole ends above the identity map, stop looking. */
2230 if (this_end >= guest_loaddr) {
2231 break;
2232 }
2233 }
2234 free_self_maps(maps);
2235
2236 return ret;
2237 }
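
/*
 * Worked example (illustrative addresses): with mmap_min_addr at
 * 0x10000 and the first /proc/self/maps entry starting at 0x400000,
 * the first hole considered is [0x10000, 0x400000).  For guest_size
 * 0x200000, offset 0 and align 0x10000, align_start is 0x10000 and
 * the hole is large enough, so (unless a later hole contains the
 * identity map) 0x10000 - guest_loaddr is returned as the candidate
 * guest_base.
 */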
2238
2239 static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
2240 abi_ulong orig_hiaddr, long align)
2241 {
2242 uintptr_t loaddr = orig_loaddr;
2243 uintptr_t hiaddr = orig_hiaddr;
2244 uintptr_t offset = 0;
2245 uintptr_t addr;
2246
2247 if (hiaddr != orig_hiaddr) {
2248 error_report("%s: requires virtual address space that the "
2249 "host cannot provide (0x%" PRIx64 ")",
2250 image_name, (uint64_t)orig_hiaddr);
2251 exit(EXIT_FAILURE);
2252 }
2253
2254 loaddr &= -align;
2255 if (ARM_COMMPAGE) {
2256 /*
2257 * Extend the allocation to include the commpage.
2258 * For a 64-bit host, this is just 4GiB; for a 32-bit host we
2259          * need to ensure there is space below the guest_base so we
2260 * can map the commpage in the place needed when the address
2261 * arithmetic wraps around.
2262 */
2263 if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
2264 hiaddr = (uintptr_t) 4 << 30;
2265 } else {
2266 offset = -(ARM_COMMPAGE & -align);
2267 }
2268 }
2269
2270 addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
2271 if (addr == -1) {
2272 /*
2273 * If ARM_COMMPAGE, there *might* be a non-consecutive allocation
2274 * that can satisfy both. But as the normal arm32 link base address
2275 * is ~32k, and we extend down to include the commpage, making the
2276 * overhead only ~96k, this is unlikely.
2277 */
2278 error_report("%s: Unable to allocate %#zx bytes of "
2279 "virtual address space", image_name,
2280 (size_t)(hiaddr - loaddr));
2281 exit(EXIT_FAILURE);
2282 }
2283
2284 guest_base = addr;
2285 }
2286
2287 static void pgb_dynamic(const char *image_name, long align)
2288 {
2289 /*
2290 * The executable is dynamic and does not require a fixed address.
2291 * All we need is a commpage that satisfies align.
2292 * If we do not need a commpage, leave guest_base == 0.
2293 */
2294 if (ARM_COMMPAGE) {
2295 uintptr_t addr, commpage;
2296
2297 /* 64-bit hosts should have used reserved_va. */
2298 assert(sizeof(uintptr_t) == 4);
2299
2300 /*
2301 * By putting the commpage at the first hole, that puts guest_base
2302 * just above that, and maximises the positive guest addresses.
2303 */
2304 commpage = ARM_COMMPAGE & -align;
2305 addr = pgb_find_hole(commpage, -commpage, align, 0);
2306 assert(addr != -1);
2307 guest_base = addr;
2308 }
2309 }
2310
2311 static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
2312 abi_ulong guest_hiaddr, long align)
2313 {
2314 int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
2315 void *addr, *test;
2316
2317 if (guest_hiaddr > reserved_va) {
2318 error_report("%s: requires more than reserved virtual "
2319 "address space (0x%" PRIx64 " > 0x%lx)",
2320 image_name, (uint64_t)guest_hiaddr, reserved_va);
2321 exit(EXIT_FAILURE);
2322 }
2323
2324 /* Widen the "image" to the entire reserved address space. */
2325 pgb_static(image_name, 0, reserved_va, align);
2326
2327 /* osdep.h defines this as 0 if it's missing */
2328 flags |= MAP_FIXED_NOREPLACE;
2329
2330 /* Reserve the memory on the host. */
2331 assert(guest_base != 0);
2332 test = g2h(0);
2333 addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
2334 if (addr == MAP_FAILED || addr != test) {
2335 error_report("Unable to reserve 0x%lx bytes of virtual address "
2336                      "space at %p (%s) for use as guest address space (check your "
2337                      "virtual memory ulimit setting, mmap_min_addr or reserve less "
2338 "using -R option)", reserved_va, test, strerror(errno));
2339 exit(EXIT_FAILURE);
2340 }
2341 }
2342
2343 void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
2344 abi_ulong guest_hiaddr)
2345 {
2346 /* In order to use host shmat, we must be able to honor SHMLBA. */
2347 uintptr_t align = MAX(SHMLBA, qemu_host_page_size);
2348
2349 if (have_guest_base) {
2350 pgb_have_guest_base(image_name, guest_loaddr, guest_hiaddr, align);
2351 } else if (reserved_va) {
2352 pgb_reserved_va(image_name, guest_loaddr, guest_hiaddr, align);
2353 } else if (guest_loaddr) {
2354 pgb_static(image_name, guest_loaddr, guest_hiaddr, align);
2355 } else {
2356 pgb_dynamic(image_name, align);
2357 }
2358
2359 /* Reserve and initialize the commpage. */
2360 if (!init_guest_commpage()) {
2361 /*
2362 * With have_guest_base, the user has selected the address and
2363 * we are trying to work with that. Otherwise, we have selected
2364          * free space and init_guest_commpage must have succeeded.
2365 */
2366 assert(have_guest_base);
2367 pgb_fail_in_use(image_name);
2368 }
2369
2370 assert(QEMU_IS_ALIGNED(guest_base, align));
2371 qemu_log_mask(CPU_LOG_PAGE, "Locating guest address space "
2372 "@ 0x%" PRIx64 "\n", (uint64_t)guest_base);
2373 }
2374
2375 /* Load an ELF image into the address space.
2376
2377 IMAGE_NAME is the filename of the image, to use in error messages.
2378 IMAGE_FD is the open file descriptor for the image.
2379
2380 BPRM_BUF is a copy of the beginning of the file; this of course
2381 contains the elf file header at offset 0. It is assumed that this
2382 buffer is sufficiently aligned to present no problems to the host
2383 in accessing data at aligned offsets within the buffer.
2384
2385 On return: INFO values will be filled in, as necessary or available. */
2386
2387 static void load_elf_image(const char *image_name, int image_fd,
2388 struct image_info *info, char **pinterp_name,
2389 char bprm_buf[BPRM_BUF_SIZE])
2390 {
2391 struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
2392 struct elf_phdr *phdr;
2393 abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
2394 int i, retval;
2395 const char *errmsg;
2396
2397 /* First of all, some simple consistency checks */
2398 errmsg = "Invalid ELF image for this architecture";
2399 if (!elf_check_ident(ehdr)) {
2400 goto exit_errmsg;
2401 }
2402 bswap_ehdr(ehdr);
2403 if (!elf_check_ehdr(ehdr)) {
2404 goto exit_errmsg;
2405 }
2406
2407 i = ehdr->e_phnum * sizeof(struct elf_phdr);
2408 if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
2409 phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
2410 } else {
2411 phdr = (struct elf_phdr *) alloca(i);
2412 retval = pread(image_fd, phdr, i, ehdr->e_phoff);
2413 if (retval != i) {
2414 goto exit_read;
2415 }
2416 }
2417 bswap_phdr(phdr, ehdr->e_phnum);
2418
2419 info->nsegs = 0;
2420 info->pt_dynamic_addr = 0;
2421
2422 mmap_lock();
2423
2424 /* Find the maximum size of the image and allocate an appropriate
2425 amount of memory to handle that. */
2426 loaddr = -1, hiaddr = 0;
2427 info->alignment = 0;
2428 for (i = 0; i < ehdr->e_phnum; ++i) {
2429 if (phdr[i].p_type == PT_LOAD) {
2430 abi_ulong a = phdr[i].p_vaddr - phdr[i].p_offset;
2431 if (a < loaddr) {
2432 loaddr = a;
2433 }
2434 a = phdr[i].p_vaddr + phdr[i].p_memsz;
2435 if (a > hiaddr) {
2436 hiaddr = a;
2437 }
2438 ++info->nsegs;
2439 info->alignment |= phdr[i].p_align;
2440 }
2441 }
2442
2443 if (pinterp_name != NULL) {
2444 /*
2445 * This is the main executable.
2446 *
2447 * Reserve extra space for brk.
2448 * We hold on to this space while placing the interpreter
2449 * and the stack, lest they be placed immediately after
2450 * the data segment and block allocation from the brk.
2451 *
2452 * 16MB is chosen as "large enough" without being so large
2453 * as to allow the result to not fit with a 32-bit guest on
2454 * a 32-bit host.
2455 */
2456 info->reserve_brk = 16 * MiB;
2457 hiaddr += info->reserve_brk;
2458
2459 if (ehdr->e_type == ET_EXEC) {
2460 /*
2461 * Make sure that the low address does not conflict with
2462 * MMAP_MIN_ADDR or the QEMU application itself.
2463 */
2464 probe_guest_base(image_name, loaddr, hiaddr);
2465 } else {
2466 /*
2467 * The binary is dynamic, but we still need to
2468 * select guest_base. In this case we pass a size.
2469 */
2470 probe_guest_base(image_name, 0, hiaddr - loaddr);
2471 }
2472 }
2473
2474 /*
2475 * Reserve address space for all of this.
2476 *
2477 * In the case of ET_EXEC, we supply MAP_FIXED so that we get
2478 * exactly the address range that is required.
2479 *
2480 * Otherwise this is ET_DYN, and we are searching for a location
2481 * that can hold the memory space required. If the image is
2482 * pre-linked, LOADDR will be non-zero, and the kernel should
2483 * honor that address if it happens to be free.
2484 *
2485 * In both cases, we will overwrite pages in this range with mappings
2486 * from the executable.
2487 */
2488 load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
2489 MAP_PRIVATE | MAP_ANON | MAP_NORESERVE |
2490 (ehdr->e_type == ET_EXEC ? MAP_FIXED : 0),
2491 -1, 0);
2492 if (load_addr == -1) {
2493 goto exit_perror;
2494 }
2495 load_bias = load_addr - loaddr;
2496
2497 if (elf_is_fdpic(ehdr)) {
2498 struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
2499 g_malloc(sizeof(*loadsegs) * info->nsegs);
2500
2501 for (i = 0; i < ehdr->e_phnum; ++i) {
2502 switch (phdr[i].p_type) {
2503 case PT_DYNAMIC:
2504 info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
2505 break;
2506 case PT_LOAD:
2507 loadsegs->addr = phdr[i].p_vaddr + load_bias;
2508 loadsegs->p_vaddr = phdr[i].p_vaddr;
2509 loadsegs->p_memsz = phdr[i].p_memsz;
2510 ++loadsegs;
2511 break;
2512 }
2513 }
2514 }
2515
2516 info->load_bias = load_bias;
2517 info->code_offset = load_bias;
2518 info->data_offset = load_bias;
2519 info->load_addr = load_addr;
2520 info->entry = ehdr->e_entry + load_bias;
2521 info->start_code = -1;
2522 info->end_code = 0;
2523 info->start_data = -1;
2524 info->end_data = 0;
2525 info->brk = 0;
2526 info->elf_flags = ehdr->e_flags;
2527
2528 for (i = 0; i < ehdr->e_phnum; i++) {
2529 struct elf_phdr *eppnt = phdr + i;
2530 if (eppnt->p_type == PT_LOAD) {
2531 abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em, vaddr_len;
2532 int elf_prot = 0;
2533
2534 if (eppnt->p_flags & PF_R) elf_prot = PROT_READ;
2535 if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
2536 if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
2537
2538 vaddr = load_bias + eppnt->p_vaddr;
2539 vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
2540 vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
2541 vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
2542
2543 /*
2544 * Some segments may be completely empty without any backing file
2545              * segment; in that case just let zero_bss allocate an empty buffer
2546 * for it.
2547 */
2548 if (eppnt->p_filesz != 0) {
2549 error = target_mmap(vaddr_ps, vaddr_len, elf_prot,
2550 MAP_PRIVATE | MAP_FIXED,
2551 image_fd, eppnt->p_offset - vaddr_po);
2552
2553 if (error == -1) {
2554 goto exit_perror;
2555 }
2556 }
2557
2558 vaddr_ef = vaddr + eppnt->p_filesz;
2559 vaddr_em = vaddr + eppnt->p_memsz;
2560
2561 /* If the load segment requests extra zeros (e.g. bss), map it. */
2562 if (vaddr_ef < vaddr_em) {
2563 zero_bss(vaddr_ef, vaddr_em, elf_prot);
2564 }
2565
2566 /* Find the full program boundaries. */
2567 if (elf_prot & PROT_EXEC) {
2568 if (vaddr < info->start_code) {
2569 info->start_code = vaddr;
2570 }
2571 if (vaddr_ef > info->end_code) {
2572 info->end_code = vaddr_ef;
2573 }
2574 }
2575 if (elf_prot & PROT_WRITE) {
2576 if (vaddr < info->start_data) {
2577 info->start_data = vaddr;
2578 }
2579 if (vaddr_ef > info->end_data) {
2580 info->end_data = vaddr_ef;
2581 }
2582 }
2583 if (vaddr_em > info->brk) {
2584 info->brk = vaddr_em;
2585 }
2586 } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
2587 char *interp_name;
2588
2589 if (*pinterp_name) {
2590 errmsg = "Multiple PT_INTERP entries";
2591 goto exit_errmsg;
2592 }
2593 interp_name = malloc(eppnt->p_filesz);
2594 if (!interp_name) {
2595 goto exit_perror;
2596 }
2597
2598 if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
2599 memcpy(interp_name, bprm_buf + eppnt->p_offset,
2600 eppnt->p_filesz);
2601 } else {
2602 retval = pread(image_fd, interp_name, eppnt->p_filesz,
2603 eppnt->p_offset);
2604 if (retval != eppnt->p_filesz) {
2605 goto exit_perror;
2606 }
2607 }
2608 if (interp_name[eppnt->p_filesz - 1] != 0) {
2609 errmsg = "Invalid PT_INTERP entry";
2610 goto exit_errmsg;
2611 }
2612 *pinterp_name = interp_name;
2613 #ifdef TARGET_MIPS
2614 } else if (eppnt->p_type == PT_MIPS_ABIFLAGS) {
2615 Mips_elf_abiflags_v0 abiflags;
2616 if (eppnt->p_filesz < sizeof(Mips_elf_abiflags_v0)) {
2617 errmsg = "Invalid PT_MIPS_ABIFLAGS entry";
2618 goto exit_errmsg;
2619 }
2620 if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
2621 memcpy(&abiflags, bprm_buf + eppnt->p_offset,
2622 sizeof(Mips_elf_abiflags_v0));
2623 } else {
2624 retval = pread(image_fd, &abiflags, sizeof(Mips_elf_abiflags_v0),
2625 eppnt->p_offset);
2626 if (retval != sizeof(Mips_elf_abiflags_v0)) {
2627 goto exit_perror;
2628 }
2629 }
2630 bswap_mips_abiflags(&abiflags);
2631 info->fp_abi = abiflags.fp_abi;
2632 #endif
2633 }
2634 }
2635
2636 if (info->end_data == 0) {
2637 info->start_data = info->end_code;
2638 info->end_data = info->end_code;
2639 }
2640
2641 if (qemu_log_enabled()) {
2642 load_symbols(ehdr, image_fd, load_bias);
2643 }
2644
2645 mmap_unlock();
2646
2647 close(image_fd);
2648 return;
2649
2650 exit_read:
2651 if (retval >= 0) {
2652 errmsg = "Incomplete read of file header";
2653 goto exit_errmsg;
2654 }
2655 exit_perror:
2656 errmsg = strerror(errno);
2657 exit_errmsg:
2658 fprintf(stderr, "%s: %s\n", image_name, errmsg);
2659 exit(-1);
2660 }
2661
2662 static void load_elf_interp(const char *filename, struct image_info *info,
2663 char bprm_buf[BPRM_BUF_SIZE])
2664 {
2665 int fd, retval;
2666
2667 fd = open(path(filename), O_RDONLY);
2668 if (fd < 0) {
2669 goto exit_perror;
2670 }
2671
2672 retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
2673 if (retval < 0) {
2674 goto exit_perror;
2675 }
2676 if (retval < BPRM_BUF_SIZE) {
2677 memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
2678 }
2679
2680 load_elf_image(filename, fd, info, NULL, bprm_buf);
2681 return;
2682
2683 exit_perror:
2684 fprintf(stderr, "%s: %s\n", filename, strerror(errno));
2685 exit(-1);
2686 }
2687
2688 static int symfind(const void *s0, const void *s1)
2689 {
2690 target_ulong addr = *(target_ulong *)s0;
2691 struct elf_sym *sym = (struct elf_sym *)s1;
2692 int result = 0;
2693 if (addr < sym->st_value) {
2694 result = -1;
2695 } else if (addr >= sym->st_value + sym->st_size) {
2696 result = 1;
2697 }
2698 return result;
2699 }
2700
2701 static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
2702 {
2703 #if ELF_CLASS == ELFCLASS32
2704 struct elf_sym *syms = s->disas_symtab.elf32;
2705 #else
2706 struct elf_sym *syms = s->disas_symtab.elf64;
2707 #endif
2708
2709     /* binary search */
2710 struct elf_sym *sym;
2711
2712 sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
2713 if (sym != NULL) {
2714 return s->disas_strtab + sym->st_name;
2715 }
2716
2717 return "";
2718 }
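
/*
 * Worked example (illustrative): for a symbol with st_value 0x1000
 * and st_size 0x20, symfind() returns 0 for any address in
 * [0x1000, 0x1020), so the bsearch() above resolves e.g. orig_addr
 * 0x1010 to that symbol's name.
 */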
2719
2720 /* FIXME: This should use elf_ops.h */
2721 static int symcmp(const void *s0, const void *s1)
2722 {
2723 struct elf_sym *sym0 = (struct elf_sym *)s0;
2724 struct elf_sym *sym1 = (struct elf_sym *)s1;
2725 return (sym0->st_value < sym1->st_value)
2726 ? -1
2727 : ((sym0->st_value > sym1->st_value) ? 1 : 0);
2728 }
2729
2730 /* Best attempt to load symbols from this ELF object. */
2731 static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
2732 {
2733 int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
2734 uint64_t segsz;
2735 struct elf_shdr *shdr;
2736 char *strings = NULL;
2737 struct syminfo *s = NULL;
2738 struct elf_sym *new_syms, *syms = NULL;
2739
2740 shnum = hdr->e_shnum;
2741 i = shnum * sizeof(struct elf_shdr);
2742 shdr = (struct elf_shdr *)alloca(i);
2743 if (pread(fd, shdr, i, hdr->e_shoff) != i) {
2744 return;
2745 }
2746
2747 bswap_shdr(shdr, shnum);
2748 for (i = 0; i < shnum; ++i) {
2749 if (shdr[i].sh_type == SHT_SYMTAB) {
2750 sym_idx = i;
2751 str_idx = shdr[i].sh_link;
2752 goto found;
2753 }
2754 }
2755
2756 /* There will be no symbol table if the file was stripped. */
2757 return;
2758
2759 found:
2760     /* Now we know where the strtab and symtab are.  Snarf them. */
2761 s = g_try_new(struct syminfo, 1);
2762 if (!s) {
2763 goto give_up;
2764 }
2765
2766 segsz = shdr[str_idx].sh_size;
2767 s->disas_strtab = strings = g_try_malloc(segsz);
2768 if (!strings ||
2769 pread(fd, strings, segsz, shdr[str_idx].sh_offset) != segsz) {
2770 goto give_up;
2771 }
2772
2773 segsz = shdr[sym_idx].sh_size;
2774 syms = g_try_malloc(segsz);
2775 if (!syms || pread(fd, syms, segsz, shdr[sym_idx].sh_offset) != segsz) {
2776 goto give_up;
2777 }
2778
2779 if (segsz / sizeof(struct elf_sym) > INT_MAX) {
2780 /* Implausibly large symbol table: give up rather than ploughing
2781          * on with an overflowing number-of-symbols calculation.
2782 */
2783 goto give_up;
2784 }
2785 nsyms = segsz / sizeof(struct elf_sym);
2786 for (i = 0; i < nsyms; ) {
2787 bswap_sym(syms + i);
2788 /* Throw away entries which we do not need. */
2789 if (syms[i].st_shndx == SHN_UNDEF
2790 || syms[i].st_shndx >= SHN_LORESERVE
2791 || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
2792 if (i < --nsyms) {
2793 syms[i] = syms[nsyms];
2794 }
2795 } else {
2796 #if defined(TARGET_ARM) || defined (TARGET_MIPS)
2797 /* The bottom address bit marks a Thumb or MIPS16 symbol. */
2798 syms[i].st_value &= ~(target_ulong)1;
2799 #endif
2800 syms[i].st_value += load_bias;
2801 i++;
2802 }
2803 }
2804
2805 /* No "useful" symbol. */
2806 if (nsyms == 0) {
2807 goto give_up;
2808 }
2809
2810 /* Attempt to free the storage associated with the local symbols
2811 that we threw away. Whether or not this has any effect on the
2812 memory allocation depends on the malloc implementation and how
2813 many symbols we managed to discard. */
2814 new_syms = g_try_renew(struct elf_sym, syms, nsyms);
2815 if (new_syms == NULL) {
2816 goto give_up;
2817 }
2818 syms = new_syms;
2819
2820 qsort(syms, nsyms, sizeof(*syms), symcmp);
2821
2822 s->disas_num_syms = nsyms;
2823 #if ELF_CLASS == ELFCLASS32
2824 s->disas_symtab.elf32 = syms;
2825 #else
2826 s->disas_symtab.elf64 = syms;
2827 #endif
2828 s->lookup_symbol = lookup_symbolxx;
2829 s->next = syminfos;
2830 syminfos = s;
2831
2832 return;
2833
2834 give_up:
2835 g_free(s);
2836 g_free(strings);
2837 g_free(syms);
2838 }
2839
2840 uint32_t get_elf_eflags(int fd)
2841 {
2842 struct elfhdr ehdr;
2843 off_t offset;
2844 int ret;
2845
2846 /* Read ELF header */
2847 offset = lseek(fd, 0, SEEK_SET);
2848 if (offset == (off_t) -1) {
2849 return 0;
2850 }
2851 ret = read(fd, &ehdr, sizeof(ehdr));
2852     if (ret != sizeof(ehdr)) {
2853 return 0;
2854 }
2855 offset = lseek(fd, offset, SEEK_SET);
2856 if (offset == (off_t) -1) {
2857 return 0;
2858 }
2859
2860 /* Check ELF signature */
2861 if (!elf_check_ident(&ehdr)) {
2862 return 0;
2863 }
2864
2865 /* check header */
2866 bswap_ehdr(&ehdr);
2867 if (!elf_check_ehdr(&ehdr)) {
2868 return 0;
2869 }
2870
2871     /* return the architecture-specific ELF flags */
2872 return ehdr.e_flags;
2873 }
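
/*
 * Sketch of a caller (hypothetical, for illustration only; error
 * handling elided):
 *
 *   int fd = open(filename, O_RDONLY);
 *   if (fd >= 0) {
 *       uint32_t eflags = get_elf_eflags(fd);
 *       ... e.g. on MIPS, inspect eflags to pick the FP ABI ...
 *       close(fd);
 *   }
 */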
2874
2875 int load_elf_binary(struct linux_binprm *bprm, struct image_info *info)
2876 {
2877 struct image_info interp_info;
2878 struct elfhdr elf_ex;
2879 char *elf_interpreter = NULL;
2880 char *scratch;
2881
2882 memset(&interp_info, 0, sizeof(interp_info));
2883 #ifdef TARGET_MIPS
2884 interp_info.fp_abi = MIPS_ABI_FP_UNKNOWN;
2885 #endif
2886
2887 info->start_mmap = (abi_ulong)ELF_START_MMAP;
2888
2889 load_elf_image(bprm->filename, bprm->fd, info,
2890 &elf_interpreter, bprm->buf);
2891
2892 /* ??? We need a copy of the elf header for passing to create_elf_tables.
2893 If we do nothing, we'll have overwritten this when we re-use bprm->buf
2894 when we load the interpreter. */
2895 elf_ex = *(struct elfhdr *)bprm->buf;
2896
2897 /* Do this so that we can load the interpreter, if need be. We will
2898 change some of these later */
2899 bprm->p = setup_arg_pages(bprm, info);
2900
2901 scratch = g_new0(char, TARGET_PAGE_SIZE);
2902 if (STACK_GROWS_DOWN) {
2903 bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
2904 bprm->p, info->stack_limit);
2905 info->file_string = bprm->p;
2906 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
2907 bprm->p, info->stack_limit);
2908 info->env_strings = bprm->p;
2909 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
2910 bprm->p, info->stack_limit);
2911 info->arg_strings = bprm->p;
2912 } else {
2913 info->arg_strings = bprm->p;
2914 bprm->p = copy_elf_strings(bprm->argc, bprm->argv, scratch,
2915 bprm->p, info->stack_limit);
2916 info->env_strings = bprm->p;
2917 bprm->p = copy_elf_strings(bprm->envc, bprm->envp, scratch,
2918 bprm->p, info->stack_limit);
2919 info->file_string = bprm->p;
2920 bprm->p = copy_elf_strings(1, &bprm->filename, scratch,
2921 bprm->p, info->stack_limit);
2922 }
2923
2924 g_free(scratch);
2925
2926 if (!bprm->p) {
2927 fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
2928 exit(-1);
2929 }
2930
2931 if (elf_interpreter) {
2932 load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
2933
2934 /* If the program interpreter is one of these two, then assume
2935 an iBCS2 image. Otherwise assume a native linux image. */
2936
2937 if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
2938 || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
2939 info->personality = PER_SVR4;
2940
2941 /* Why this, you ask??? Well SVr4 maps page 0 as read-only,
2942 and some applications "depend" upon this behavior. Since
2943 we do not have the power to recompile these, we emulate
2944 the SVr4 behavior. Sigh. */
2945 target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
2946 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
2947 }
2948 #ifdef TARGET_MIPS
2949 info->interp_fp_abi = interp_info.fp_abi;
2950 #endif
2951 }
2952
2953 bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
2954 info, (elf_interpreter ? &interp_info : NULL));
2955 info->start_stack = bprm->p;
2956
2957 /* If we have an interpreter, set that as the program's entry point.
2958 Copy the load_bias as well, to help PPC64 interpret the entry
2959 point as a function descriptor. Do this after creating elf tables
2960 so that we copy the original program entry point into the AUXV. */
2961 if (elf_interpreter) {
2962 info->load_bias = interp_info.load_bias;
2963 info->entry = interp_info.entry;
2964 free(elf_interpreter);
2965 }
2966
2967 #ifdef USE_ELF_CORE_DUMP
2968 bprm->core_dump = &elf_core_dump;
2969 #endif
2970
2971 /*
2972 * If we reserved extra space for brk, release it now.
2973 * The implementation of do_brk in syscalls.c expects to be able
2974 * to mmap pages in this space.
2975 */
2976 if (info->reserve_brk) {
2977 abi_ulong start_brk = HOST_PAGE_ALIGN(info->brk);
2978 abi_ulong end_brk = HOST_PAGE_ALIGN(info->brk + info->reserve_brk);
2979 target_munmap(start_brk, end_brk - start_brk);
2980 }
2981
2982 return 0;
2983 }
2984
2985 #ifdef USE_ELF_CORE_DUMP
2986 /*
2987 * Definitions to generate Intel SVR4-like core files.
2988 * These mostly have the same names as the SVR4 types with "target_elf_"
2989 * tacked on the front to prevent clashes with linux definitions,
2990 * and the typedef forms have been avoided. This is mostly like
2991 * the SVR4 structure, but more Linuxy, with things that Linux does
2992 * not support and which gdb doesn't really use excluded.
2993 *
2994  * Fields we don't dump (their contents are zero) in linux-user qemu
2995 * are marked with XXX.
2996 *
2997 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
2998 *
2999  * Porting ELF coredump to a target is a (quite) simple process.  First you
3000  * define USE_ELF_CORE_DUMP in the target's ELF code (where init_thread() for
3001 * the target resides):
3002 *
3003 * #define USE_ELF_CORE_DUMP
3004 *
3005  * Next you define the type of the register set used for dumping.  The ELF
3006  * specification says that it needs to be an array of elf_greg_t of size ELF_NREG.
3007 *
3008 * typedef <target_regtype> target_elf_greg_t;
3009 * #define ELF_NREG <number of registers>
3010  * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
3011 *
3012  * The last step is to implement a target-specific function that copies
3013  * registers from the given cpu into the register set just specified.  The prototype is:
3014 *
3015  * static void elf_core_copy_regs(target_elf_gregset_t *regs,
3016 * const CPUArchState *env);
3017 *
3018 * Parameters:
3019 * regs - copy register values into here (allocated and zeroed by caller)
3020 * env - copy registers from here
3021 *
3022  * An example for the ARM target is provided in this file.
3023 */
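
/*
 * A minimal sketch of such a register-copy hook, for a hypothetical
 * target with 16 general-purpose registers plus a program counter
 * (ELF_NREG == 17; the env field names are illustrative, not a real
 * QEMU target):
 *
 *   static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                  const CPUArchState *env)
 *   {
 *       int i;
 *
 *       for (i = 0; i < 16; i++) {
 *           (*regs)[i] = tswapreg(env->regs[i]);
 *       }
 *       (*regs)[16] = tswapreg(env->pc);
 *   }
 */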
3024
3025 /* An ELF note in memory */
3026 struct memelfnote {
3027 const char *name;
3028 size_t namesz;
3029 size_t namesz_rounded;
3030 int type;
3031 size_t datasz;
3032 size_t datasz_rounded;
3033 void *data;
3034 size_t notesz;
3035 };
3036
3037 struct target_elf_siginfo {
3038 abi_int si_signo; /* signal number */
3039 abi_int si_code; /* extra code */
3040 abi_int si_errno; /* errno */
3041 };
3042
3043 struct target_elf_prstatus {
3044 struct target_elf_siginfo pr_info; /* Info associated with signal */
3045 abi_short pr_cursig; /* Current signal */
3046 abi_ulong pr_sigpend; /* XXX */
3047 abi_ulong pr_sighold; /* XXX */
3048 target_pid_t pr_pid;
3049 target_pid_t pr_ppid;
3050 target_pid_t pr_pgrp;
3051 target_pid_t pr_sid;
3052 struct target_timeval pr_utime; /* XXX User time */
3053 struct target_timeval pr_stime; /* XXX System time */
3054 struct target_timeval pr_cutime; /* XXX Cumulative user time */
3055 struct target_timeval pr_cstime; /* XXX Cumulative system time */
3056 target_elf_gregset_t pr_reg; /* GP registers */
3057 abi_int pr_fpvalid; /* XXX */
3058 };
3059
3060 #define ELF_PRARGSZ (80) /* Number of chars for args */
3061
3062 struct target_elf_prpsinfo {
3063 char pr_state; /* numeric process state */
3064 char pr_sname; /* char for pr_state */
3065 char pr_zomb; /* zombie */
3066 char pr_nice; /* nice val */
3067 abi_ulong pr_flag; /* flags */
3068 target_uid_t pr_uid;
3069 target_gid_t pr_gid;
3070 target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
3071 /* Lots missing */
3072 char pr_fname[16] QEMU_NONSTRING; /* filename of executable */
3073 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
3074 };
3075
3076 /* Here is the structure in which status of each thread is captured. */
3077 struct elf_thread_status {
3078 QTAILQ_ENTRY(elf_thread_status) ets_link;
3079 struct target_elf_prstatus prstatus; /* NT_PRSTATUS */
3080 #if 0
3081 elf_fpregset_t fpu; /* NT_PRFPREG */
3082 struct task_struct *thread;
3083 elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */
3084 #endif
3085 struct memelfnote notes[1];
3086 int num_notes;
3087 };
3088
3089 struct elf_note_info {
3090 struct memelfnote *notes;
3091 struct target_elf_prstatus *prstatus; /* NT_PRSTATUS */
3092 struct target_elf_prpsinfo *psinfo; /* NT_PRPSINFO */
3093
3094 QTAILQ_HEAD(, elf_thread_status) thread_list;
3095 #if 0
3096 /*
3097 * Current version of ELF coredump doesn't support
3098 * dumping fp regs etc.
3099 */
3100 elf_fpregset_t *fpu;
3101 elf_fpxregset_t *xfpu;
3102 int thread_status_size;
3103 #endif
3104 int notes_size;
3105 int numnote;
3106 };
3107
3108 struct vm_area_struct {
3109 target_ulong vma_start; /* start vaddr of memory region */
3110 target_ulong vma_end; /* end vaddr of memory region */
3111 abi_ulong vma_flags; /* protection etc. flags for the region */
3112 QTAILQ_ENTRY(vm_area_struct) vma_link;
3113 };
3114
3115 struct mm_struct {
3116 QTAILQ_HEAD(, vm_area_struct) mm_mmap;
3117 int mm_count; /* number of mappings */
3118 };
3119
3120 static struct mm_struct *vma_init(void);
3121 static void vma_delete(struct mm_struct *);
3122 static int vma_add_mapping(struct mm_struct *, target_ulong,
3123 target_ulong, abi_ulong);
3124 static int vma_get_mapping_count(const struct mm_struct *);
3125 static struct vm_area_struct *vma_first(const struct mm_struct *);
3126 static struct vm_area_struct *vma_next(struct vm_area_struct *);
3127 static abi_ulong vma_dump_size(const struct vm_area_struct *);
3128 static int vma_walker(void *priv, target_ulong start, target_ulong end,
3129 unsigned long flags);
3130
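/*
 * Typical use of the vma helpers above when taking a core dump (a
 * sketch; the real sequence lives in elf_core_dump() later in this
 * file):
 *
 *   struct mm_struct *mm = vma_init();
 *   walk_memory_regions(mm, vma_walker);
 *   for (struct vm_area_struct *vma = vma_first(mm); vma != NULL;
 *        vma = vma_next(vma)) {
 *       ... dump vma_dump_size(vma) bytes starting at vma->vma_start ...
 *   }
 *   vma_delete(mm);
 */
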
3131 static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
3132 static void fill_note(struct memelfnote *, const char *, int,
3133 unsigned int, void *);
3134 static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
3135 static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
3136 static void fill_auxv_note(struct memelfnote *, const TaskState *);
3137 static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
3138 static size_t note_size(const struct memelfnote *);
3139 static void free_note_info(struct elf_note_info *);
3140 static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
3141 static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
3142 static int core_dump_filename(const TaskState *, char *, size_t);
3143
3144 static int dump_write(int, const void *, size_t);
3145 static int write_note(struct memelfnote *, int);
3146 static int write_note_info(struct elf_note_info *, int);
3147
3148 #ifdef BSWAP_NEEDED
3149 static void bswap_prstatus(struct target_elf_prstatus *prstatus)
3150 {
3151 prstatus->pr_info.si_signo = tswap32(prstatus->pr_info.si_signo);
3152 prstatus->pr_info.si_code = tswap32(prstatus->pr_info.si_code);
3153 prstatus->pr_info.si_errno = tswap32(prstatus->pr_info.si_errno);
3154 prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
3155 prstatus->pr_sigpend = tswapal(prstatus->pr_sigpend);
3156 prstatus->pr_sighold = tswapal(prstatus->pr_sighold);
3157 prstatus->pr_pid = tswap32(prstatus->pr_pid);
3158 prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
3159 prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
3160 prstatus->pr_sid = tswap32(prstatus->pr_sid);
3161 /* cpu times are not filled, so we skip them */
3162 /* regs should be in correct format already */
3163 prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
3164 }
3165
3166 static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
3167 {
3168 psinfo->pr_flag = tswapal(psinfo->pr_flag);
3169 psinfo->pr_uid = tswap16(psinfo->pr_uid);
3170 psinfo->pr_gid = tswap16(psinfo->pr_gid);
3171 psinfo->pr_pid = tswap32(psinfo->pr_pid);
3172 psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
3173 psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
3174 psinfo->pr_sid = tswap32(psinfo->pr_sid);
3175 }
3176
3177 static void bswap_note(struct elf_note *en)
3178 {
3179 bswap32s(&en->n_namesz);
3180 bswap32s(&en->n_descsz);
3181 bswap32s(&en->n_type);
3182 }
3183 #else
3184 static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
3185 static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
3186 static inline void bswap_note(struct elf_note *en) { }
3187 #endif /* BSWAP_NEEDED */
3188
3189 /*
3190  * Minimal support for Linux memory regions.  These are needed
3191  * when we are finding out what memory exactly belongs to the
3192  * emulated process.  No locks are needed here, as long as the
3193  * thread that received the signal is stopped.
3194 */
3195
3196 static struct mm_struct *vma_init(void)
3197 {
3198 struct mm_struct *mm;
3199
3200 if ((mm = g_malloc(sizeof (*mm))) == NULL)
3201 return (NULL);
3202
3203 mm->mm_count = 0;
3204 QTAILQ_INIT(&mm->mm_mmap);
3205
3206 return (mm);
3207 }
3208
3209 static void vma_delete(struct mm_struct *mm)
3210 {
3211 struct vm_area_struct *vma;
3212
3213 while ((vma = vma_first(mm)) != NULL) {
3214 QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
3215 g_free(vma);
3216 }
3217 g_free(mm);
3218 }
3219
3220 static int vma_add_mapping(struct mm_struct *mm, target_ulong start,
3221 target_ulong end, abi_ulong flags)
3222 {
3223 struct vm_area_struct *vma;
3224
3225 if ((vma = g_malloc0(sizeof (*vma))) == NULL)
3226 return (-1);
3227
3228 vma->vma_start = start;
3229 vma->vma_end = end;
3230 vma->vma_flags = flags;
3231
3232 QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
3233 mm->mm_count++;
3234
3235 return (0);
3236 }
3237
3238 static struct vm_area_struct *vma_first(const struct mm_struct *mm)
3239 {
3240 return (QTAILQ_FIRST(&mm->mm_mmap));
3241 }
3242
3243 static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
3244 {
3245 return (QTAILQ_NEXT(vma, vma_link));
3246 }
3247
3248 static int vma_get_mapping_count(const struct mm_struct *mm)
3249 {
3250 return (mm->mm_count);
3251 }
3252
3253 /*
3254  * Calculate the file (dump) size of the given memory region.
3255 */
3256 static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
3257 {
3258 /* if we cannot even read the first page, skip it */
3259 if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
3260 return (0);
3261
3262 /*
3263      * Usually we don't dump executable pages as they contain
3264      * non-writable code that a debugger can read directly from the
3265      * target library etc.  However, thread stacks are also marked
3266      * executable, so we read in the first page of the given region
3267      * and check whether it contains an ELF header.  If there is no
3268      * ELF header, we dump it.
3269 */
3270 if (vma->vma_flags & PROT_EXEC) {
3271 char page[TARGET_PAGE_SIZE];
3272
3273 copy_from_user(page, vma->vma_start, sizeof (page));
3274 if ((page[EI_MAG0] == ELFMAG0) &&
3275 (page[EI_MAG1] == ELFMAG1) &&
3276 (page[EI_MAG2] == ELFMAG2) &&
3277 (page[EI_MAG3] == ELFMAG3)) {
3278 /*
3279                  * These mappings are probably from an ELF binary.
3280                  * Don't dump them.
3281 */
3282 return (0);
3283 }
3284 }
3285
3286 return (vma->vma_end - vma->vma_start);
3287 }
3288
3289 static int vma_walker(void *priv, target_ulong start, target_ulong end,
3290 unsigned long flags)
3291 {
3292 struct mm_struct *mm = (struct mm_struct *)priv;
3293
3294 vma_add_mapping(mm, start, end, flags);
3295 return (0);
3296 }
3297
3298 static void fill_note(struct memelfnote *note, const char *name, int type,
3299 unsigned int sz, void *data)
3300 {
3301 unsigned int namesz;
3302
3303 namesz = strlen(name) + 1;
3304 note->name = name;
3305 note->namesz = namesz;
3306 note->namesz_rounded = roundup(namesz, sizeof (int32_t));
3307 note->type = type;
3308 note->datasz = sz;
3309 note->datasz_rounded = roundup(sz, sizeof (int32_t));
3310
3311 note->data = data;
3312
3313 /*
3314      * We calculate the rounded-up note size here, as specified
3315      * by the ELF specification.
3316 */
3317 note->notesz = sizeof (struct elf_note) +
3318 note->namesz_rounded + note->datasz_rounded;
3319 }
3320
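/*
 * Worked size example (illustrative): for a note named "CORE" with a
 * 148-byte payload, namesz is 5 and rounds up to 8; datasz 148 is
 * already a multiple of 4; so notesz is 12 (the struct elf_note
 * header) + 8 + 148 = 168 bytes.
 */
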
3321 static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
3322