/*
 * IA-64-specific support for kernel module loader.
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Loosely based on patch by Rusty Russell.
 */

/* relocs tested so far:

   PCREL21B	(for br.call only; br.cond is not supported out of modules!)
   PCREL60B	(for brl.cond only; brl.call is not supported for modules!)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/elf.h>
#include <linux/moduleloader.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include <asm/patch.h>
#include <asm/unaligned.h>
#define ARCH_MODULE_DEBUG 0

#if ARCH_MODULE_DEBUG
# define DEBUGP printk
# define inline
#else
# define DEBUGP(fmt , a...)
#endif

#ifdef CONFIG_ITANIUM
# define USE_BRL	0	/* Itanium 1 does not implement brl */
#else
# define USE_BRL	1
#endif
#define MAX_LTOFF	((uint64_t) (1 << 22))	/* max. allowable linkage-table offset */

/* Define some relocation helper macros/types: */

#define FORMAT_SHIFT	0
#define FORMAT_BITS	3
#define FORMAT_MASK	((1 << FORMAT_BITS) - 1)
#define VALUE_SHIFT	3
#define VALUE_BITS	5
#define VALUE_MASK	((1 << VALUE_BITS) - 1)
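
/*
 * For illustration (using the standard ia64 ELF relocation numbering, which
 * this packing mirrors): R_IA64_GPREL22 = 0x2a decodes as
 * format = 0x2a & FORMAT_MASK = 2 (RF_INSN22) and
 * formula = 0x2a >> VALUE_SHIFT = 5 (RV_GPREL).
 */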
enum reloc_target_format {
	/* direct encoded formats: */
	RF_NONE = 0,
	RF_INSN14 = 1,	RF_INSN22 = 2,	RF_INSN64 = 3,
	RF_32MSB = 4,	RF_32LSB = 5,	RF_64MSB = 6,	RF_64LSB = 7,

	/* formats that cannot be directly decoded: */
	RF_INSN60,
	RF_INSN21B,	/* imm21 form 1 */
	RF_INSN21M,	/* imm21 form 2 */
	RF_INSN21F	/* imm21 form 3 */
};
enum reloc_value_formula {
	RV_DIRECT = 4,		/* S + A */
	RV_GPREL = 5,		/* @gprel(S + A) */
	RV_LTREL = 6,		/* @ltoff(S + A) */
	RV_PLTREL = 7,		/* @pltoff(S + A) */
	RV_FPTR = 8,		/* @fptr(S + A) */
	RV_PCREL = 9,		/* S + A - P */
	RV_LTREL_FPTR = 10,	/* @ltoff(@fptr(S + A)) */
	RV_SEGREL = 11,		/* @segrel(S + A) */
	RV_SECREL = 12,		/* @secrel(S + A) */
	RV_BDREL = 13,		/* BD + A */
	RV_LTV = 14,		/* S + A (like RV_DIRECT, except frozen at static link-time) */
	RV_PCREL2 = 15,		/* S + A - P */
	RV_SPECIAL = 16,	/* various (see below) */
	RV_TPREL = 18,		/* @tprel(S + A) */
	RV_LTREL_TPREL = 19,	/* @ltoff(@tprel(S + A)) */
	RV_DTPMOD = 20,		/* @dtpmod(S + A) */
	RV_LTREL_DTPMOD = 21,	/* @ltoff(@dtpmod(S + A)) */
	RV_DTPREL = 22,		/* @dtprel(S + A) */
	RV_LTREL_DTPREL = 23	/* @ltoff(@dtprel(S + A)) */
	/* 28-31 reserved for implementation-specific purposes. */
};
#define N(reloc)	[R_IA64_##reloc] = #reloc

static const char *reloc_name[256] = {
	N(NONE),		N(IMM14),		N(IMM22),		N(IMM64),
	N(DIR32MSB),		N(DIR32LSB),		N(DIR64MSB),		N(DIR64LSB),
	N(GPREL22),		N(GPREL64I),		N(GPREL32MSB),		N(GPREL32LSB),
	N(GPREL64MSB),		N(GPREL64LSB),		N(LTOFF22),		N(LTOFF64I),
	N(PLTOFF22),		N(PLTOFF64I),		N(PLTOFF64MSB),		N(PLTOFF64LSB),
	N(FPTR64I),		N(FPTR32MSB),		N(FPTR32LSB),		N(FPTR64MSB),
	N(FPTR64LSB),		N(PCREL60B),		N(PCREL21B),		N(PCREL21M),
	N(PCREL21F),		N(PCREL32MSB),		N(PCREL32LSB),		N(PCREL64MSB),
	N(PCREL64LSB),		N(LTOFF_FPTR22),	N(LTOFF_FPTR64I),	N(LTOFF_FPTR32MSB),
	N(LTOFF_FPTR32LSB),	N(LTOFF_FPTR64MSB),	N(LTOFF_FPTR64LSB),	N(SEGREL32MSB),
	N(SEGREL32LSB),		N(SEGREL64MSB),		N(SEGREL64LSB),		N(SECREL32MSB),
	N(SECREL32LSB),		N(SECREL64MSB),		N(SECREL64LSB),		N(REL32MSB),
	N(REL32LSB),		N(REL64MSB),		N(REL64LSB),		N(LTV32MSB),
	N(LTV32LSB),		N(LTV64MSB),		N(LTV64LSB),		N(PCREL21BI),
	N(PCREL22),		N(PCREL64I),		N(IPLTMSB),		N(IPLTLSB),
	N(COPY),		N(LTOFF22X),		N(LDXMOV),		N(TPREL14),
	N(TPREL22),		N(TPREL64I),		N(TPREL64MSB),		N(TPREL64LSB),
	N(LTOFF_TPREL22),	N(DTPMOD64MSB),		N(DTPMOD64LSB),		N(LTOFF_DTPMOD22),
	N(DTPREL14),		N(DTPREL22),		N(DTPREL64I),		N(DTPREL32MSB),
	N(DTPREL32LSB),		N(DTPREL64MSB),		N(DTPREL64LSB),		N(LTOFF_DTPREL22)
};

#undef N
/* Opaque struct for insns, to protect against derefs. */
struct insn;

static inline uint64_t
bundle (const struct insn *insn)
{
	return (uint64_t) insn & ~0xfUL;
}

static inline int
slot (const struct insn *insn)
{
	return (uint64_t) insn & 0x3;
}
static int
apply_imm64 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM64\n",
		       mod->name, slot(insn));
		return 0;
	}
	ia64_patch_imm64((u64) insn, val);
	return 1;
}

static int
apply_imm60 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (slot(insn) != 2) {
		printk(KERN_ERR "%s: invalid slot number %d for IMM60\n",
		       mod->name, slot(insn));
		return 0;
	}
	if (val + ((uint64_t) 1 << 59) >= (1UL << 60)) {
		printk(KERN_ERR "%s: value %ld out of IMM60 range\n",
		       mod->name, (long) val);
		return 0;
	}
	ia64_patch_imm60((u64) insn, val);
	return 1;
}
static int
apply_imm22 (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 21) >= (1 << 22)) {
		printk(KERN_ERR "%s: value %li out of IMM22 range\n",
		       mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x01fffcfe000UL, (  ((val & 0x200000UL) << 15) /* bit 21 -> 36 */
						 | ((val & 0x1f0000UL) <<  6) /* bit 16 -> 22 */
						 | ((val & 0x00ff80UL) << 20) /* bit  7 -> 27 */
						 | ((val & 0x00007fUL) << 13) /* bit  0 -> 13 */));
	return 1;
}

static int
apply_imm21b (struct module *mod, struct insn *insn, uint64_t val)
{
	if (val + (1 << 20) >= (1 << 21)) {
		printk(KERN_ERR "%s: value %li out of IMM21b range\n",
		       mod->name, (long)val);
		return 0;
	}
	ia64_patch((u64) insn, 0x11ffffe000UL, (  ((val & 0x100000UL) << 16) /* bit 20 -> 36 */
						| ((val & 0x0fffffUL) << 13) /* bit  0 -> 13 */));
	return 1;
}
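
/*
 * For illustration: "val" arrives at apply_imm21b() already scaled to bundle
 * units by the caller.  A branch one bundle backwards (val = -1) has
 * val & 0x100000 = 0x100000, which lands in instruction bit 36 (the sign
 * bit s), and val & 0xfffff = 0xfffff, which lands in bits 13-32 (imm20b),
 * i.e., all-ones, as expected for -16 bytes / 16.
 */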
#if USE_BRL

struct plt_entry {
	/* Two instruction bundles in PLT. */
	unsigned char bundle[2][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      brl.many TARGET_IP */
			0x08, 0x00, 0x00, 0xc0
		}
	}
};
static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_gp)
	    && apply_imm60(mod, (struct insn *) (plt->bundle[1] + 2),
			   (target_ip - (int64_t) plt->bundle[1]) / 16))
		return 1;
	return 0;
}
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[1];
	long off;

	b0 = b[0]; b1 = b[1];
	off = (  ((b1 & 0x00fffff000000000UL) >> 36)		/* imm20b -> bit 0 */
	       | ((b0 >> 48) << 20) | ((b1 & 0x7fffffUL) << 36)	/* imm39 -> bit 20 */
	       | ((b1 & 0x0800000000000000UL) << 0));		/* i -> bit 59 */
	return (long) plt->bundle[1] + 16*off;
}
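
/*
 * Sanity note: patch_plt() above stores (target_ip - &bundle[1])/16 into the
 * brl, and plt_target() reverses that by gathering the scattered immediate
 * fields back into "off" and computing &bundle[1] + 16*off, so the two
 * should round-trip for any reachable target.
 */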
#else /* !USE_BRL */

struct plt_entry {
	/* Three instruction bundles in PLT. */
	unsigned char bundle[3][16];
};

static const struct plt_entry ia64_plt_template = {
	{
		{
			0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*	      movl r16=TARGET_IP */
			0x02, 0x00, 0x00, 0x60
		},
		{
			0x04, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0 */
			0x00, 0x00, 0x00, 0x00, 0x00, 0x20, /*	      movl gp=TARGET_GP */
			0x00, 0x00, 0x00, 0x60
		},
		{
			0x11, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MIB] nop.m 0 */
			0x60, 0x80, 0x04, 0x80, 0x03, 0x00, /*	      mov b6=r16 */
			0x60, 0x00, 0x80, 0x00		    /*	      br.few b6 */
		}
	}
};
static int
patch_plt (struct module *mod, struct plt_entry *plt, long target_ip, unsigned long target_gp)
{
	if (apply_imm64(mod, (struct insn *) (plt->bundle[0] + 2), target_ip)
	    && apply_imm64(mod, (struct insn *) (plt->bundle[1] + 2), target_gp))
		return 1;
	return 0;
}
unsigned long
plt_target (struct plt_entry *plt)
{
	uint64_t b0, b1, *b = (uint64_t *) plt->bundle[0];

	b0 = b[0]; b1 = b[1];
	return (  ((b1 & 0x000007f000000000) >> 36)		/* imm7b -> bit 0 */
		| ((b1 & 0x07fc000000000000) >> 43)		/* imm9d -> bit 7 */
		| ((b1 & 0x0003e00000000000) >> 29)		/* imm5c -> bit 16 */
		| ((b1 & 0x0000100000000000) >> 23)		/* ic -> bit 21 */
		| ((b0 >> 46) << 22) | ((b1 & 0x7fffff) << 40)	/* imm41 -> bit 22 */
		| ((b1 & 0x0800000000000000) <<  4));		/* i -> bit 63 */
}

#endif /* !USE_BRL */
void
module_arch_freeing_init (struct module *mod)
{
	if (mod->arch.init_unw_table) {
		unw_remove_unwind_table(mod->arch.init_unw_table);
		mod->arch.init_unw_table = NULL;
	}
}
/* Have we already seen one of these relocations? */
/* FIXME: we could look in other sections, too --RR */
static int
duplicate_reloc (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		if (rela[i].r_info == rela[num].r_info && rela[i].r_addend == rela[num].r_addend)
			return 1;
	}
	return 0;
}
/* Count how many GOT entries we may need */
static unsigned int
count_gots (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_LTOFF22:
		case R_IA64_LTOFF22X:
		case R_IA64_LTOFF64I:
		case R_IA64_LTOFF_FPTR22:
		case R_IA64_LTOFF_FPTR64I:
		case R_IA64_LTOFF_FPTR32MSB:
		case R_IA64_LTOFF_FPTR32LSB:
		case R_IA64_LTOFF_FPTR64MSB:
		case R_IA64_LTOFF_FPTR64LSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
/* Count how many PLT entries we may need */
static unsigned int
count_plts (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not
	   time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_PCREL21B:
		case R_IA64_PLTOFF22:
		case R_IA64_PLTOFF64I:
		case R_IA64_PLTOFF64MSB:
		case R_IA64_PLTOFF64LSB:
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
/* We need to create a function descriptor for any internal function
   which is referenced. */
static unsigned int
count_fdescs (const Elf64_Rela *rela, unsigned int num)
{
	unsigned int i, ret = 0;

	/* Sure, this is order(n^2), but it's usually short, and not time critical. */
	for (i = 0; i < num; i++) {
		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_IA64_FPTR64I:
		case R_IA64_FPTR32LSB:
		case R_IA64_FPTR32MSB:
		case R_IA64_FPTR64LSB:
		case R_IA64_FPTR64MSB:
		case R_IA64_LTOFF_FPTR22:
		case R_IA64_LTOFF_FPTR32LSB:
		case R_IA64_LTOFF_FPTR32MSB:
		case R_IA64_LTOFF_FPTR64I:
		case R_IA64_LTOFF_FPTR64LSB:
		case R_IA64_LTOFF_FPTR64MSB:
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			/*
			 * Jumps to static functions sometimes go straight to their
			 * offset.  Of course, that may not be possible if the jump is
			 * from init -> core or vice versa, so we need to generate an
			 * FDESC (and PLT etc) for that.
			 */
		case R_IA64_PCREL21B:
			if (!duplicate_reloc(rela, i))
				ret++;
			break;
		}
	}
	return ret;
}
int
module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
			   struct module *mod)
{
	unsigned long core_plts = 0, init_plts = 0, gots = 0, fdescs = 0;
	Elf64_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;

	/*
	 * To store the PLTs and function-descriptors, we expand the .text section for
	 * core module-code and the .init.text section for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s)
		if (strcmp(".core.plt", secstrings + s->sh_name) == 0)
			mod->arch.core_plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init_plt = s;
		else if (strcmp(".got", secstrings + s->sh_name) == 0)
			mod->arch.got = s;
		else if (strcmp(".opd", secstrings + s->sh_name) == 0)
			mod->arch.opd = s;
		else if (strcmp(".IA_64.unwind", secstrings + s->sh_name) == 0)
			mod->arch.unwind = s;
#ifdef CONFIG_PARAVIRT
		else if (strcmp(".paravirt_bundles",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_bundles = s;
		else if (strcmp(".paravirt_insts",
				secstrings + s->sh_name) == 0)
			mod->arch.paravirt_insts = s;
#endif

	if (!mod->arch.core_plt || !mod->arch.init_plt || !mod->arch.got || !mod->arch.opd) {
		printk(KERN_ERR "%s: sections missing\n", mod->name);
		return -ENOEXEC;
	}
	/* GOT and PLTs can occur in any relocated section... */
	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		const Elf64_Rela *rels = (void *)ehdr + s->sh_offset;
		unsigned long numrels = s->sh_size/sizeof(Elf64_Rela);

		if (s->sh_type != SHT_RELA)
			continue;

		gots += count_gots(rels, numrels);
		fdescs += count_fdescs(rels, numrels);
		if (strstr(secstrings + s->sh_name, ".init"))
			init_plts += count_plts(rels, numrels);
		else
			core_plts += count_plts(rels, numrels);
	}

	mod->arch.core_plt->sh_type = SHT_NOBITS;
	mod->arch.core_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core_plt->sh_addralign = 16;
	mod->arch.core_plt->sh_size = core_plts * sizeof(struct plt_entry);
	mod->arch.init_plt->sh_type = SHT_NOBITS;
	mod->arch.init_plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init_plt->sh_addralign = 16;
	mod->arch.init_plt->sh_size = init_plts * sizeof(struct plt_entry);
	mod->arch.got->sh_type = SHT_NOBITS;
	mod->arch.got->sh_flags = ARCH_SHF_SMALL | SHF_ALLOC;
	mod->arch.got->sh_addralign = 8;
	mod->arch.got->sh_size = gots * sizeof(struct got_entry);
	mod->arch.opd->sh_type = SHT_NOBITS;
	mod->arch.opd->sh_flags = SHF_ALLOC;
	mod->arch.opd->sh_addralign = 8;
	mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc);
	DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n",
	       __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size,
	       mod->arch.got->sh_size, mod->arch.opd->sh_size);
	return 0;
}
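
/*
 * For illustration: a module whose .init.text relocations call three distinct
 * external functions ends up with init_plt->sh_size == 3 * sizeof(struct
 * plt_entry); duplicate_reloc() keeps repeated references to the same symbol
 * from inflating these worst-case estimates.
 */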
static inline int
in_init (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_init < mod->init_size;
}

static inline int
in_core (const struct module *mod, uint64_t addr)
{
	return addr - (uint64_t) mod->module_core < mod->core_size;
}

static inline int
is_internal (const struct module *mod, uint64_t value)
{
	return in_init(mod, value) || in_core(mod, value);
}
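
/*
 * Note the unsigned-subtraction idiom above: "addr - base < size" is a single
 * comparison that is false both when addr < base (the subtraction wraps to a
 * huge value) and when addr >= base + size.
 */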
/*
 * Get gp-relative offset for the linkage-table entry of VALUE.
 */
static uint64_t
get_ltoff (struct module *mod, uint64_t value, int *okp)
{
	struct got_entry *got, *e;

	if (!*okp)
		return 0;

	got = (void *) mod->arch.got->sh_addr;
	for (e = got; e < got + mod->arch.next_got_entry; ++e)
		if (e->val == value)
			goto found;

	/* Not enough GOT entries? */
	BUG_ON(e >= (struct got_entry *) (mod->arch.got->sh_addr + mod->arch.got->sh_size));

	e->val = value;
	++mod->arch.next_got_entry;
  found:
	return (uint64_t) e - mod->arch.gp;
}
static inline int
gp_addressable (struct module *mod, uint64_t value)
{
	return value - mod->arch.gp + MAX_LTOFF/2 < MAX_LTOFF;
}
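
/*
 * For illustration: with MAX_LTOFF = 4MB this accepts exactly the window
 * [gp - 2MB, gp + 2MB), matching the reach of a 22-bit signed addl offset.
 * E.g. value = gp - 1 gives 2MB - 1 < 4MB (accepted), while value = gp + 2MB
 * gives exactly 4MB (rejected).
 */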
/* Get PC-relative PLT entry for this value.  Returns 0 on failure. */
static uint64_t
get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp)
{
	struct plt_entry *plt, *plt_end;
	uint64_t target_ip, target_gp;

	if (!*okp)
		return 0;

	if (in_init(mod, (uint64_t) insn)) {
		plt = (void *) mod->arch.init_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.init_plt->sh_size;
	} else {
		plt = (void *) mod->arch.core_plt->sh_addr;
		plt_end = (void *) plt + mod->arch.core_plt->sh_size;
	}

	/* "value" is a pointer to a function-descriptor; fetch the target ip/gp from it: */
	target_ip = ((uint64_t *) value)[0];
	target_gp = ((uint64_t *) value)[1];

	/* Look for existing PLT entry. */
	while (plt->bundle[0][0]) {
		if (plt_target(plt) == target_ip)
			goto found;
		if (++plt >= plt_end)
			BUG();
	}
	*plt = ia64_plt_template;
	if (!patch_plt(mod, plt, target_ip, target_gp)) {
		*okp = 0;
		return 0;
	}
#if ARCH_MODULE_DEBUG
	if (plt_target(plt) != target_ip) {
		printk("%s: mistargeted PLT: wanted %lx, got %lx\n",
		       __func__, target_ip, plt_target(plt));
		*okp = 0;
		return 0;
	}
#endif
  found:
	return (uint64_t) plt;
}
/* Get function descriptor for VALUE. */
static uint64_t
get_fdesc (struct module *mod, uint64_t value, int *okp)
{
	struct fdesc *fdesc = (void *) mod->arch.opd->sh_addr;

	if (!*okp)
		return 0;

	if (!value) {
		printk(KERN_ERR "%s: fdesc for zero requested!\n", mod->name);
		return 0;
	}

	if (!is_internal(mod, value))
		/*
		 * If it's not a module-local entry-point, "value" already points to a
		 * function-descriptor.
		 */
		return value;

	/* Look for existing function descriptor. */
	while (fdesc->ip) {
		if (fdesc->ip == value)
			return (uint64_t)fdesc;
		if ((uint64_t) ++fdesc >= mod->arch.opd->sh_addr + mod->arch.opd->sh_size)
			BUG();
	}

	/* Create a new one. */
	fdesc->ip = value;
	fdesc->gp = mod->arch.gp;
	return (uint64_t) fdesc;
}
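
/*
 * Background: an ia64 function pointer is the address of an (ip, gp) pair
 * like the one filled in above, not the address of the code itself.  Handing
 * out exactly one fdesc per internal function (hence the lookup loop) keeps
 * function-pointer equality comparisons meaningful across the module
 * boundary.
 */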
static inline int
do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
	  Elf64_Shdr *sec, void *location)
{
	enum reloc_target_format format = (r_type >> FORMAT_SHIFT) & FORMAT_MASK;
	enum reloc_value_formula formula = (r_type >> VALUE_SHIFT) & VALUE_MASK;
	uint64_t val;
	int ok = 1;

	val = sym->st_value + addend;

	switch (formula) {
	case RV_SEGREL:	/* segment base is arbitrarily chosen to be 0 for kernel modules */
	case RV_DIRECT:
		break;

	case RV_GPREL:		val -= mod->arch.gp; break;
	case RV_LTREL:		val = get_ltoff(mod, val, &ok); break;
	case RV_PLTREL:		val = get_plt(mod, location, val, &ok); break;
	case RV_FPTR:		val = get_fdesc(mod, val, &ok); break;
	case RV_SECREL:		val -= sec->sh_addr; break;
	case RV_LTREL_FPTR:	val = get_ltoff(mod, get_fdesc(mod, val, &ok), &ok); break;
	case RV_PCREL:
		switch (r_type) {
		case R_IA64_PCREL21B:
			if ((in_init(mod, val) && in_core(mod, (uint64_t)location)) ||
			    (in_core(mod, val) && in_init(mod, (uint64_t)location))) {
				/*
				 * The init section may have been allocated far away
				 * from core; if the branch won't reach, allocate a
				 * PLT for it.
				 */
				uint64_t delta = ((int64_t)val - (int64_t)location) / 16;

				if (delta + (1 << 20) >= (1 << 21)) {
					val = get_fdesc(mod, val, &ok);
					val = get_plt(mod, location, val, &ok);
				}
			} else if (!is_internal(mod, val))
				val = get_plt(mod, location, val, &ok);
			/* FALL THROUGH */
		default:
			val -= bundle(location);
			break;

		case R_IA64_PCREL32MSB:
		case R_IA64_PCREL32LSB:
		case R_IA64_PCREL64MSB:
		case R_IA64_PCREL64LSB:
			val -= (uint64_t) location;
			break;
		}
		switch (r_type) {
		case R_IA64_PCREL60B: format = RF_INSN60; break;
		case R_IA64_PCREL21B: format = RF_INSN21B; break;
		case R_IA64_PCREL21M: format = RF_INSN21M; break;
		case R_IA64_PCREL21F: format = RF_INSN21F; break;
		default: break;
		}
		break;

	case RV_BDREL:
		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
		break;
	case RV_LTV:
		/* can link-time value relocs happen here? */
		BUG();
		break;

	case RV_PCREL2:
		if (r_type == R_IA64_PCREL21BI) {
			if (!is_internal(mod, val)) {
				printk(KERN_ERR "%s: %s reloc against "
				       "non-local symbol (%lx)\n", __func__,
				       reloc_name[r_type], (unsigned long)val);
				return -ENOEXEC;
			}
			format = RF_INSN21B;
		}
		val -= bundle(location);
		break;

	case RV_SPECIAL:
		switch (r_type) {
		case R_IA64_IPLTMSB:
		case R_IA64_IPLTLSB:
			val = get_fdesc(mod, get_plt(mod, location, val, &ok), &ok);
			format = RF_64LSB;
			if (r_type == R_IA64_IPLTMSB)
				format = RF_64MSB;
			break;

		case R_IA64_SUB:
			val = addend - sym->st_value;
			format = RF_INSN64;
			break;
		case R_IA64_LTOFF22X:
			if (gp_addressable(mod, val))
				val -= mod->arch.gp;
			else
				val = get_ltoff(mod, val, &ok);
			format = RF_INSN22;
			break;

		case R_IA64_LDXMOV:
			if (gp_addressable(mod, val)) {
				/* turn "ld8" into "mov": */
				DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location);
				ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL);
			}
			return 0;
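
		/*
		 * Background (per the linker-optimization convention for
		 * LTOFF22X/LDXMOV, as I read it): when the target turns out to
		 * be directly gp-addressable, the addl patched via LTOFF22X
		 * already yields the final address, so the ld8 that would have
		 * fetched that address from the GOT is rewritten into a plain
		 * register move above.
		 */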
		default:
			if (reloc_name[r_type])
				printk(KERN_ERR "%s: special reloc %s not supported\n",
				       mod->name, reloc_name[r_type]);
			else
				printk(KERN_ERR "%s: unknown special reloc %x\n",
				       mod->name, r_type);
			return -ENOEXEC;
		}
		break;
	case RV_TPREL:
	case RV_LTREL_TPREL:
	case RV_DTPMOD:
	case RV_LTREL_DTPMOD:
	case RV_DTPREL:
	case RV_LTREL_DTPREL:
		printk(KERN_ERR "%s: %s reloc not supported\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	default:
		printk(KERN_ERR "%s: unknown reloc %x\n", mod->name, r_type);
		return -ENOEXEC;
	}
	if (!ok)
		return -ENOEXEC;

	DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val,
	       reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend);

	switch (format) {
	case RF_INSN21B:	ok = apply_imm21b(mod, location, (int64_t) val / 16); break;
	case RF_INSN22:		ok = apply_imm22(mod, location, val); break;
	case RF_INSN64:		ok = apply_imm64(mod, location, val); break;
	case RF_INSN60:		ok = apply_imm60(mod, location, (int64_t) val / 16); break;
	case RF_32LSB:		put_unaligned(val, (uint32_t *) location); break;
	case RF_64LSB:		put_unaligned(val, (uint64_t *) location); break;
	case RF_32MSB:		/* ia64 Linux is little-endian... */
	case RF_64MSB:		/* ia64 Linux is little-endian... */
	case RF_INSN14:		/* must be within-module, i.e., resolved by "ld -r" */
	case RF_INSN21M:	/* must be within-module, i.e., resolved by "ld -r" */
	case RF_INSN21F:	/* must be within-module, i.e., resolved by "ld -r" */
		printk(KERN_ERR "%s: format %u needed by %s reloc is not supported\n",
		       mod->name, format, reloc_name[r_type] ? reloc_name[r_type] : "?");
		return -ENOEXEC;

	default:
		printk(KERN_ERR "%s: relocation %s resulted in unknown format %u\n",
		       mod->name, reloc_name[r_type] ? reloc_name[r_type] : "?", format);
		return -ENOEXEC;
	}
	return ok ? 0 : -ENOEXEC;
}
int
apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
		    unsigned int relsec, struct module *mod)
{
	unsigned int i, n = sechdrs[relsec].sh_size / sizeof(Elf64_Rela);
	Elf64_Rela *rela = (void *) sechdrs[relsec].sh_addr;
	Elf64_Shdr *target_sec;
	int ret;

	DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__,
	       relsec, n, sechdrs[relsec].sh_info);

	target_sec = sechdrs + sechdrs[relsec].sh_info;

	if (target_sec->sh_entsize == ~0UL)
		/*
		 * If the target section wasn't allocated, we don't need to relocate it.
		 * Happens, e.g., for debug sections.
		 */
		return 0;

	if (!mod->arch.gp) {
		/*
		 * XXX Should have an arch-hook for running this after final section
		 *     addresses have been selected...
		 */
		uint64_t gp;

		if (mod->core_size > MAX_LTOFF)
			/*
			 * This takes advantage of the fact that SHF_ARCH_SMALL gets
			 * allocated at the end of the module.
			 */
			gp = mod->core_size - MAX_LTOFF / 2;
		else
			gp = mod->core_size / 2;
		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
		mod->arch.gp = gp;
		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
	}
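
	/*
	 * For illustration: with a 10MB core and MAX_LTOFF = 4MB, gp lands 2MB
	 * below the end of the module, so the small-data/GOT area grouped at
	 * the end stays within gp's +/-2MB addressing window; a 1MB module
	 * instead gets gp at its midpoint.
	 */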
	for (i = 0; i < n; i++) {
		ret = do_reloc(mod, ELF64_R_TYPE(rela[i].r_info),
			       ((Elf64_Sym *) sechdrs[symindex].sh_addr
				+ ELF64_R_SYM(rela[i].r_info)),
			       rela[i].r_addend, target_sec,
			       (void *) target_sec->sh_addr + rela[i].r_offset);
		if (ret < 0)
			return ret;
	}
	return 0;
}
/*
 * Modules contain a single unwind table which covers both the core and the init text
 * sections, but since the two are not contiguous, we need to split this table up such that
 * we can register (and unregister) each "segment" separately.  Fortunately, this sounds
 * more complicated than it really is.
 */
static void
register_unwind_table (struct module *mod)
{
	struct unw_table_entry *start = (void *) mod->arch.unwind->sh_addr;
	struct unw_table_entry *end = start + mod->arch.unwind->sh_size / sizeof (*start);
	struct unw_table_entry tmp, *e1, *e2, *core, *init;
	unsigned long num_init = 0, num_core = 0;

	/* First, count how many init and core unwind-table entries there are. */
	for (e1 = start; e1 < end; ++e1)
		if (in_init(mod, e1->start_offset))
			++num_init;
		else
			++num_core;

	/*
	 * Second, sort the table such that all unwind-table entries for the init and core
	 * text sections are nicely separated.  We do this with a stupid bubble sort
	 * (unwind tables don't get ridiculously huge).
	 */
	for (e1 = start; e1 < end; ++e1) {
		for (e2 = e1 + 1; e2 < end; ++e2) {
			if (e2->start_offset < e1->start_offset) {
				tmp = *e1;
				*e1 = *e2;
				*e2 = tmp;
			}
		}
	}

	/*
	 * Third, locate the init and core segments in the unwind table:
	 */
	if (in_init(mod, start->start_offset)) {
		init = start;
		core = start + num_init;
	} else {
		core = start;
		init = start + num_core;
	}

	DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__,
	       mod->name, mod->arch.gp, num_init, num_core);

	/*
	 * Fourth, register both tables (if not empty).
	 */
	if (num_core > 0) {
		mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								core, core + num_core);
		DEBUGP("%s:  core: handle=%p [%p-%p)\n", __func__,
		       mod->arch.core_unw_table, core, core + num_core);
	}
	if (num_init > 0) {
		mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp,
								init, init + num_init);
		DEBUGP("%s:  init: handle=%p [%p-%p)\n", __func__,
		       mod->arch.init_unw_table, init, init + num_init);
	}
}
int
module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod)
{
	DEBUGP("%s: init: entry=%p\n", __func__, mod->init);
	if (mod->arch.unwind)
		register_unwind_table(mod);
#ifdef CONFIG_PARAVIRT
	if (mod->arch.paravirt_bundles) {
		struct paravirt_patch_site_bundle *start =
			(struct paravirt_patch_site_bundle *)
			mod->arch.paravirt_bundles->sh_addr;
		struct paravirt_patch_site_bundle *end =
			(struct paravirt_patch_site_bundle *)
			(mod->arch.paravirt_bundles->sh_addr +
			 mod->arch.paravirt_bundles->sh_size);

		paravirt_patch_apply_bundle(start, end);
	}
	if (mod->arch.paravirt_insts) {
		struct paravirt_patch_site_inst *start =
			(struct paravirt_patch_site_inst *)
			mod->arch.paravirt_insts->sh_addr;
		struct paravirt_patch_site_inst *end =
			(struct paravirt_patch_site_inst *)
			(mod->arch.paravirt_insts->sh_addr +
			 mod->arch.paravirt_insts->sh_size);

		paravirt_patch_apply_inst(start, end);
	}
#endif
	return 0;
}
void
module_arch_cleanup (struct module *mod)
{
	if (mod->arch.init_unw_table)
		unw_remove_unwind_table(mod->arch.init_unw_table);
	if (mod->arch.core_unw_table)
		unw_remove_unwind_table(mod->arch.core_unw_table);
}