path: root/sys/boot/ia64/common/exec.c
author:    marcel <marcel@FreeBSD.org>  2011-04-03 23:49:20 +0000
committer: marcel <marcel@FreeBSD.org>  2011-04-03 23:49:20 +0000
commit:    09f8bacb548dc6d3d4127689c542edcb9ea0dc1e (patch)
tree:      8f01070f57e7175478f2e6afecfa6c8b092f4ae1  /sys/boot/ia64/common/exec.c
parent:    57f20b6d96e83eb8b380cc3211f0ddf2cc96469e (diff)
Use the new arch_loadaddr I/F to align ELF objects to PBVM page boundaries. For good measure, align all other objects to cache-line boundaries.

Use the new arch_loadseg I/F to keep track of kernel text and data so that we can wire as much of it as possible. It is the responsibility of the kernel to link critical (read: IVT-related) code and data at the front of the respective segment so that it is covered by TRs before the kernel has a chance to add more translations.

Use a better way of determining whether we're loading a legacy kernel or not. We can't check for the presence of the PBVM page table, because we may have unloaded that kernel and loaded an older (legacy) kernel after that. Simply use the latest load address for it.
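The alignment requirement exists because ia64 translation registers map naturally aligned, power-of-2 sized regions, so the largest region the loader can wire with a single TR is limited by both the size and the alignment of a segment; that is what the new sz2shft() helper in the diff below computes. The following is a minimal standalone sketch of the same computation, using hypothetical addresses and sizes purely for illustration (the typedefs are stand-ins for the real machine types, and note that mmu_wire() further clamps the result to a valid ia64 page size):

/*
 * Standalone sketch: the sz2shft() helper added by this commit, driven
 * with made-up inputs to show why segment alignment determines how large
 * a single wired translation can be.
 */
#include <stdio.h>

typedef unsigned long vm_offset_t;
typedef unsigned long vm_size_t;
typedef unsigned int u_int;

static u_int
sz2shft(vm_offset_t ofs, vm_size_t sz)
{
	vm_size_t s;
	u_int shft;

	shft = 12;		/* Start with 4K */
	s = 1 << shft;
	while (s < sz) {	/* Grow to the smallest power of 2 >= sz. */
		shft++;
		s <<= 1;
	}
	do {			/* Shrink until ofs is aligned to the page. */
		shft--;
		s >>= 1;
	} while (ofs & (s - 1));

	return (shft);
}

int
main(void)
{
	/* 2.5MB segment at a 1GB-aligned address: shift 21 (2MB). */
	printf("%u\n", sz2shft(0x40000000UL, 0x280000UL));
	/* Same size, but only 4K-aligned: shift 12 (4K). */
	printf("%u\n", sz2shft(0x40001000UL, 0x280000UL));
	return (0);
}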
Diffstat (limited to 'sys/boot/ia64/common/exec.c')
-rw-r--r--  sys/boot/ia64/common/exec.c  131
1 file changed, 92 insertions(+), 39 deletions(-)
diff --git a/sys/boot/ia64/common/exec.c b/sys/boot/ia64/common/exec.c
index cb91141..6c7f027 100644
--- a/sys/boot/ia64/common/exec.c
+++ b/sys/boot/ia64/common/exec.c
@@ -36,13 +36,20 @@ __FBSDID("$FreeBSD$");
#include <machine/ia64_cpu.h>
#include <machine/pte.h>
-#include <ia64/include/vmparam.h>
-
#include <efi.h>
#include <efilib.h>
#include "libia64.h"
+static u_int itr_idx = 0;
+static u_int dtr_idx = 0;
+
+static vm_offset_t ia64_text_start;
+static size_t ia64_text_size;
+
+static vm_offset_t ia64_data_start;
+static size_t ia64_data_size;
+
static int elf64_exec(struct preloaded_file *amp);
static int elf64_obj_exec(struct preloaded_file *amp);
@@ -61,6 +68,26 @@ struct file_format *file_formats[] = {
NULL
};
+static u_int
+sz2shft(vm_offset_t ofs, vm_size_t sz)
+{
+ vm_size_t s;
+ u_int shft;
+
+ shft = 12; /* Start with 4K */
+ s = 1 << shft;
+ while (s < sz) {
+ shft++;
+ s <<= 1;
+ }
+ do {
+ shft--;
+ s >>= 1;
+ } while (ofs & (s - 1));
+
+ return (shft);
+}
+
/*
* Entered with psr.ic and psr.i both zero.
*/
@@ -84,49 +111,43 @@ enter_kernel(uint64_t start, struct bootinfo *bi)
/* NOTREACHED */
}
-static void
-mmu_wire(vm_offset_t va, vm_paddr_t pa, vm_size_t sz, u_int acc)
+static u_int
+mmu_wire(vm_offset_t va, vm_paddr_t pa, u_int pgshft, u_int acc)
{
- static u_int iidx = 0, didx = 0;
pt_entry_t pte;
- u_int shft;
/* Round up to the smallest possible page size. */
- if (sz < 4096)
- sz = 4096;
- /* Determine the exponent (base 2). */
- shft = 0;
- while (sz > 1) {
- shft++;
- sz >>= 1;
- }
+ if (pgshft < 12)
+ pgshft = 12;
/* Truncate to the largest possible page size (256MB). */
- if (shft > 28)
- shft = 28;
+ if (pgshft > 28)
+ pgshft = 28;
/* Round down to a valid (mappable) page size. */
- if (shft > 14 && (shft & 1) != 0)
- shft--;
+ if (pgshft > 14 && (pgshft & 1) != 0)
+ pgshft--;
pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
PTE_PL_KERN | (acc & PTE_AR_MASK) | (pa & PTE_PPN_MASK);
__asm __volatile("mov cr.ifa=%0" :: "r"(va));
- __asm __volatile("mov cr.itir=%0" :: "r"(shft << 2));
+ __asm __volatile("mov cr.itir=%0" :: "r"(pgshft << 2));
__asm __volatile("srlz.d;;");
- __asm __volatile("ptr.d %0,%1" :: "r"(va), "r"(shft << 2));
+ __asm __volatile("ptr.d %0,%1" :: "r"(va), "r"(pgshft << 2));
__asm __volatile("srlz.d;;");
- __asm __volatile("itr.d dtr[%0]=%1" :: "r"(didx), "r"(pte));
+ __asm __volatile("itr.d dtr[%0]=%1" :: "r"(dtr_idx), "r"(pte));
__asm __volatile("srlz.d;;");
- didx++;
+ dtr_idx++;
- if (acc == PTE_AR_RWX) {
- __asm __volatile("ptr.i %0,%1;;" :: "r"(va), "r"(shft << 2));
+ if (acc == PTE_AR_RWX || acc == PTE_AR_RX) {
+ __asm __volatile("ptr.i %0,%1;;" :: "r"(va), "r"(pgshft << 2));
__asm __volatile("srlz.i;;");
- __asm __volatile("itr.i itr[%0]=%1;;" :: "r"(iidx), "r"(pte));
+ __asm __volatile("itr.i itr[%0]=%1;;" :: "r"(itr_idx), "r"(pte));
__asm __volatile("srlz.i;;");
- iidx++;
+ itr_idx++;
}
+
+ return (pgshft);
}
static void
@@ -143,28 +164,43 @@ mmu_setup_legacy(uint64_t entry)
ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));
__asm __volatile("srlz.i;;");
- mmu_wire(entry, IA64_RR_MASK(entry), 1UL << 28, PTE_AR_RWX);
+ mmu_wire(entry, IA64_RR_MASK(entry), 28, PTE_AR_RWX);
}
static void
-mmu_setup_paged(vm_offset_t pbvm_top)
+mmu_setup_paged(struct bootinfo *bi)
{
- vm_size_t sz;
+ void *pa;
+ size_t sz;
+ u_int shft;
ia64_set_rr(IA64_RR_BASE(IA64_PBVM_RR),
(IA64_PBVM_RR << 8) | (IA64_PBVM_PAGE_SHIFT << 2));
__asm __volatile("srlz.i;;");
/* Wire the PBVM page table. */
- mmu_wire(IA64_PBVM_PGTBL, (uintptr_t)ia64_pgtbl, ia64_pgtblsz,
- PTE_AR_RW);
-
- /* Wire as much of the PBVM we can. This must be a power of 2. */
- sz = pbvm_top - IA64_PBVM_BASE;
- sz = (sz + IA64_PBVM_PAGE_MASK) & ~IA64_PBVM_PAGE_MASK;
- while (sz & (sz - 1))
- sz -= IA64_PBVM_PAGE_SIZE;
- mmu_wire(IA64_PBVM_BASE, ia64_pgtbl[0], sz, PTE_AR_RWX);
+ mmu_wire(IA64_PBVM_PGTBL, (uintptr_t)ia64_pgtbl,
+ sz2shft(IA64_PBVM_PGTBL, ia64_pgtblsz), PTE_AR_RW);
+
+ /* Wire as much of the text segment as we can. */
+ sz = ia64_text_size; /* XXX */
+ pa = ia64_va2pa(ia64_text_start, &ia64_text_size);
+ ia64_text_size = sz; /* XXX */
+ shft = sz2shft(ia64_text_start, ia64_text_size);
+ shft = mmu_wire(ia64_text_start, (uintptr_t)pa, shft, PTE_AR_RX);
+ ia64_copyin(&shft, (uintptr_t)&bi->bi_text_mapped, 4);
+
+ /* Wire as much of the data segment as well. */
+ sz = ia64_data_size; /* XXX */
+ pa = ia64_va2pa(ia64_data_start, &ia64_data_size);
+ ia64_data_size = sz; /* XXX */
+ shft = sz2shft(ia64_data_start, ia64_data_size);
+ shft = mmu_wire(ia64_data_start, (uintptr_t)pa, shft, PTE_AR_RW);
+ ia64_copyin(&shft, (uintptr_t)&bi->bi_data_mapped, 4);
+
+ /* Update the bootinfo with the number of TRs used. */
+ ia64_copyin(&itr_idx, (uintptr_t)&bi->bi_itr_used, 4);
+ ia64_copyin(&dtr_idx, (uintptr_t)&bi->bi_dtr_used, 4);
}
static int
@@ -196,7 +232,7 @@ elf64_exec(struct preloaded_file *fp)
if (IS_LEGACY_KERNEL())
mmu_setup_legacy(hdr->e_entry);
else
- mmu_setup_paged((uintptr_t)(bi + 1));
+ mmu_setup_paged(bi);
enter_kernel(hdr->e_entry, bi);
/* NOTREACHED */
@@ -211,3 +247,20 @@ elf64_obj_exec(struct preloaded_file *fp)
fp->f_name);
return (ENOSYS);
}
+
+void
+ia64_loadseg(Elf_Ehdr *eh, Elf_Phdr *ph, uint64_t delta)
+{
+
+ if (eh->e_type != ET_EXEC)
+ return;
+
+ if (ph->p_flags & PF_X) {
+ ia64_text_start = ph->p_vaddr + delta;
+ ia64_text_size = ph->p_memsz;
+ } else {
+ ia64_data_start = ph->p_vaddr + delta;
+ ia64_data_size = ph->p_memsz;
+ }
+}
+