author    | marcel <marcel@FreeBSD.org> | 2011-04-03 23:49:20 +0000
committer | marcel <marcel@FreeBSD.org> | 2011-04-03 23:49:20 +0000
commit    | 09f8bacb548dc6d3d4127689c542edcb9ea0dc1e (patch)
tree      | 8f01070f57e7175478f2e6afecfa6c8b092f4ae1 /sys/boot/ia64/common/copy.c
parent    | 57f20b6d96e83eb8b380cc3211f0ddf2cc96469e (diff)
Use the new arch_loadaddr I/F to align ELF objects to PBVM page
boundaries. For good measure, align all other objects to cache
line boundaries.
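For reference, the rounding involved is the usual power-of-two round-up; a minimal sketch (the helper name is illustrative, the expression is the same one added to ia64_loadaddr in the diff below):

#include <stdint.h>

/*
 * Round addr up to the next multiple of align. Only valid when align
 * is a power of two, which both a PBVM page size and a cache line
 * size are.
 */
static uint64_t
roundup_pow2(uint64_t addr, uint64_t align)
{
	return ((addr + align - 1) & ~(align - 1));
}

Adding align - 1 carries any partial page or line over into the next one, and masking with ~(align - 1) clears the low bits back down to a boundary.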
Use the new arch_loadseg I/F to keep track of kernel text and
data so that we can wire as much of it as possible. It is the
responsibility of the kernel to link critical (read: IVT-related)
code and data at the front of the respective segment so that it's
covered by TRs before the kernel has a chance to add more
translations.
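The segment-tracking half lives outside this file; a hypothetical sketch of what such a hook can record (the helper name, its arguments, and the bookkeeping are assumptions, not the committed arch_loadseg code): keep running bounds of the text and data segments as they are loaded, so the boot path can later wire them with translation registers (TRs).

#include <stdint.h>

/* Running bounds of the kernel text and data segments (sketch only). */
static uint64_t text_lo, text_hi, data_lo, data_hi;

static void
track_segment(int writable, uint64_t start, uint64_t len)
{
	uint64_t *lo = writable ? &data_lo : &text_lo;
	uint64_t *hi = writable ? &data_hi : &text_hi;

	/* Grow the segment's bounds to cover [start, start + len). */
	if (*hi == 0 || start < *lo)
		*lo = start;
	if (start + len > *hi)
		*hi = start + len;
}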
Use a better way of determining whether we're loading a legacy
kernel or not. We can't check for the presence of the PBVM page
table, because we may have unloaded that kernel and loaded an
older (legacy) kernel after that. Simply base the decision on the
most recent load address instead.
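Concretely, this mirrors the check added to ia64_va2pa in the diff below: a legacy kernel is linked in the region 7 direct-mapped window, a PBVM kernel is not, so the kind of virtual address translated most recently says which kind of kernel was loaded last. A minimal sketch (the helper name is illustrative; IA64_RR_BASE comes from the ia64 headers):

#include <stdint.h>

/*
 * Sketch: region 7 addresses belong to the identity-mapped window used
 * by legacy kernels; everything else reaching the translation path is
 * a PBVM address from a new-style kernel.
 */
static int
is_legacy_kernel_va(uint64_t va)
{
	return (va >= IA64_RR_BASE(7));
}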
Diffstat (limited to 'sys/boot/ia64/common/copy.c')
-rw-r--r-- | sys/boot/ia64/common/copy.c | 30
1 file changed, 24 insertions, 6 deletions
diff --git a/sys/boot/ia64/common/copy.c b/sys/boot/ia64/common/copy.c
index 93ef77b..95523c2 100644
--- a/sys/boot/ia64/common/copy.c
+++ b/sys/boot/ia64/common/copy.c
@@ -28,10 +28,12 @@
 __FBSDID("$FreeBSD$");
 
 #include <stand.h>
-#include <ia64/include/vmparam.h>
+#include <machine/param.h>
 
 #include "libia64.h"
 
+u_int ia64_legacy_kernel;
+
 uint64_t *ia64_pgtbl;
 uint32_t ia64_pgtblsz;
 
@@ -80,8 +82,8 @@ pgtbl_extend(u_int idx)
 	return (0);
 }
 
-static void *
-va2pa(vm_offset_t va, size_t *len)
+void *
+ia64_va2pa(vm_offset_t va, size_t *len)
 {
 	uint64_t pa;
 	u_int idx, ofs;
@@ -89,6 +91,7 @@ va2pa(vm_offset_t va, size_t *len)
 
 	/* Backward compatibility. */
 	if (va >= IA64_RR_BASE(7)) {
+		ia64_legacy_kernel = 1;
 		pa = IA64_RR_MASK(va);
 		return ((void *)pa);
 	}
@@ -98,6 +101,8 @@ va2pa(vm_offset_t va, size_t *len)
 		goto fail;
 	}
 
+	ia64_legacy_kernel = 0;
+
 	idx = (va - IA64_PBVM_BASE) >> IA64_PBVM_PAGE_SHIFT;
 	if (idx >= (ia64_pgtblsz >> 3)) {
 		error = pgtbl_extend(idx);
@@ -138,7 +143,7 @@ ia64_copyin(const void *src, vm_offset_t va, size_t len)
 	res = 0;
 	while (len > 0) {
 		sz = len;
-		pa = va2pa(va, &sz);
+		pa = ia64_va2pa(va, &sz);
 		if (sz == 0)
 			break;
 		bcopy(src, pa, sz);
@@ -159,7 +164,7 @@ ia64_copyout(vm_offset_t va, void *dst, size_t len)
 	res = 0;
 	while (len > 0) {
 		sz = len;
-		pa = va2pa(va, &sz);
+		pa = ia64_va2pa(va, &sz);
 		if (sz == 0)
 			break;
 		bcopy(pa, dst, sz);
@@ -170,6 +175,19 @@ ia64_copyout(vm_offset_t va, void *dst, size_t len)
 	return (res);
 }
 
+uint64_t
+ia64_loadaddr(u_int type, void *data, uint64_t addr)
+{
+	uint64_t align;
+
+	/*
+	 * Align ELF objects at PBVM page boundaries. Align all other
+	 * objects at cache line boundaries for good measure.
+	 */
+	align = (type == LOAD_ELF) ? IA64_PBVM_PAGE_SIZE : CACHE_LINE_SIZE;
+	return ((addr + align - 1) & ~(align - 1));
+}
+
 ssize_t
 ia64_readin(int fd, vm_offset_t va, size_t len)
 {
@@ -180,7 +198,7 @@ ia64_readin(int fd, vm_offset_t va, size_t len)
 	res = 0;
 	while (len > 0) {
 		sz = len;
-		pa = va2pa(va, &sz);
+		pa = ia64_va2pa(va, &sz);
 		if (sz == 0)
 			break;
 		s = read(fd, pa, sz);
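For completeness, a loader front end would hand these MD helpers to the machine-independent loader through archsw; a hedged sketch (the arch_loadaddr member is implied by the commit message, arch_copyin/arch_copyout/arch_readin already exist in struct arch_switch, and the init function name here is hypothetical):

#include "bootstrap.h"	/* struct arch_switch archsw (assumed location) */
#include "libia64.h"	/* ia64_loadaddr, ia64_copyin, ... (assumed declarations) */

/* Wire the ia64 MD helpers into the MI loader (sketch only). */
static void
md_archsw_init(void)
{
	archsw.arch_loadaddr = ia64_loadaddr;
	archsw.arch_copyin = ia64_copyin;
	archsw.arch_copyout = ia64_copyout;
	archsw.arch_readin = ia64_readin;
}

With that in place, every object the MI loader stages goes through ia64_loadaddr for placement and through ia64_copyin/ia64_readin, which now translate PBVM addresses via ia64_va2pa and record whether the most recent kernel was a legacy one.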