diff options
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_builtin.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_builtin.c | 105 |
1 file changed, 102 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c index a71e2fc..18afe65 100644 --- a/arch/powerpc/kvm/book3s_hv_builtin.c +++ b/arch/powerpc/kvm/book3s_hv_builtin.c @@ -53,11 +53,109 @@ EXPORT_SYMBOL_GPL(__xive_vm_h_eoi); /* * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206) - * should be power of 2. + * only needs to be 256kB. */ -#define HPT_ALIGN_PAGES ((1 << 18) >> PAGE_SHIFT) /* 256k */ +#define HPT_ALIGN_ORDER 18 /* 256k */ +#define HPT_ALIGN_PAGES ((1 << HPT_ALIGN_ORDER) >> PAGE_SHIFT) + +#define KVM_RESV_CHUNK_ORDER HPT_ALIGN_ORDER + /* - * By default we reserve 5% of memory for hash pagetable allocation. + * By default we reserve 2% of memory exclusively for guest HPT + * allocations, plus another 3% in the CMA zone which can be used + * either for HPTs or for movable page allocations. + * Each guest's HPT will be sized at between 1/128 and 1/64 of its + * memory, i.e. up to 1.56%, and allowing for about a 3x memory + * overcommit factor gets us to about 5%. 
+ */ +static unsigned long kvm_hpt_resv_ratio = 2; + +static int __init early_parse_kvm_hpt_resv(char *p) +{ + pr_debug("%s(%s)\n", __func__, p); + if (!p) + return -EINVAL; + return kstrtoul(p, 0, &kvm_hpt_resv_ratio); +} +early_param("kvm_hpt_resv_ratio", early_parse_kvm_hpt_resv); + +static unsigned long kvm_resv_addr; +static unsigned long *kvm_resv_bitmap; +static unsigned long kvm_resv_chunks; +static DEFINE_MUTEX(kvm_resv_lock); + +void kvm_resv_hpt_init(void) +{ + unsigned long align = 1ul << KVM_RESV_CHUNK_ORDER; + unsigned long size, bm_size; + unsigned long addr, bm; + unsigned long *bmp; + + if (!cpu_has_feature(CPU_FTR_HVMODE)) + return; + + size = memblock_phys_mem_size() * kvm_hpt_resv_ratio / 100; + size = ALIGN(size, align); + if (!size) + return; + + pr_info("KVM: Allocating %lu MiB for hashed page tables\n", + size >> 20); + + addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); + if (!addr) { + pr_err("KVM: Allocation of reserved memory for HPTs failed\n"); + return; + } + pr_info("KVM: %lu MiB reserved for HPTs at %lx\n", size >> 20, addr); + + bm_size = BITS_TO_LONGS(size >> KVM_RESV_CHUNK_ORDER) * sizeof(long); + bm = __memblock_alloc_base(bm_size, sizeof(long), + MEMBLOCK_ALLOC_ACCESSIBLE); + if (!bm) { + pr_err("KVM: Allocation of reserved memory bitmap failed\n"); + return; + } + bmp = __va(bm); + memset(bmp, 0, bm_size); + + kvm_resv_addr = (unsigned long) __va(addr); + kvm_resv_chunks = size >> KVM_RESV_CHUNK_ORDER; + kvm_resv_bitmap = bmp; +} + +unsigned long kvmhv_alloc_resv_hpt(u32 order) +{ + unsigned long nr_chunks = 1ul << (order - KVM_RESV_CHUNK_ORDER); + unsigned long chunk; + + mutex_lock(&kvm_resv_lock); + chunk = bitmap_find_next_zero_area(kvm_resv_bitmap, kvm_resv_chunks, + 0, nr_chunks, 0); + if (chunk < kvm_resv_chunks) + bitmap_set(kvm_resv_bitmap, chunk, nr_chunks); + mutex_unlock(&kvm_resv_lock); + + if (chunk < kvm_resv_chunks) + return kvm_resv_addr + (chunk << KVM_RESV_CHUNK_ORDER); + return 0; +} 
+EXPORT_SYMBOL_GPL(kvmhv_alloc_resv_hpt); + +void kvmhv_release_resv_hpt(unsigned long addr, u32 order) +{ + unsigned long nr_chunks = 1ul << (order - KVM_RESV_CHUNK_ORDER); + unsigned long chunk = (addr - kvm_resv_addr) >> KVM_RESV_CHUNK_ORDER; + + mutex_lock(&kvm_resv_lock); + if (chunk + nr_chunks <= kvm_resv_chunks) + bitmap_clear(kvm_resv_bitmap, chunk, nr_chunks); + mutex_unlock(&kvm_resv_lock); +} +EXPORT_SYMBOL_GPL(kvmhv_release_resv_hpt); + +/* + * By default we reserve 3% of memory for the CMA zone. */ static unsigned long kvm_cma_resv_ratio = 5; @@ -106,6 +204,7 @@ void __init kvm_cma_reserve(void) */ if (!cpu_has_feature(CPU_FTR_HVMODE)) return; + /* * We cannot use memblock_phys_mem_size() here, because * memblock_analyze() has not been called yet. |