path: root/include/asm-ia64/pgalloc.h
#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) IA-64 page size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>

#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>

DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
DECLARE_PER_CPU(long, __pgtable_quicklist_size);
#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
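
/*
 * The quicklists are per-CPU LIFO caches of zeroed, page-sized pages
 * used for page tables.  Free pages are chained through their first
 * word, so popping one only requires re-zeroing that word before
 * handing the page out.
 */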

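/*
 * Sum the per-CPU list sizes.  The counters are sampled without
 * locking, so the result is approximate on a running system.
 */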
static inline long pgtable_quicklist_total_size(void)
{
	long ql_size = 0;
	int cpuid;

	for_each_online_cpu(cpuid) {
		ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
	}
	return ql_size;
}

static inline void *pgtable_quicklist_alloc(void)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = pgtable_quicklist;
	if (likely(ret != NULL)) {
		/* Pop the list head; clearing the link word restores the
		 * all-zero contents that callers expect. */
		pgtable_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		--pgtable_quicklist_size;
		preempt_enable();
	} else {
		/* Re-enable preemption before falling back to the page
		 * allocator: a GFP_KERNEL allocation may sleep, which is
		 * not allowed with preemption disabled. */
		preempt_enable();
		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	}

	return ret;
}

static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));

	/* Keep the quicklist node-local: a page belonging to a remote
	 * node goes straight back to the page allocator instead. */
	if (unlikely(nid != numa_node_id())) {
		free_page((unsigned long)pgtable_entry);
		return;
	}
#endif

	/* Push onto the per-CPU list, linking through the first word. */
	preempt_disable();
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;
	++pgtable_quicklist_size;
	preempt_enable();
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return pgtable_quicklist_alloc();
}

static inline void pgd_free(pgd_t * pgd)
{
	pgtable_quicklist_free(pgd);
}

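/*
 * The populate helpers link a newly allocated lower-level table into
 * its parent entry; on ia64 the entry is simply the physical address
 * of the next-level table.
 */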
static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pmd_free(pmd_t * pmd)
{
	pgtable_quicklist_free(pmd);
}

#define __pmd_free_tlb(tlb, pmd)	pmd_free(pmd)

static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}

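/*
 * User PTE pages are returned as struct page pointers while kernel
 * PTE pages are returned as kernel virtual addresses; both come from
 * the same per-CPU quicklist.
 */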
static inline struct page *pte_alloc_one(struct mm_struct *mm,
					 unsigned long addr)
{
	return virt_to_page(pgtable_quicklist_alloc());
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
					  unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

static inline void pte_free(struct page *pte)
{
	pgtable_quicklist_free(page_address(pte));
}

static inline void pte_free_kernel(pte_t * pte)
{
	pgtable_quicklist_free(pte);
}

#define __pte_free_tlb(tlb, pte)	pte_free(pte)

extern void check_pgt_cache(void);

#endif				/* _ASM_IA64_PGALLOC_H */