1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
|
/*
* Support for Medifield PNW Camera Imaging ISP subsystem.
*
* Copyright (c) 2010 Intel Corporation. All Rights Reserved.
*
* Copyright (c) 2010 Silicon Hive www.siliconhive.com.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
*/
/*
* ISP MMU driver for classic two-level page tables
*/
#ifndef __ISP_MMU_H__
#define __ISP_MMU_H__
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/slab.h>
/*
 * do not change these values, the page size for ISP must be the
 * same as kernel's page size.
 */
#define ISP_PAGE_OFFSET		12
#define ISP_PAGE_SIZE		(1U << ISP_PAGE_OFFSET)
#define ISP_PAGE_MASK		(~(phys_addr_t)(ISP_PAGE_SIZE - 1))

/* Bits [31:22] of an ISP virtual address index the L1 page table. */
#define ISP_L1PT_OFFSET		22
#define ISP_L1PT_MASK		(~((1U << ISP_L1PT_OFFSET) - 1))

/* Bits [21:12] index the L2 page table; [11:0] are the page offset. */
#define ISP_L2PT_OFFSET		12
#define ISP_L2PT_MASK		(~(ISP_L1PT_MASK|(~(ISP_PAGE_MASK))))

#define ISP_L1PT_PTES		1024
#define ISP_L2PT_PTES		1024

#define ISP_PTR_TO_L1_IDX(x)	((((x) & ISP_L1PT_MASK)) \
					>> ISP_L1PT_OFFSET)

#define ISP_PTR_TO_L2_IDX(x)	((((x) & ISP_L2PT_MASK)) \
					>> ISP_L2PT_OFFSET)

/* Round x up to the next ISP page boundary. */
#define ISP_PAGE_ALIGN(x)	(((x) + (ISP_PAGE_SIZE-1)) \
					& ISP_PAGE_MASK)

/*
 * Compose an ISP virtual address from an L1 index, an L2 index and a
 * byte offset within the page.
 *
 * Must be an expression so callers can use its value: the previous
 * do { ... } while (0) wrapper produced no result (and did not even
 * compile, since the inner expression statement lacked a semicolon).
 */
#define ISP_PT_TO_VIRT(l1_idx, l2_idx, offset)		\
	(((l1_idx) << ISP_L1PT_OFFSET) |		\
	 ((l2_idx) << ISP_L2PT_OFFSET) |		\
	 (offset))

/* Convert between a page count and a byte size (ceil/floor variants). */
#define pgnr_to_size(pgnr)	((pgnr) << ISP_PAGE_OFFSET)
#define size_to_pgnr_ceil(size)	(((size) + (1 << ISP_PAGE_OFFSET) - 1)\
					>> ISP_PAGE_OFFSET)
#define size_to_pgnr_bottom(size)	((size) >> ISP_PAGE_OFFSET)
struct isp_mmu;

/*
 * Hardware-specific MMU driver descriptor, registered via isp_mmu_init().
 *
 * const value
 *
 * @name:
 *	driver name
 * @pte_valid_mask:
 *	should be 1 bit valid data, meaning the value should
 *	be power of 2.
 */
struct isp_mmu_client {
	char *name;
	unsigned int pte_valid_mask;
	/* PTE value used for unmapped entries. */
	unsigned int null_pte;
	/*
	 * set/get page directory base address (physical address).
	 *
	 * must be provided.
	 */
	int (*set_pd_base) (struct isp_mmu *mmu,
			phys_addr_t pd_base);
	/*
	 * NOTE(review): takes pd_base by value, so it cannot return the
	 * base through this parameter — presumably the base is the
	 * return value; confirm against the implementations.
	 */
	unsigned int (*get_pd_base) (struct isp_mmu *mmu, phys_addr_t pd_base);
	/*
	 * callback to flush tlb.
	 *
	 * tlb_flush_range will at least flush TLBs containing
	 * address mapping from addr to addr + size.
	 *
	 * tlb_flush_all will flush all TLBs.
	 *
	 * tlb_flush_all must be provided. if tlb_flush_range is
	 * not valid, it will be set to tlb_flush_all by default.
	 */
	void (*tlb_flush_range) (struct isp_mmu *mmu,
				unsigned int addr, unsigned int size);
	void (*tlb_flush_all) (struct isp_mmu *mmu);
	/* Convert between a physical address and its PTE encoding. */
	unsigned int (*phys_to_pte) (struct isp_mmu *mmu,
				phys_addr_t phys);
	phys_addr_t (*pte_to_phys) (struct isp_mmu *mmu,
				unsigned int pte);
};
/*
 * Per-device state for the two-level ISP page table.
 */
struct isp_mmu {
	struct isp_mmu_client *driver;	/* HW-specific callbacks and constants */
	unsigned int l1_pte;		/* PTE encoding of the L1 table address */
	/* One refcount per L1 entry, i.e. per L2 page table. */
	int l2_pgt_refcount[ISP_L1PT_PTES];
	/* NOTE(review): presumably the page-directory physical base — confirm. */
	phys_addr_t base_address;
	struct mutex pt_mutex;		/* serializes isp_mmu_map/isp_mmu_unmap */
	struct kmem_cache *tbl_cache;	/* allocator for page-table pages */
};
/* flags for PDE and PTE */
#define ISP_PTE_VALID_MASK(mmu) \
((mmu)->driver->pte_valid_mask)
#define ISP_PTE_VALID(mmu, pte) \
((pte) & ISP_PTE_VALID_MASK(mmu))
#define NULL_PAGE ((phys_addr_t)(-1) & ISP_PAGE_MASK)
#define PAGE_VALID(page) ((page) != NULL_PAGE)
/*
 * init mmu with specific mmu driver.
 */
int isp_mmu_init(struct isp_mmu *mmu, struct isp_mmu_client *driver);
/*
 * cleanup all mmu related things.
 */
void isp_mmu_exit(struct isp_mmu *mmu);
/*
 * setup/remove address mapping for pgnr contiguous physical pages
 * and isp_virt.
 *
 * map/unmap is mutex lock protected, and caller does not have
 * to do lock/unlock operation.
 *
 * map/unmap will not flush tlb, and caller needs to deal with
 * this itself.
 */
int isp_mmu_map(struct isp_mmu *mmu, unsigned int isp_virt,
		phys_addr_t phys, unsigned int pgnr);
void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		unsigned int pgnr);
static inline void isp_mmu_flush_tlb_all(struct isp_mmu *mmu)
{
if (mmu->driver && mmu->driver->tlb_flush_all)
mmu->driver->tlb_flush_all(mmu);
}
#define isp_mmu_flush_tlb isp_mmu_flush_tlb_all
static inline void isp_mmu_flush_tlb_range(struct isp_mmu *mmu,
unsigned int start, unsigned int size)
{
if (mmu->driver && mmu->driver->tlb_flush_range)
mmu->driver->tlb_flush_range(mmu, start, size);
}
#endif /* __ISP_MMU_H__ */
|