/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */

/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */

#ifndef EXEC_OBSOLETE_H
#define EXEC_OBSOLETE_H

#ifndef WANT_EXEC_OBSOLETE
#error Do not include exec-obsolete.h
#endif

#ifndef CONFIG_USER_ONLY
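
/*
 * RAM block management.  qemu_ram_alloc() allocates 'size' bytes of guest
 * RAM for the given MemoryRegion; qemu_ram_alloc_from_ptr() does the same
 * but uses caller-supplied host memory as backing.  Both return an offset
 * into QEMU's ram_addr_t space, which the qemu_ram_free*() variants take
 * back.
 */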
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

struct MemoryRegion;
struct MemoryRegionSection;
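
/*
 * Enter a MemoryRegionSection into the physical memory map maintained by
 * exec.c; 'readonly' requests a read-only (ROM-like) mapping.
 */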
void cpu_register_physical_memory_log(struct MemoryRegionSection *section,
                                      bool readonly);

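/*
 * Tell the accelerator (KVM, for instance) that MMIO writes to this
 * physical range may be coalesced and delivered in batches instead of
 * causing one exit per write.
 */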
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

int cpu_physical_memory_set_dirty_tracking(int enable);
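
/*
 * Dirty page tracking.  cpu_physical_memory_set_dirty_tracking() turns
 * dirty logging on or off globally.  ram_list.phys_dirty keeps one byte
 * per target page; each flag bit below serves a different client (VGA
 * display refresh, the translator's code-dirty tracking, RAM migration),
 * and a byte value of 0xff means the page is dirty for every client.
 *
 * Illustrative use of the helpers declared below, e.g. from a migration
 * loop:
 *
 *     if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
 *                                       MIGRATION_DIRTY_FLAG)) {
 *         ... transfer the page ...
 *         cpu_physical_memory_mask_dirty_range(addr, TARGET_PAGE_SIZE,
 *                                              MIGRATION_DIRTY_FLAG);
 *     }
 */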
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}
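
/* return the raw dirty byte for the page containing addr */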
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}
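
/* nonzero if any page in [start, start + length) has any of dirty_flags set */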
static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                int dirty_flags)
{
    int ret = 0;
    uint8_t *p;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        ret |= *p++ & dirty_flags;
    }
    return ret;
}
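
/* mark the page containing addr dirty for all clients */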
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}
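
/* OR dirty_flags into the page's dirty byte; returns the updated byte */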
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
}
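
/* set dirty_flags on every page in [start, start + length) */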
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       int dirty_flags)
{
    uint8_t *p;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        *p++ |= dirty_flags;
    }
}
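
/* clear dirty_flags on every page in [start, start + length) */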
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
{
    int mask;
    uint8_t *p;
    ram_addr_t addr, end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;
    mask = ~dirty_flags;
    p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
    for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        *p++ &= mask;
    }
}
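
/*
 * Clear dirty_flags for [start, end).  Unlike the mask helper above, this
 * is implemented in exec.c, takes an end address rather than a length, and
 * can also update CPU TLB state so the pages get re-dirtied on the next
 * write.
 */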
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);

extern const IORangeOps memory_region_iorange_ops;

#endif /* !CONFIG_USER_ONLY */

#endif /* EXEC_OBSOLETE_H */