/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 */

/*
 * Cross Partition (XP) sn2-based functions.
 *
 *	Architecture specific implementation of common functions.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <asm/sn/bte.h>
#include <asm/sn/sn_sal.h>
#include "xp.h"

/*
 * The export of xp_nofault_PIOR needs to happen here since it is defined
 * in drivers/misc/sgi-xp/xp_nofault.S. The target of the nofault read is
 * defined here.
 */
EXPORT_SYMBOL_GPL(xp_nofault_PIOR);

u64 xp_nofault_PIOR_target;
EXPORT_SYMBOL_GPL(xp_nofault_PIOR_target);

/*
 * Register a nofault code region which performs a cross-partition PIO read.
 * If the PIO read times out, the MCA handler will consume the error and
 * return to a kernel-provided instruction to indicate an error. This PIO read
 * exists because it is guaranteed to timeout if the destination is down
 * (amo operations do not timeout on at least some CPUs on Shubs <= v1.2,
 * which unfortunately we have to work around).
 */
static enum xp_retval
xp_register_nofault_code_sn2(void)
{
	int ret;
	u64 func_addr;
	u64 err_func_addr;
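
	/*
	 * On ia64 a function symbol resolves to a function descriptor; the
	 * first word of that descriptor is the actual code entry address,
	 * which is what the SAL nofault registration below needs.
	 */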
	func_addr = *(u64 *)xp_nofault_PIOR;
	err_func_addr = *(u64 *)xp_error_PIOR;
	ret = sn_register_nofault_code(func_addr, err_func_addr, err_func_addr,
				       1, 1);
	if (ret != 0) {
		dev_err(xp, "can't register nofault code, error=%d\n", ret);
		return xpSalError;
	}

	/*
	 * Setup the nofault PIO read target. (There is no special reason why
	 * SH_IPI_ACCESS was selected.)
	 */
	if (is_shub1())
		xp_nofault_PIOR_target = SH1_IPI_ACCESS;
	else if (is_shub2())
		xp_nofault_PIOR_target = SH2_IPI_ACCESS0;

	return xpSuccess;
}

static void
xp_unregister_nofault_code_sn2(void)
{
	u64 func_addr = *(u64 *)xp_nofault_PIOR;
	u64 err_func_addr = *(u64 *)xp_error_PIOR;

	/* unregister the PIO read nofault code region */
	(void)sn_register_nofault_code(func_addr, err_func_addr,
				       err_func_addr, 1, 0);
}

/*
 * Convert a virtual memory address to a physical memory address.
 */
static unsigned long
xp_pa_sn2(void *addr)
{
	return __pa(addr);
}

/*
 * Wrapper for bte_copy().
 *
 *	dst_pa - physical address of the destination of the transfer.
 *	src_pa - physical address of the source of the transfer.
 *	len - number of bytes to transfer from source to destination.
 *
 * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock.
 */
static enum xp_retval
xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
		     size_t len)
{
	bte_result_t ret;

	ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
	if (ret == BTE_SUCCESS)
		return xpSuccess;

	if (is_shub2()) {
		dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
			"0x%016lx src_pa=0x%016lx len=%ld\n", ret, dst_pa,
			src_pa, len);
	} else {
		dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
			"src_pa=0x%016lx len=%ld\n", ret, dst_pa, src_pa, len);
	}

	return xpBteCopyError;
}
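
/*
 * Return the NASID (node address space identifier) of the node on which
 * the given cpu resides.
 */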
static int
xp_cpu_to_nasid_sn2(int cpuid)
{
	return cpuid_to_nasid(cpuid);
}
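
/*
 * Initialize the sn2-specific portions of xp: install the sn2 versions of
 * the architecture-specific callouts and register the nofault code region.
 */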
enum xp_retval
xp_init_sn2(void)
{
	BUG_ON(!is_shub());

	xp_max_npartitions = XP_MAX_NPARTITIONS_SN2;
	xp_partition_id = sn_partition_id;
	xp_region_size = sn_region_size;

	xp_pa = xp_pa_sn2;
	xp_remote_memcpy = xp_remote_memcpy_sn2;
	xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;

	return xp_register_nofault_code_sn2();
}
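
/*
 * Undo what xp_init_sn2() did: unregister the nofault code region.
 */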
void
xp_exit_sn2(void)
{
	BUG_ON(!is_shub());

	xp_unregister_nofault_code_sn2();
}