path: root/arch/x86_64/kernel/genapic_cluster.c
blob: cdb90e671b88a7bc96064f6af8ca48afc4768dea (plain)
/*
 * Copyright 2004 James Cleverdon, IBM.
 * Subject to the GNU Public License, v.2
 *
 * Clustered APIC subarch code.  Up to 255 CPUs, physical delivery.
 * (A more realistic maximum is around 230 CPUs.)
 *
 * Hacked for x86-64 by James Cleverdon from i386 architecture code by
 * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
 * James Cleverdon.
 */
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <asm/smp.h>
#include <asm/ipi.h>


/*
 * Set up the logical destination ID.
 *
 * Intel recommends setting DFR, LDR and TPR before enabling
 * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
 * document number 292116).  So here it goes...
 */
static void cluster_init_apic_ldr(void)
{
	unsigned long val, id;
	long i, count;
	u8 lid;
	u8 my_id = hard_smp_processor_id();
	u8 my_cluster = APIC_CLUSTER(my_id);

	/* Create logical APIC IDs by counting CPUs already in cluster. */
	for (count = 0, i = NR_CPUS; --i >= 0; ) {
		lid = x86_cpu_to_log_apicid[i];
		if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
			++count;
	}
	/*
	 * We only have a 4-bit-wide bitmap in cluster mode.  There's no way
	 * to get above 60 CPUs and still give each one its own bit.
	 * But, we're using physical IRQ delivery, so we don't care.
	 * Use bit 3 for the 4th through Nth CPU in each cluster.
	 */
	if (count >= XAPIC_DEST_CPUS_SHIFT)
		count = 3;
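	/*
	 * The cluster number occupies the high nibble of the logical ID
	 * and the per-CPU bit the low nibble, e.g. the second CPU seen
	 * in cluster 0x20 ends up with logical ID 0x22 (0x20 | 1 << 1).
	 */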
	id = my_cluster | (1UL << count);
	x86_cpu_to_log_apicid[smp_processor_id()] = id;
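	/* Program DFR (cluster model) before LDR, per the note above. */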
	apic_write(APIC_DFR, APIC_DFR_CLUSTER);
	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	val |= SET_APIC_LOGICAL_ID(id);
	apic_write(APIC_LDR, val);
}

/* Start with all IRQs pointing to boot CPU.  IRQ balancing will shift them. */

static cpumask_t cluster_target_cpus(void)
{
	return cpumask_of_cpu(0);
}

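/*
 * Physical delivery has no shorthand for an arbitrary mask, so
 * send_IPI_mask_sequence() raises one IPI per CPU in the mask.
 */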
static void cluster_send_IPI_mask(cpumask_t mask, int vector)
{
	send_IPI_mask_sequence(mask, vector);
}

static void cluster_send_IPI_allbutself(int vector)
{
	cpumask_t mask = cpu_online_map;

	cpu_clear(smp_processor_id(), mask);

	if (!cpus_empty(mask))
		cluster_send_IPI_mask(mask, vector);
}

static void cluster_send_IPI_all(int vector)
{
	cluster_send_IPI_mask(cpu_online_map, vector);
}

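/*
 * Clustered mode does no cross-check of the APIC ID against the
 * physical CPU present map; the APIC is simply reported as registered.
 */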
static int cluster_apic_id_registered(void)
{
	return 1;
}

static unsigned int cluster_cpu_mask_to_apicid(cpumask_t cpumask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	cpu = first_cpu(cpumask);
	if ((unsigned)cpu < NR_CPUS)
		return x86_cpu_to_apicid[cpu];
	else
		return BAD_APICID;
}

/* cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value.  For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static unsigned int phys_pkg_id(int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}

struct genapic apic_cluster = {
	.name = "clustered",
	.int_delivery_mode = dest_Fixed,
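	/* APIC_DEST_PHYSICAL is 0, so this selects physical destination mode. */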
	.int_dest_mode = (APIC_DEST_PHYSICAL != 0),
	.target_cpus = cluster_target_cpus,
	.apic_id_registered = cluster_apic_id_registered,
	.init_apic_ldr = cluster_init_apic_ldr,
	.send_IPI_all = cluster_send_IPI_all,
	.send_IPI_allbutself = cluster_send_IPI_allbutself,
	.send_IPI_mask = cluster_send_IPI_mask,
	.cpu_mask_to_apicid = cluster_cpu_mask_to_apicid,
	.phys_pkg_id = phys_pkg_id,
};