path: root/kernel/irq/numa_migrate.c
/*
 * NUMA irq-desc migration code
 *
 * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
 * the new "home node" of the IRQ.
 */

#include <linux/irq.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include "internals.h"

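/*
 * Allocate the kstat_irqs array for the new descriptor and, if a fresh
 * buffer was handed out, copy the accumulated per-CPU interrupt counts
 * over from the old descriptor.
 */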
static void init_copy_kstat_irqs(struct irq_desc *old_desc,
				 struct irq_desc *desc,
				 int cpu, int nr)
{
	unsigned long bytes;

	init_kstat_irqs(desc, cpu, nr);

	if (desc->kstat_irqs != old_desc->kstat_irqs) {
		/* Compute how many bytes of stats we have per irq and copy them over */
		bytes = nr * sizeof(unsigned int);

		memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
	}
}

static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
{
	if (old_desc->kstat_irqs == desc->kstat_irqs)
		return;

	kfree(old_desc->kstat_irqs);
	old_desc->kstat_irqs = NULL;
}

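/*
 * Copy the old descriptor into the newly allocated one and reinitialize
 * everything that must not be shared: the lock, the affinity cpumasks,
 * the kstat_irqs buffer and the arch specific chip_data.
 */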
static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
		 struct irq_desc *desc, int cpu)
{
	memcpy(desc, old_desc, sizeof(struct irq_desc));
	if (!init_alloc_desc_masks(desc, cpu, false)) {
		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
				"for migration.\n", irq);
		return false;
	}
	spin_lock_init(&desc->lock);
	desc->cpu = cpu;
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
	init_copy_desc_masks(old_desc, desc);
	arch_init_copy_chip_data(old_desc, desc, cpu);
	return true;
}

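/*
 * Release the parts of the old descriptor that got fresh copies above:
 * the kstat_irqs buffer and the arch specific chip_data.
 */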
static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
{
	free_kstat_irqs(old_desc, desc);
	arch_free_chip_data(old_desc, desc);
}

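/*
 * Allocate a replacement irq_desc on the node of the target CPU, copy the
 * old descriptor into it, publish it in irq_desc_ptrs[] and free the old
 * one.  Falls back to the old descriptor if the allocation or copy fails.
 */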
static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
						int cpu)
{
	struct irq_desc *desc;
	unsigned int irq;
	unsigned long flags;
	int node;

	irq = old_desc->irq;

	spin_lock_irqsave(&sparse_irq_lock, flags);

	/* We have to check it to avoid races with another CPU */
	desc = irq_desc_ptrs[irq];

	if (desc && old_desc != desc)
		goto out_unlock;

	node = cpu_to_node(cpu);
	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
	if (!desc) {
		printk(KERN_ERR "irq %d: can not get new irq_desc "
				"for migration.\n", irq);
		/* still use old one */
		desc = old_desc;
		goto out_unlock;
	}
	if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
		/* still use old one */
		kfree(desc);
		desc = old_desc;
		goto out_unlock;
	}

	irq_desc_ptrs[irq] = desc;

	/* free the old one */
	free_one_irq_desc(old_desc, desc);
	kfree(old_desc);

out_unlock:
	spin_unlock_irqrestore(&sparse_irq_lock, flags);

	return desc;
}

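/*
 * Move the descriptor of @desc->irq towards @cpu.  Legacy descriptors are
 * statically allocated and stay put; a full migration is only done when
 * the target CPU sits on a different NUMA node.
 */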
struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu)
{
	int old_cpu;
	int node, old_node;

	/* those are all statically allocated, do not move them */
	if (desc->irq < NR_IRQS_LEGACY)
		return desc;

	old_cpu = desc->cpu;
	if (old_cpu != cpu) {
		node = cpu_to_node(cpu);
		old_node = cpu_to_node(old_cpu);
		if (old_node != node)
			desc = __real_move_irq_desc(desc, cpu);
		else
			desc->cpu = cpu;
	}

	return desc;
}
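
/*
 * Usage sketch, not part of the original file: move_irq_desc() may return a
 * pointer to a brand new irq_desc, so a caller has to continue with the
 * returned descriptor.  The helper below is purely illustrative -- the name
 * example_rebind_desc() is an assumption, not an existing kernel API.
 */
#if 0	/* illustration only, never compiled */
static struct irq_desc *example_rebind_desc(struct irq_desc *desc,
					    const struct cpumask *mask)
{
	int cpu = cpumask_first(mask);

	/* The old pointer may have been freed after this call. */
	return move_irq_desc(desc, cpu);
}
#endif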
