author    Paul Mundt <lethal@linux-sh.org>    2010-01-19 15:40:03 +0900
committer Paul Mundt <lethal@linux-sh.org>    2010-01-19 15:40:03 +0900
commit    3ef2932b8c1fc89408ef1fd4b1e1c2caabc7f07d (patch)
tree      0b12eea51d98e1edd1ef891ed7fe0a7feec4341c
parent    cb6d04468d16de5a6161167ec7e76a43be540a80 (diff)
sh64: Fix up the build for the thread_xstate changes.
This updates the sh64 processor info with the sh32 changes in order to
tie in to the generic task_xstate management code.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
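For context, the sketch below is a minimal user-space illustration (not the kernel code itself) of what this change means for sh64 consumers of the FPU state: the registers no longer live inline in thread_struct as thread.fpu.hard, but in a separately allocated union thread_xstate reached through the thread.xstate pointer, so accesses become xstate->hardfpu and the object must exist before first use. The struct layouts and the calloc() call are simplified stand-ins; the kernel allocates from the task_xstate_cachep kmem_cache declared in processor.h by this patch.

/*
 * Illustrative sketch only. It mirrors the shape of the change: FPU state
 * moves from a union embedded in thread_struct to a dynamically allocated
 * union thread_xstate reached through a pointer.
 */
#include <stdio.h>
#include <stdlib.h>

struct sh_fpu_hard_struct { unsigned long fp_regs[64]; unsigned int fpscr; };
struct sh_fpu_soft_struct { unsigned long fp_regs[64]; unsigned int fpscr; };

union thread_xstate {
	struct sh_fpu_hard_struct hardfpu;
	struct sh_fpu_soft_struct softfpu;
	unsigned long long alignment_dummy;	/* force 64-bit alignment */
};

struct thread_struct {
	unsigned long sp;
	union thread_xstate *xstate;	/* was: union sh_fpu_union fpu; */
};

/* Old style: current->thread.fpu.hard.fp_regs[reg] = val;            */
/* New style: allocate on first use, then go through the pointer.     */
static int set_fp_reg(struct thread_struct *thread, int reg, unsigned long val)
{
	if (!thread->xstate) {
		/* stand-in for the kernel's task_xstate_cachep allocation */
		thread->xstate = calloc(1, sizeof(*thread->xstate));
		if (!thread->xstate)
			return -1;
	}
	thread->xstate->hardfpu.fp_regs[reg] = val;
	return 0;
}

int main(void)
{
	struct thread_struct t = { 0 };

	if (set_fp_reg(&t, 0, 0xdeadbeefUL))
		return 1;
	printf("fp_regs[0] = %#lx\n", t.xstate->hardfpu.fp_regs[0]);
	free(t.xstate);
	return 0;
}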
-rw-r--r-- arch/sh/include/asm/processor.h    |  6
-rw-r--r-- arch/sh/include/asm/processor_32.h |  4
-rw-r--r-- arch/sh/include/asm/processor_64.h | 18
-rw-r--r-- arch/sh/kernel/cpu/sh5/fpu.c       | 12
-rw-r--r-- arch/sh/kernel/process_64.c        |  2
-rw-r--r-- arch/sh/kernel/ptrace_64.c         | 10
-rw-r--r-- arch/sh/kernel/signal_64.c         |  4
-rw-r--r-- arch/sh/kernel/traps_64.c          | 28
8 files changed, 43 insertions(+), 41 deletions(-)
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h
index a522e5d..87a8d1e 100644
--- a/arch/sh/include/asm/processor.h
+++ b/arch/sh/include/asm/processor.h
@@ -98,9 +98,15 @@ extern struct sh_cpuinfo cpu_data[];
/* Forward decl */
struct seq_operations;
+struct task_struct;
extern struct pt_regs fake_swapper_regs;
+/* arch/sh/kernel/process.c */
+extern unsigned int xstate_size;
+extern void free_thread_xstate(struct task_struct *);
+extern struct kmem_cache *task_xstate_cachep;
+
/* arch/sh/mm/init.c */
extern unsigned int mem_init_done;
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 5fd8312..488f0a9 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -96,10 +96,6 @@ union thread_xstate {
struct sh_fpu_soft_struct softfpu;
};
-extern unsigned int xstate_size;
-extern void free_thread_xstate(struct task_struct *);
-extern struct kmem_cache *task_xstate_cachep;
-
struct thread_struct {
/* Saved registers when thread is descheduled */
unsigned long sp;
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 5727d31..7b1560f 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -87,20 +87,21 @@ struct sh_fpu_hard_struct {
/* long status; * software status information */
};
-#if 0
/* Dummy fpu emulator */
struct sh_fpu_soft_struct {
- unsigned long long fp_regs[32];
+ unsigned long fp_regs[64];
unsigned int fpscr;
unsigned char lookahead;
unsigned long entry_pc;
};
-#endif
-union sh_fpu_union {
- struct sh_fpu_hard_struct hard;
- /* 'hard' itself only produces 32 bit alignment, yet we need
- to access it using 64 bit load/store as well. */
+union thread_xstate {
+ struct sh_fpu_hard_struct hardfpu;
+ struct sh_fpu_soft_struct softfpu;
+ /*
+ * The structure definitions only produce 32 bit alignment, yet we need
+ * to access them using 64 bit load/store as well.
+ */
unsigned long long alignment_dummy;
};
@@ -122,7 +123,7 @@ struct thread_struct {
/* Hardware debugging registers may come here */
/* floating point info */
- union sh_fpu_union fpu;
+ union thread_xstate *xstate;
};
#define INIT_MMAP \
@@ -137,7 +138,6 @@ struct thread_struct {
.trap_no = 0, \
.error_code = 0, \
.address = 0, \
- .fpu = { { { 0, } }, } \
}
/*
diff --git a/arch/sh/kernel/cpu/sh5/fpu.c b/arch/sh/kernel/cpu/sh5/fpu.c
index 4648cce..92df285 100644
--- a/arch/sh/kernel/cpu/sh5/fpu.c
+++ b/arch/sh/kernel/cpu/sh5/fpu.c
@@ -27,8 +27,8 @@
#define sNAN64 0xFFFFFFFFFFFFFFFFULL
#define sNAN32 0xFFFFFFFFUL
-static union sh_fpu_union init_fpuregs = {
- .hard = {
+static union thread_xstate init_fpuregs = {
+ .hardfpu = {
.fp_regs = { [0 ... 63] = sNAN32 },
.fpscr = FPSCR_INIT
}
@@ -72,7 +72,7 @@ void save_fpu(struct task_struct *tsk)
"fgetscr fr63\n\t"
"fst.s %0, (32*8), fr63\n\t"
: /* no output */
- : "r" (&tsk->thread.fpu.hard)
+ : "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
@@ -121,7 +121,7 @@ fpload(struct sh_fpu_hard_struct *fpregs)
void fpinit(struct sh_fpu_hard_struct *fpregs)
{
- *fpregs = init_fpuregs.hard;
+ *fpregs = init_fpuregs.hardfpu;
}
asmlinkage void
@@ -157,10 +157,10 @@ do_fpu_state_restore(unsigned long ex, struct pt_regs *regs)
last_task_used_math = current;
if (used_math()) {
- fpload(&current->thread.fpu.hard);
+ fpload(&current->thread.xstate->hardfpu);
} else {
/* First time FPU user. */
- fpload(&init_fpuregs.hard);
+ fpload(&init_fpuregs.hardfpu);
set_used_math();
}
disable_fpu();
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 31f80c6..c9554a7 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -410,7 +410,7 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
regs->sr |= SR_FD;
}
- memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
+ memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
}
return fpvalid;
diff --git a/arch/sh/kernel/ptrace_64.c b/arch/sh/kernel/ptrace_64.c
index 873ebdc..67fbcee 100644
--- a/arch/sh/kernel/ptrace_64.c
+++ b/arch/sh/kernel/ptrace_64.c
@@ -88,7 +88,7 @@ get_fpu_long(struct task_struct *task, unsigned long addr)
regs->sr |= SR_FD;
}
- tmp = ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)];
+ tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
return tmp;
}
@@ -114,7 +114,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
- fpinit(&task->thread.fpu.hard);
+ fpinit(&task->thread.xstate->hardfpu);
set_stopped_child_used_math(task);
} else if (last_task_used_math == task) {
enable_fpu();
@@ -124,7 +124,7 @@ put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
regs->sr |= SR_FD;
}
- ((long *)&task->thread.fpu)[addr / sizeof(unsigned long)] = data;
+ ((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
return 0;
}
@@ -222,7 +222,7 @@ int fpregs_get(struct task_struct *target,
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.hard, 0, -1);
+ &target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
@@ -239,7 +239,7 @@ static int fpregs_set(struct task_struct *target,
set_stopped_child_used_math(target);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- &target->thread.fpu.hard, 0, -1);
+ &target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index ce76dbd..4733bfc 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -295,7 +295,7 @@ restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
regs->sr |= SR_FD;
}
- err |= __copy_from_user(&current->thread.fpu.hard, &sc->sc_fpregs[0],
+ err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
(sizeof(long long) * 32) + (sizeof(int) * 1));
return err;
@@ -320,7 +320,7 @@ setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
regs->sr |= SR_FD;
}
- err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.fpu.hard,
+ err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
(sizeof(long long) * 32) + (sizeof(int) * 1));
clear_used_math();
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
index d86f531..e3f92eb 100644
--- a/arch/sh/kernel/traps_64.c
+++ b/arch/sh/kernel/traps_64.c
@@ -611,19 +611,19 @@ static int misaligned_fpu_load(struct pt_regs *regs,
switch (width_shift) {
case 2:
- current->thread.fpu.hard.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
- current->thread.fpu.hard.fp_regs[destreg] = buflo;
- current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- current->thread.fpu.hard.fp_regs[destreg] = bufhi;
- current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
- current->thread.fpu.hard.fp_regs[destreg] = buflo;
- current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+ current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
+ current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
}
break;
@@ -681,19 +681,19 @@ static int misaligned_fpu_store(struct pt_regs *regs,
switch (width_shift) {
case 2:
- buflo = current->thread.fpu.hard.fp_regs[srcreg];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
break;
case 3:
if (do_paired_load) {
- buflo = current->thread.fpu.hard.fp_regs[srcreg];
- bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
- bufhi = current->thread.fpu.hard.fp_regs[srcreg];
- buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
- buflo = current->thread.fpu.hard.fp_regs[srcreg];
- bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+ buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
+ bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
}
break;