path: root/target-ppc/op_mem.h
author     bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2005-04-23 18:16:07 +0000
committer  bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2005-04-23 18:16:07 +0000
commit     111bfab3b5c6a47a7182e095647e6a5e0e17feb8 (patch)
tree       7ff41315c47fc137d03a92d42acc7333965544f0 /target-ppc/op_mem.h
parent     c7d344af8fb308975f941de1640b917e1b085a81 (diff)
This patch adds little-endian mode support to PPC emulation.
This is needed by OS/2 and Windows NT and some programs like VirtualPC. This patch has been tested using the OS/2 bootloader (thanks to Tero Kaarlela). (Jocelyn Mayer)
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1379 c046a42c-6fe2-441c-8c8c-71466251a162
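
The patch implements little-endian mode by byte-reversing every memory access against the target's normal big-endian layout. A minimal standalone sketch of that pattern, mirroring the ld16r/ld32r helpers in the diff below; load_be16/load_be32 are hypothetical stand-ins for the MEMSUFFIX loaders, assumed here to follow the target's big-endian byte order:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the MEMSUFFIX loaders (big-endian base order). */
static uint16_t load_be16(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

static uint32_t load_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

/* Same mask-and-shift reversal as ld16r/ld32r in the patch. */
static uint16_t ld16r(const uint8_t *p)
{
    uint16_t tmp = load_be16(p);
    return (uint16_t)(((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8));
}

static uint32_t ld32r(const uint8_t *p)
{
    uint32_t tmp = load_be32(p);
    return ((tmp & 0xFF000000) >> 24) | ((tmp & 0x00FF0000) >> 8) |
           ((tmp & 0x0000FF00) << 8)  | ((tmp & 0x000000FF) << 24);
}

int main(void)
{
    const uint8_t mem[4] = { 0x12, 0x34, 0x56, 0x78 };
    printf("%04x %08x\n", ld16r(mem), ld32r(mem));  /* 3412 78563412 */
    return 0;
}

Stores use the same mask-and-shift chains in the opposite direction (st16r/st32r referenced below), and the floating-point helpers later in the diff extend the pattern to 32- and 64-bit IEEE images.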
Diffstat (limited to 'target-ppc/op_mem.h')
-rw-r--r--  target-ppc/op_mem.h  180
1 file changed, 180 insertions(+), 0 deletions(-)
diff --git a/target-ppc/op_mem.h b/target-ppc/op_mem.h
index f0f0cd1..9b3f721 100644
--- a/target-ppc/op_mem.h
+++ b/target-ppc/op_mem.h
@@ -8,6 +8,12 @@ static inline uint16_t glue(ld16r, MEMSUFFIX) (target_ulong EA)
return ((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8);
}
+static inline int32_t glue(ld16rs, MEMSUFFIX) (target_ulong EA)
+{
+ int16_t tmp = glue(lduw, MEMSUFFIX)(EA);
+ return ((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8);
+}
+
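
The new ld16rs is the signed companion of ld16r, used below for the algebraic halfword load (lha and friends) in little-endian mode. A standalone sketch of the intended semantics, byte-swap first and then sign-extend to 32 bits; load_be16 is a hypothetical stand-in for the MEMSUFFIX halfword loader, assumed big-endian:

#include <stdint.h>
#include <stdio.h>

static uint16_t load_be16(const uint8_t *p)
{
    return (uint16_t)((p[0] << 8) | p[1]);
}

static int32_t ld16rs_sketch(const uint8_t *p)
{
    uint16_t tmp = load_be16(p);
    uint16_t swapped = (uint16_t)(((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8));
    return (int32_t)(int16_t)swapped;   /* sign-extend the swapped halfword */
}

int main(void)
{
    const uint8_t mem[2] = { 0x34, 0xFF };   /* little-endian halfword 0xFF34 */
    printf("%d\n", ld16rs_sketch(mem));      /* -204 */
    return 0;
}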
static inline uint32_t glue(ld32r, MEMSUFFIX) (target_ulong EA)
{
uint32_t tmp = glue(ldl, MEMSUFFIX)(EA);
@@ -48,17 +54,29 @@ PPC_LD_OP(ha, ldsw);
PPC_LD_OP(hz, lduw);
PPC_LD_OP(wz, ldl);
+PPC_LD_OP(ha_le, ld16rs);
+PPC_LD_OP(hz_le, ld16r);
+PPC_LD_OP(wz_le, ld32r);
+
/*** Integer store ***/
PPC_ST_OP(b, stb);
PPC_ST_OP(h, stw);
PPC_ST_OP(w, stl);
+PPC_ST_OP(h_le, st16r);
+PPC_ST_OP(w_le, st32r);
+
/*** Integer load and store with byte reverse ***/
PPC_LD_OP(hbr, ld16r);
PPC_LD_OP(wbr, ld32r);
PPC_ST_OP(hbr, st16r);
PPC_ST_OP(wbr, st32r);
+PPC_LD_OP(hbr_le, lduw);
+PPC_LD_OP(wbr_le, ldl);
+PPC_ST_OP(hbr_le, stw);
+PPC_ST_OP(wbr_le, stl);
+
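
With these definitions the roles of the normal and byte-reversed ops simply swap in little-endian mode: lhz_le/lwz_le and the _le stores go through the reversing helpers, while the _le variants of the byte-reverse instructions (lhbrx/lwbrx/sthbrx/stwbrx) map back to the plain lduw/ldl/stw/stl, since reversing twice is the identity. A standalone check of that identity, not QEMU code:

#include <stdint.h>
#include <assert.h>
#include <stdio.h>

static uint32_t rev32(uint32_t v)
{
    return ((v & 0xFF000000) >> 24) | ((v & 0x00FF0000) >> 8) |
           ((v & 0x0000FF00) << 8)  | ((v & 0x000000FF) << 24);
}

int main(void)
{
    uint32_t x = 0xDEADBEEF;
    assert(rev32(rev32(x)) == x);   /* lwbrx in LE mode == plain big-endian lwz */
    printf("%08x -> %08x -> %08x\n", x, rev32(x), rev32(rev32(x)));
    return 0;
}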
/*** Integer load and store multiple ***/
PPC_OP(glue(lmw, MEMSUFFIX))
{
@@ -80,6 +98,26 @@ PPC_OP(glue(stmw, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(lmw_le, MEMSUFFIX))
+{
+ int dst = PARAM(1);
+
+ for (; dst < 32; dst++, T0 += 4) {
+ ugpr(dst) = glue(ld32r, MEMSUFFIX)(T0);
+ }
+ RETURN();
+}
+
+PPC_OP(glue(stmw_le, MEMSUFFIX))
+{
+ int src = PARAM(1);
+
+ for (; src < 32; src++, T0 += 4) {
+ glue(st32r, MEMSUFFIX)(T0, ugpr(src));
+ }
+ RETURN();
+}
+
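
lmw_le and stmw_le keep the usual load/store-multiple walk from the starting register up to r31, advancing the effective address by 4 each step, but move each word through the byte-reversing helpers. A standalone sketch of the lmw_le loop; gpr[], load_be32 and the flat mem[] are hypothetical stand-ins for the CPU registers and the MEMSUFFIX loader:

#include <stdint.h>
#include <stdio.h>

static uint32_t load_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static uint32_t rev32(uint32_t v)
{
    return ((v & 0xFF000000) >> 24) | ((v & 0x00FF0000) >> 8) |
           ((v & 0x0000FF00) << 8)  | ((v & 0x000000FF) << 24);
}

int main(void)
{
    uint8_t mem[128];
    uint32_t gpr[32] = { 0 };
    for (int i = 0; i < 128; i++)
        mem[i] = (uint8_t)i;

    /* lmw_le rD, 0(base): dst runs from rD to 31, EA advances by 4 each step */
    int dst = 29;                        /* example: loads r29..r31 */
    uint32_t ea = 0;
    for (; dst < 32; dst++, ea += 4)
        gpr[dst] = rev32(load_be32(&mem[ea]));

    printf("r29=%08x r30=%08x r31=%08x\n", gpr[29], gpr[30], gpr[31]);
    return 0;
}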
/*** Integer load and store strings ***/
PPC_OP(glue(lswi, MEMSUFFIX))
{
@@ -87,6 +125,13 @@ PPC_OP(glue(lswi, MEMSUFFIX))
RETURN();
}
+void glue(do_lsw_le, MEMSUFFIX) (int dst);
+PPC_OP(glue(lswi_le, MEMSUFFIX))
+{
+ glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
+ RETURN();
+}
+
/* PPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
* In an other hand, IBM says this is valid, but rA won't be loaded.
@@ -105,12 +150,32 @@ PPC_OP(glue(lswx, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(lswx_le, MEMSUFFIX))
+{
+ if (T1 > 0) {
+ if ((PARAM(1) < PARAM(2) && (PARAM(1) + T1) > PARAM(2)) ||
+ (PARAM(1) < PARAM(3) && (PARAM(1) + T1) > PARAM(3))) {
+ do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
+ } else {
+ glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
+ }
+ }
+ RETURN();
+}
+
PPC_OP(glue(stsw, MEMSUFFIX))
{
glue(do_stsw, MEMSUFFIX)(PARAM(1));
RETURN();
}
+void glue(do_stsw_le, MEMSUFFIX) (int src);
+PPC_OP(glue(stsw_le, MEMSUFFIX))
+{
+ glue(do_stsw_le, MEMSUFFIX)(PARAM(1));
+ RETURN();
+}
+
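
The guard in lswx_le restates the PPC32 rule quoted in the comment above lswx: if the length taken from T1, added to the starting register PARAM(1), would reach past rA (PARAM(2)) or rB (PARAM(3)), the op raises a program exception with the invalid-LSWX code instead of loading. A standalone restatement of that predicate, with made-up register numbers and length:

#include <stdio.h>

/* Mirrors the condition in lswx_le above: start = first target register,
 * len = the length taken from T1, ra/rb = the address registers that the
 * string load must not overwrite. */
static int lswx_invalid(int start, int len, int ra, int rb)
{
    return (start < ra && start + len > ra) ||
           (start < rb && start + len > rb);
}

int main(void)
{
    printf("%d\n", lswx_invalid(3, 8, 4, 5));    /* 1: range starting at r3 reaches r4/r5 */
    printf("%d\n", lswx_invalid(10, 8, 4, 5));   /* 0: r4/r5 sit below the target range */
    return 0;
}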
/*** Floating-point store ***/
#define PPC_STF_OP(name, op) \
PPC_OP(glue(glue(st, name), MEMSUFFIX)) \
@@ -122,6 +187,43 @@ PPC_OP(glue(glue(st, name), MEMSUFFIX)) \
PPC_STF_OP(fd, stfq);
PPC_STF_OP(fs, stfl);
+static inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
+{
+ union {
+ double d;
+ uint64_t u;
+ } u;
+
+ u.d = d;
+ u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
+ ((u.u & 0x00FF000000000000ULL) >> 40) |
+ ((u.u & 0x0000FF0000000000ULL) >> 24) |
+ ((u.u & 0x000000FF00000000ULL) >> 8) |
+ ((u.u & 0x00000000FF000000ULL) << 8) |
+ ((u.u & 0x0000000000FF0000ULL) << 24) |
+ ((u.u & 0x000000000000FF00ULL) << 40) |
+ ((u.u & 0x00000000000000FFULL) << 56);
+ glue(stfq, MEMSUFFIX)(EA, u.d);
+}
+
+static inline void glue(stflr, MEMSUFFIX) (target_ulong EA, float f)
+{
+ union {
+ float f;
+ uint32_t u;
+ } u;
+
+ u.f = f;
+ u.u = ((u.u & 0xFF000000UL) >> 24) |
+ ((u.u & 0x00FF0000ULL) >> 8) |
+ ((u.u & 0x0000FF00UL) << 8) |
+ ((u.u & 0x000000FFULL) << 24);
+ glue(stfl, MEMSUFFIX)(EA, u.f);
+}
+
+PPC_STF_OP(fd_le, stfqr);
+PPC_STF_OP(fs_le, stflr);
+
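
stfqr and stflr store the floating-point image byte-reversed: the value is type-punned through a union and the integer image is mirrored with the mask-and-shift chain before the normal stfq/stfl store. The matching loads further down (ldfqr/ldflr) apply the same chains on the way in, which works because the reversal is its own inverse. A standalone check, not QEMU code, that the 64-bit chain really is a byte reversal:

#include <stdint.h>
#include <stdio.h>

static uint64_t rev64(uint64_t u)
{
    return ((u & 0xFF00000000000000ULL) >> 56) | ((u & 0x00FF000000000000ULL) >> 40) |
           ((u & 0x0000FF0000000000ULL) >> 24) | ((u & 0x000000FF00000000ULL) >> 8)  |
           ((u & 0x00000000FF000000ULL) << 8)  | ((u & 0x0000000000FF0000ULL) << 24) |
           ((u & 0x000000000000FF00ULL) << 40) | ((u & 0x00000000000000FFULL) << 56);
}

int main(void)
{
    union { double d; uint64_t u; } v;
    v.d = 1.0;                                   /* IEEE754: 0x3FF0000000000000 */
    printf("%016llx -> %016llx\n",
           (unsigned long long)v.u, (unsigned long long)rev64(v.u));
    printf("%016llx\n", (unsigned long long)rev64(0x0102030405060708ULL));
    return 0;
}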
/*** Floating-point load ***/
#define PPC_LDF_OP(name, op) \
PPC_OP(glue(glue(l, name), MEMSUFFIX)) \
@@ -133,6 +235,45 @@ PPC_OP(glue(glue(l, name), MEMSUFFIX)) \
PPC_LDF_OP(fd, ldfq);
PPC_LDF_OP(fs, ldfl);
+static inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
+{
+ union {
+ double d;
+ uint64_t u;
+ } u;
+
+ u.d = glue(ldfq, MEMSUFFIX)(EA);
+ u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
+ ((u.u & 0x00FF000000000000ULL) >> 40) |
+ ((u.u & 0x0000FF0000000000ULL) >> 24) |
+ ((u.u & 0x000000FF00000000ULL) >> 8) |
+ ((u.u & 0x00000000FF000000ULL) << 8) |
+ ((u.u & 0x0000000000FF0000ULL) << 24) |
+ ((u.u & 0x000000000000FF00ULL) << 40) |
+ ((u.u & 0x00000000000000FFULL) << 56);
+
+ return u.d;
+}
+
+static inline float glue(ldflr, MEMSUFFIX) (target_ulong EA)
+{
+ union {
+ float f;
+ uint32_t u;
+ } u;
+
+ u.f = glue(ldfl, MEMSUFFIX)(EA);
+ u.u = ((u.u & 0xFF000000UL) >> 24) |
+ ((u.u & 0x00FF0000ULL) >> 8) |
+ ((u.u & 0x0000FF00UL) << 8) |
+ ((u.u & 0x000000FFULL) << 24);
+
+ return u.f;
+}
+
+PPC_LDF_OP(fd_le, ldfqr);
+PPC_LDF_OP(fs_le, ldflr);
+
/* Load and set reservation */
PPC_OP(glue(lwarx, MEMSUFFIX))
{
@@ -145,6 +286,17 @@ PPC_OP(glue(lwarx, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(lwarx_le, MEMSUFFIX))
+{
+ if (T0 & 0x03) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ T1 = glue(ld32r, MEMSUFFIX)(T0);
+ regs->reserve = T0;
+ }
+ RETURN();
+}
+
/* Store with reservation */
PPC_OP(glue(stwcx, MEMSUFFIX))
{
@@ -162,6 +314,22 @@ PPC_OP(glue(stwcx, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(stwcx_le, MEMSUFFIX))
+{
+ if (T0 & 0x03) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ if (regs->reserve != T0) {
+ env->crf[0] = xer_ov;
+ } else {
+ glue(st32r, MEMSUFFIX)(T0, T1);
+ env->crf[0] = xer_ov | 0x02;
+ }
+ }
+ regs->reserve = 0;
+ RETURN();
+}
+
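
lwarx_le and stwcx_le carry the reservation protocol over to little-endian mode: lwarx_le takes the alignment trap for a non-word-aligned address, otherwise loads the byte-reversed word and records the address in regs->reserve; stwcx_le performs the byte-reversed store only while that reservation is still armed, and clears it either way. In the CR0 nibble it writes, 0x02 is the EQ bit reporting a successful store, and the OR'd-in term is meant to carry the XER summary-overflow position, per the stwcx. definition. A standalone sketch of the bookkeeping, with the byte reversal, the alignment trap and the XER bit elided; cpu_t and its fields are hypothetical stand-ins for the real CPU state:

#include <stdint.h>
#include <stdio.h>

#define CR0_EQ 0x02   /* success bit in the CR0 nibble, as in the hunk above */

typedef struct {
    uint32_t reserve;   /* address armed by lwarx */
    uint32_t crf0;      /* CR field 0 */
    uint32_t mem[64];   /* toy word-addressed memory */
} cpu_t;

static void lwarx_le(cpu_t *c, uint32_t ea, uint32_t *out)
{
    *out = c->mem[ea / 4];   /* byte reversal elided in this toy model */
    c->reserve = ea;         /* arm the reservation */
}

static void stwcx_le(cpu_t *c, uint32_t ea, uint32_t val)
{
    if (c->reserve != ea) {
        c->crf0 = 0;             /* reservation lost: EQ clear, store skipped */
    } else {
        c->mem[ea / 4] = val;
        c->crf0 = CR0_EQ;        /* store performed: EQ set */
    }
    c->reserve = 0;              /* reservation is consumed either way */
}

int main(void)
{
    cpu_t c = { 0 };
    uint32_t v;
    lwarx_le(&c, 8, &v);
    stwcx_le(&c, 8, v + 1);
    printf("first stwcx:  crf0=%x mem=%x\n", c.crf0, c.mem[2]);
    stwcx_le(&c, 8, v + 2);      /* no reservation left: must fail */
    printf("second stwcx: crf0=%x mem=%x\n", c.crf0, c.mem[2]);
    return 0;
}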
PPC_OP(glue(dcbz, MEMSUFFIX))
{
glue(stl, MEMSUFFIX)(T0 + 0x00, 0);
@@ -188,4 +356,16 @@ PPC_OP(glue(ecowx, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(eciwx_le, MEMSUFFIX))
+{
+ T1 = glue(ld32r, MEMSUFFIX)(T0);
+ RETURN();
+}
+
+PPC_OP(glue(ecowx_le, MEMSUFFIX))
+{
+ glue(st32r, MEMSUFFIX)(T0, T1);
+ RETURN();
+}
+
#undef MEMSUFFIX