/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/timer.h"
#include "exec/address-spaces.h"
#include "exec/memory.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
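
/* Illustrative note (an assumption about the instantiation site, not part
   of the original file): this template is included once per access size,
   with SHIFT defined beforehand, roughly as cputlb.c does in this era:

       #define SHIFT 0
       #include "softmmu_template.h"
       #define SHIFT 1
       #include "softmmu_template.h"
       #define SHIFT 2
       #include "softmmu_template.h"
       #define SHIFT 3
       #include "softmmu_template.h"

   so DATA_SIZE above resolves to 1, 2, 4 and 8 bytes in turn.  */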

/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
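
/* Example (illustrative): on a 64-bit host, the 4-byte load helper thus
   returns tcg_target_ulong, i.e. its uint32_t result zero-extended to the
   full host register, so no ABI-specific promotion rule is involved.  */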

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
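
/* The numeric access types passed to tlb_fill() and cpu_unaligned_access()
   below follow the convention used elsewhere in the code base at this
   point: 0 = data load, 1 = data store, 2 = instruction fetch.  */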

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
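
/* Expansion example (assuming the conventional MMUSUFFIX of _mmu): with
   DATA_SIZE == 4, helper_le_ld_name expands to helper_le_ldul_mmu,
   helper_le_lds_name to helper_le_ldsl_mmu and helper_be_st_name to
   helper_be_stl_mmu.  */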

#ifdef TARGET_WORDS_BIGENDIAN
# define helper_te_ld_name  helper_be_ld_name
# define helper_te_st_name  helper_be_st_name
#else
# define helper_te_ld_name  helper_le_ld_name
# define helper_te_st_name  helper_le_st_name
#endif

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              hwaddr physaddr,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    uint64_t val;
    CPUState *cpu = ENV_GET_CPU(env);
    MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    cpu->mem_io_pc = retaddr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    io_mem_read(mr, physaddr, &val, 1 << SHIFT);
    return val;
}
#endif

#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
#endif
        tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_le_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_le_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
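        /* e.g. with DATA_SIZE == 4 and addr & 3 == 1 (shift == 8), this
           computes res = (res1 >> 8) | (res2 << 24): the three high bytes
           of res1 become the low bytes of the result and the low byte of
           res2 becomes its top byte.  */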
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}

#if DATA_SIZE > 1
#ifdef SOFTMMU_CODE_ACCESS
static __attribute__((unused))
#endif
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr, int mmu_idx,
                            uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    uintptr_t haddr;
    DATA_TYPE res;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                                 mmu_idx, retaddr);
        }
#endif
        tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, ioaddr, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
#endif
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        /* Note the adjustment at the beginning of the function.
           Undo that for the recursion.  */
        res1 = helper_be_ld_name(env, addr1, mmu_idx, retaddr + GETPC_ADJ);
        res2 = helper_be_ld_name(env, addr2, mmu_idx, retaddr + GETPC_ADJ);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
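        /* e.g. with DATA_SIZE == 4 and addr & 3 == 1 (shift == 8), this
           computes res = (res1 << 8) | (res2 >> 24): the three low bytes
           of res1 move up and the high byte of res2 fills the bottom.  */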
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */
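
/* Illustrative note: the wrapper below is the older endianness-agnostic
   entry point kept for TCG backends not yet converted to the explicit
   _le/_be helpers; for DATA_SIZE == 4 it is helper_ldl_mmu(), forwarding
   to whichever target-endian helper applies.  */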

DATA_TYPE
glue(glue(helper_ld, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         int mmu_idx)
{
    return helper_te_ld_name(env, addr, mmu_idx, GETRA());
}

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, mmu_idx, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             int mmu_idx, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, mmu_idx, retaddr);
}
# endif
#endif
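
/* Example (illustrative): for DATA_SIZE == 2 on a 64-bit host the signed
   variant is helper_le_ldsw_mmu; its (SDATA_TYPE) cast sign-extends the
   16-bit value through the tcg_target_ulong return type.  */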

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          hwaddr physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);

    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {
        cpu_io_recompile(cpu, retaddr);
    }

    cpu->mem_io_vaddr = addr;
    cpu->mem_io_pc = retaddr;
    io_mem_write(mr, physaddr, val, 1 << SHIFT);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Little-endian extract.  */
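            /* e.g. for DATA_SIZE == 4, i == 3 gives val >> 24: byte i of
               a little-endian value is bits [8*i, 8*i + 7] and lands at
               addr + i.  */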
            uint8_t val8 = val >> (i * 8);
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       int mmu_idx, uintptr_t retaddr)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    uintptr_t haddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0) {
            cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
        }
#endif
        tlb_fill(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        hwaddr ioaddr;
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }
        ioaddr = env->iotlb[mmu_idx][index];

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, ioaddr, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i;
    do_unaligned_access:
#ifdef ALIGNED_ONLY
        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
#endif
        /* XXX: not efficient, but simple */
        /* Note: relies on the fact that tlb_fill() does not remove the
         * previous page from the TLB cache.  */
        for (i = DATA_SIZE - 1; i >= 0; i--) {
            /* Big-endian extract.  */
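            /* e.g. for DATA_SIZE == 4, i == 0 gives val >> 24: the
               most-significant byte of a big-endian value lands at the
               lowest address, addr + 0.  */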
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            /* Note the adjustment at the beginning of the function.
               Undo that for the recursion.  */
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            mmu_idx, retaddr + GETPC_ADJ);
        }
        return;
    }

    /* Handle aligned access or unaligned access in the same page.  */
#ifdef ALIGNED_ONLY
    if ((addr & (DATA_SIZE - 1)) != 0) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, 1, mmu_idx, retaddr);
    }
#endif

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */

void
glue(glue(helper_st, SUFFIX), MMUSUFFIX)(CPUArchState *env, target_ulong addr,
                                         DATA_TYPE val, int mmu_idx)
{
    helper_te_st_name(env, addr, val, mmu_idx, GETRA());
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef TGT_BE
#undef TGT_LE
#undef CPU_BE
#undef CPU_LE
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name
#undef helper_te_ld_name
#undef helper_te_st_name