/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP
#if defined(CONFIG_USE_NPTL)
/* Recursive lock serializing all updates to the guest page tables and
   host mappings.  The count lives in TLS so re-entry by the owning
   thread is cheap and never touches the mutex twice.  */
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int mmap_lock_count;

void mmap_lock(void)
{
    /* Only the outermost acquisition takes the real mutex.  */
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    /* Only the outermost release drops the real mutex.  */
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    /* Forking while this thread holds the lock would deadlock the child.  */
    if (mmap_lock_count) {
        abort();
    }
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child) {
        /* The child inherits a locked mutex; re-create it instead of
           unlocking one we never locked in this process.  */
        pthread_mutex_init(&mmap_mutex, NULL);
    } else {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif

80
/* NOTE: all the constants are the HOST ones, but addresses are target. */
81
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
bellard's avatar
bellard committed
82
{
83
    abi_ulong end, host_start, host_end, addr;
bellard's avatar
bellard committed
84 85 86
    int prot1, ret;

#ifdef DEBUG_MMAP
87 88
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           "len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
bellard's avatar
bellard committed
89 90 91 92 93 94 95 96 97 98 99
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
100
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
bellard's avatar
bellard committed
101 102
    if (len == 0)
        return 0;
103

104
    mmap_lock();
105
    host_start = start & qemu_host_page_mask;
bellard's avatar
bellard committed
106 107 108 109 110 111 112
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
113
        if (host_end == host_start + qemu_host_page_size) {
bellard's avatar
bellard committed
114 115 116 117 118
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
119
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
bellard's avatar
bellard committed
120
        if (ret != 0)
121
            goto error;
122
        host_start += qemu_host_page_size;
bellard's avatar
bellard committed
123 124 125 126 127 128
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
129
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
bellard's avatar
bellard committed
130 131
                       prot1 & PAGE_BITS);
        if (ret != 0)
132
            goto error;
133
        host_end -= qemu_host_page_size;
bellard's avatar
bellard committed
134
    }
135

bellard's avatar
bellard committed
136 137
    /* handle the pages in the middle */
    if (host_start < host_end) {
138
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
bellard's avatar
bellard committed
139
        if (ret != 0)
140
            goto error;
bellard's avatar
bellard committed
141 142
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
143
    mmap_unlock();
bellard's avatar
bellard committed
144
    return 0;
145 146 147
error:
    mmap_unlock();
    return ret;
bellard's avatar
bellard committed
148 149 150
}

/* map an incomplete host page */
151 152 153
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
bellard's avatar
bellard committed
154
{
155
    abi_ulong real_end, addr;
156
    void *host_start;
bellard's avatar
bellard committed
157 158
    int prot1, prot_new;

159 160
    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);
bellard's avatar
bellard committed
161 162 163

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
164
    for(addr = real_start; addr < real_end; addr++) {
bellard's avatar
bellard committed
165 166 167
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }
168

bellard's avatar
bellard committed
169 170
    if (prot1 == 0) {
        /* no page was there, so we allocate one */
171 172 173 174
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
175
        prot1 = prot;
bellard's avatar
bellard committed
176 177 178 179 180 181 182 183 184
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
185
            return -1;
bellard's avatar
bellard committed
186 187 188

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
189
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
190

bellard's avatar
bellard committed
191
        /* read the corresponding file data */
192 193
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;
194

bellard's avatar
bellard committed
195 196
        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
197
            mprotect(host_start, qemu_host_page_size, prot_new);
bellard's avatar
bellard committed
198 199 200
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
201
            mprotect(host_start, qemu_host_page_size, prot_new);
bellard's avatar
bellard committed
202 203 204 205 206
        }
    }
    return 0;
}

207 208 209
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
210
/* Cygwin doesn't have a whole lot of address space.  */
211
# define TASK_UNMAPPED_BASE  0x18000000
212
#else
213
# define TASK_UNMAPPED_BASE  0x40000000
214
#endif
215
abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
216

217 218
unsigned long last_brk;

219
#ifdef CONFIG_USE_GUEST_BASE
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  Scans the reserved area downwards from
   'start' for 'size' bytes of unused target pages, wrapping around to
   the top of the reservation once.  Returns the address found, or
   (abi_ulong)-1 on failure.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr, end_addr;
    int page_flags;
    int wrapped_once = 0;

    if (size > RESERVED_VA) {
        return (abi_ulong)-1;
    }

    size = HOST_PAGE_ALIGN(size);
    end_addr = start + size;
    if (end_addr > RESERVED_VA) {
        end_addr = RESERVED_VA;
    }
    addr = end_addr - qemu_host_page_size;

    for (;;) {
        if (addr > end_addr) {
            /* addr underflowed below 0: restart once from the top of
               the reserved area, then give up.  */
            if (wrapped_once) {
                return (abi_ulong)-1;
            }
            end_addr = RESERVED_VA;
            addr = end_addr - qemu_host_page_size;
            wrapped_once = 1;
            continue;
        }
        page_flags = page_get_flags(addr);
        if (page_flags) {
            /* Page in use: the free window must end below it.  */
            end_addr = addr;
        }
        if (addr + size == end_addr) {
            /* Found a gap of exactly 'size' free pages.  */
            break;
        }
        addr -= qemu_host_page_size;
    }

    if (start == mmap_next_start) {
        mmap_next_start = addr;
    }

    return addr;
}
#endif
267

268 269 270 271 272 273
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
274
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
275
{
276
    void *ptr, *prev;
277
    abi_ulong addr;
278
    int wrapped, repeat;
279 280

    /* If 'start' == 0, then a default start address is used. */
281
    if (start == 0) {
282
        start = mmap_next_start;
283 284 285 286 287
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);
288

289
#ifdef CONFIG_USE_GUEST_BASE
290
    if (RESERVED_VA) {
291 292
        return mmap_find_vma_reserved(start, size);
    }
293
#endif
294

295
    addr = start;
296 297
    wrapped = repeat = 0;
    prev = 0;
298

299
    for (;; prev = ptr) {
300 301 302 303 304 305 306
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
307
        ptr = mmap(g2h(addr), size, PROT_NONE,
308 309 310
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
311
        if (ptr == MAP_FAILED) {
312
            return (abi_ulong)-1;
313 314 315 316 317 318 319 320
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);
321

322 323 324 325 326 327 328
            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }
329

330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358
            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
359 360
        munmap(ptr, size);

361
        /* ENOMEM if we checked the whole of the target address space.  */
362
        if (addr == (abi_ulong)-1) {
363
            return (abi_ulong)-1;
364 365 366 367 368 369
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
370
               that we'd truly like an allocation in low memory.  */
371 372 373 374 375 376
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
377 378 379
    }
}

bellard's avatar
bellard committed
380
/* NOTE: all the constants are the HOST ones */
381 382
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
bellard's avatar
bellard committed
383
{
384
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
385
    unsigned long host_start;
bellard's avatar
bellard committed
386

387
    mmap_lock();
bellard's avatar
bellard committed
388 389
#ifdef DEBUG_MMAP
    {
390 391
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
392
               start, len,
bellard's avatar
bellard committed
393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
411
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
bellard's avatar
bellard committed
412 413 414
    }
#endif

415 416
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
417
        goto fail;
418
    }
bellard's avatar
bellard committed
419 420 421

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
422
        goto the_end;
423
    real_start = start & qemu_host_page_mask;
bellard's avatar
bellard committed
424

425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454
    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS. 

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.  

       When emulating a target with a larger page-size than the hosts, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the targets page boundary.  */

    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
       struct stat sb;

       if (fstat (fd, &sb) == -1)
           goto fail;

       /* Are we trying to create a map beyond EOF?.  */
       if (offset + len > sb.st_size) {
           /* If so, truncate the file map at eof aligned with 
              the hosts real pagesize. Additional anonymous maps
              will be created beyond EOF.  */
           len = (sb.st_size - offset);
           len += qemu_real_host_page_size - 1;
           len &= ~(qemu_real_host_page_size - 1);
       }
    }

bellard's avatar
bellard committed
455
    if (!(flags & MAP_FIXED)) {
456 457 458 459 460 461 462 463
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
464
            goto fail;
bellard's avatar
bellard committed
465
        }
466 467 468 469
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
470
                 host_len, prot, flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
471
        if (p == MAP_FAILED)
472
            goto fail;
473 474
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
475 476 477
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(mmap_start), len, prot, 
                     flags | MAP_FIXED, fd, host_offset);
478
            host_start += offset - host_offset;
479
        }
480 481 482
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
483
            errno = EINVAL;
484
            goto fail;
485
        }
486 487
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);
488

489 490 491 492 493 494 495 496 497 498
	/*
	 * Test if requested memory area fits target address space
	 * It can fail only on 64-bit host with 32-bit target.
	 * On any other target/host host mmap() handles this error correctly.
	 */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

499 500 501 502 503 504 505 506 507
        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
508
                goto fail;
509 510 511 512 513
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
514
                goto fail;
515 516
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
517 518
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
519 520 521 522
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
523 524
            }
            goto the_end;
bellard's avatar
bellard committed
525
        }
526 527 528 529 530 531 532 533
        
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
534
                    goto fail;
535 536 537
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
bellard's avatar
bellard committed
538 539
                            prot, flags, fd, offset);
            if (ret == -1)
540
                goto fail;
541 542 543 544 545 546 547 548 549
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
550
                goto fail;
551
            real_end -= qemu_host_page_size;
bellard's avatar
bellard committed
552
        }
553

554 555 556 557 558 559 560 561 562 563 564
        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
565
                goto fail;
566
        }
bellard's avatar
bellard committed
567 568 569 570 571
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
572
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
bellard's avatar
bellard committed
573 574 575
    page_dump(stdout);
    printf("\n");
#endif
576
    tb_invalidate_phys_range(start, start + len, 0);
577
    mmap_unlock();
bellard's avatar
bellard committed
578
    return start;
579 580 581
fail:
    mmap_unlock();
    return -1;
bellard's avatar
bellard committed
582 583
}

584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
                 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                 -1, 0);
    }
}

625
int target_munmap(abi_ulong start, abi_ulong len)
bellard's avatar
bellard committed
626
{
627
    abi_ulong end, real_start, real_end, addr;
bellard's avatar
bellard committed
628 629 630
    int prot, ret;

#ifdef DEBUG_MMAP
631 632 633
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
bellard's avatar
bellard committed
634 635 636 637 638 639
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
640
    mmap_lock();
bellard's avatar
bellard committed
641
    end = start + len;
642 643
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);
bellard's avatar
bellard committed
644

645
    if (start > real_start) {
bellard's avatar
bellard committed
646 647
        /* handle host page containing start */
        prot = 0;
648
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
bellard's avatar
bellard committed
649 650
            prot |= page_get_flags(addr);
        }
651 652
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
bellard's avatar
bellard committed
653 654
                prot |= page_get_flags(addr);
            }
655
            end = real_end;
bellard's avatar
bellard committed
656
        }
bellard's avatar
bellard committed
657
        if (prot != 0)
658
            real_start += qemu_host_page_size;
bellard's avatar
bellard committed
659
    }
660
    if (end < real_end) {
bellard's avatar
bellard committed
661
        prot = 0;
662
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
bellard's avatar
bellard committed
663 664 665
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
666
            real_end -= qemu_host_page_size;
bellard's avatar
bellard committed
667
    }
668

669
    ret = 0;
bellard's avatar
bellard committed
670
    /* unmap what we can */
671
    if (real_start < real_end) {
672
        if (RESERVED_VA) {
673 674 675 676
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
bellard's avatar
bellard committed
677 678
    }

679
    if (ret == 0) {
680
        page_set_flags(start, start + len, 0);
681 682
        tb_invalidate_phys_range(start, start + len, 0);
    }
683 684
    mmap_unlock();
    return ret;
bellard's avatar
bellard committed
685 686
}

687 688 689
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
bellard's avatar
bellard committed
690 691
{
    int prot;
692
    void *host_addr;
bellard's avatar
bellard committed
693

694
    mmap_lock();
695

696
    if (flags & MREMAP_FIXED) {
697 698 699
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
700 701
                                     g2h(new_addr));

702
        if (RESERVED_VA && host_addr != MAP_FAILED) {
703 704 705 706 707
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
708 709 710 711 712 713 714
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
715
        } else {
716 717 718 719
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
720 721 722
            if ( RESERVED_VA ) {
                mmap_reserve(old_addr, old_size);
            }
723
        }
724
    } else {
725
        int prot = 0;
726
        if (RESERVED_VA && old_size < new_size) {
727 728 729 730 731 732 733 734 735
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
736
            if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
737 738 739 740 741 742
                mmap_reserve(old_addr + old_size, new_size - old_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
743 744 745 746 747 748 749 750 751 752
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
753 754 755 756 757 758 759
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
760
    tb_invalidate_phys_range(new_addr, new_addr + new_size, 0);
761
    mmap_unlock();
bellard's avatar
bellard committed
762 763 764
    return new_addr;
}

765
int target_msync(abi_ulong start, abi_ulong len, int flags)
bellard's avatar
bellard committed
766
{
767
    abi_ulong end;
bellard's avatar
bellard committed
768 769 770 771 772

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
bellard's avatar
bellard committed
773 774 775 776
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;
777

778
    start &= qemu_host_page_mask;
779
    return msync(g2h(start), end - start, flags);
bellard's avatar
bellard committed
780
}