/*
 *  mmap support for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <linux/mman.h>
#include <linux/unistd.h>

#include "qemu.h"
#include "qemu-common.h"

//#define DEBUG_MMAP

36
#if defined(CONFIG_USE_NPTL)
37
static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
38
static __thread int mmap_lock_count;
pbrook's avatar
pbrook committed
39 40 41 42 43 44 45 46 47 48 49 50 51 52

void mmap_lock(void)
{
    if (mmap_lock_count++ == 0) {
        pthread_mutex_lock(&mmap_mutex);
    }
}

void mmap_unlock(void)
{
    if (--mmap_lock_count == 0) {
        pthread_mutex_unlock(&mmap_mutex);
    }
}
pbrook's avatar
pbrook committed
53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68

/* Grab lock to make sure things are in a consistent state after fork().  */
void mmap_fork_start(void)
{
    if (mmap_lock_count)
        abort();
    pthread_mutex_lock(&mmap_mutex);
}

void mmap_fork_end(int child)
{
    if (child)
        pthread_mutex_init(&mmap_mutex, NULL);
    else
        pthread_mutex_unlock(&mmap_mutex);
}
pbrook's avatar
pbrook committed
69 70 71 72 73 74 75 76 77 78 79
#else
/* We aren't threadsafe to start with, so no need to worry about locking.  */
void mmap_lock(void)
{
}

void mmap_unlock(void)
{
}
#endif

80
/* NOTE: all the constants are the HOST ones, but addresses are target. */
81
int target_mprotect(abi_ulong start, abi_ulong len, int prot)
bellard's avatar
bellard committed
82
{
83
    abi_ulong end, host_start, host_end, addr;
bellard's avatar
bellard committed
84 85 86
    int prot1, ret;

#ifdef DEBUG_MMAP
87 88
    printf("mprotect: start=0x" TARGET_ABI_FMT_lx
           "len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c\n", start, len,
bellard's avatar
bellard committed
89 90 91 92 93 94 95 96 97 98 99
           prot & PROT_READ ? 'r' : '-',
           prot & PROT_WRITE ? 'w' : '-',
           prot & PROT_EXEC ? 'x' : '-');
#endif

    if ((start & ~TARGET_PAGE_MASK) != 0)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
    if (end < start)
        return -EINVAL;
100
    prot &= PROT_READ | PROT_WRITE | PROT_EXEC;
bellard's avatar
bellard committed
101 102
    if (len == 0)
        return 0;
103

pbrook's avatar
pbrook committed
104
    mmap_lock();
105
    host_start = start & qemu_host_page_mask;
bellard's avatar
bellard committed
106 107 108 109 110 111 112
    host_end = HOST_PAGE_ALIGN(end);
    if (start > host_start) {
        /* handle host page containing start */
        prot1 = prot;
        for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
113
        if (host_end == host_start + qemu_host_page_size) {
bellard's avatar
bellard committed
114 115 116 117 118
            for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
                prot1 |= page_get_flags(addr);
            }
            end = host_end;
        }
119
        ret = mprotect(g2h(host_start), qemu_host_page_size, prot1 & PAGE_BITS);
bellard's avatar
bellard committed
120
        if (ret != 0)
pbrook's avatar
pbrook committed
121
            goto error;
122
        host_start += qemu_host_page_size;
bellard's avatar
bellard committed
123 124 125 126 127 128
    }
    if (end < host_end) {
        prot1 = prot;
        for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
            prot1 |= page_get_flags(addr);
        }
129
        ret = mprotect(g2h(host_end - qemu_host_page_size), qemu_host_page_size,
bellard's avatar
bellard committed
130 131
                       prot1 & PAGE_BITS);
        if (ret != 0)
pbrook's avatar
pbrook committed
132
            goto error;
133
        host_end -= qemu_host_page_size;
bellard's avatar
bellard committed
134
    }
135

bellard's avatar
bellard committed
136 137
    /* handle the pages in the middle */
    if (host_start < host_end) {
138
        ret = mprotect(g2h(host_start), host_end - host_start, prot);
bellard's avatar
bellard committed
139
        if (ret != 0)
pbrook's avatar
pbrook committed
140
            goto error;
bellard's avatar
bellard committed
141 142
    }
    page_set_flags(start, start + len, prot | PAGE_VALID);
pbrook's avatar
pbrook committed
143
    mmap_unlock();
bellard's avatar
bellard committed
144
    return 0;
pbrook's avatar
pbrook committed
145 146 147
error:
    mmap_unlock();
    return ret;
bellard's avatar
bellard committed
148 149 150
}

/* map an incomplete host page */
151 152 153
static int mmap_frag(abi_ulong real_start,
                     abi_ulong start, abi_ulong end,
                     int prot, int flags, int fd, abi_ulong offset)
bellard's avatar
bellard committed
154
{
ths's avatar
ths committed
155
    abi_ulong real_end, addr;
156
    void *host_start;
bellard's avatar
bellard committed
157 158
    int prot1, prot_new;

159 160
    real_end = real_start + qemu_host_page_size;
    host_start = g2h(real_start);
bellard's avatar
bellard committed
161 162 163

    /* get the protection of the target pages outside the mapping */
    prot1 = 0;
164
    for(addr = real_start; addr < real_end; addr++) {
bellard's avatar
bellard committed
165 166 167
        if (addr < start || addr >= end)
            prot1 |= page_get_flags(addr);
    }
168

bellard's avatar
bellard committed
169 170
    if (prot1 == 0) {
        /* no page was there, so we allocate one */
ths's avatar
ths committed
171 172 173 174
        void *p = mmap(host_start, qemu_host_page_size, prot,
                       flags | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return -1;
175
        prot1 = prot;
bellard's avatar
bellard committed
176 177 178 179 180 181 182 183 184
    }
    prot1 &= PAGE_BITS;

    prot_new = prot | prot1;
    if (!(flags & MAP_ANONYMOUS)) {
        /* msync() won't work here, so we return an error if write is
           possible while it is a shared mapping */
        if ((flags & MAP_TYPE) == MAP_SHARED &&
            (prot & PROT_WRITE))
185
            return -1;
bellard's avatar
bellard committed
186 187 188

        /* adjust protection to be able to read */
        if (!(prot1 & PROT_WRITE))
189
            mprotect(host_start, qemu_host_page_size, prot1 | PROT_WRITE);
190

bellard's avatar
bellard committed
191
        /* read the corresponding file data */
192 193
        if (pread(fd, g2h(start), end - start, offset) == -1)
            return -1;
194

bellard's avatar
bellard committed
195 196
        /* put final protection */
        if (prot_new != (prot1 | PROT_WRITE))
197
            mprotect(host_start, qemu_host_page_size, prot_new);
bellard's avatar
bellard committed
198 199 200
    } else {
        /* just update the protection */
        if (prot_new != prot1) {
201
            mprotect(host_start, qemu_host_page_size, prot_new);
bellard's avatar
bellard committed
202 203 204 205 206
        }
    }
    return 0;
}

207 208 209
#if HOST_LONG_BITS == 64 && TARGET_ABI_BITS == 64
# define TASK_UNMAPPED_BASE  (1ul << 38)
#elif defined(__CYGWIN__)
210
/* Cygwin doesn't have a whole lot of address space.  */
211
# define TASK_UNMAPPED_BASE  0x18000000
212
#else
213
# define TASK_UNMAPPED_BASE  0x40000000
214
#endif
215
static abi_ulong mmap_next_start = TASK_UNMAPPED_BASE;
216

pbrook's avatar
pbrook committed
217 218
unsigned long last_brk;

219 220 221 222 223 224 225 226 227
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
   of guest address space.  */
static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
{
    abi_ulong addr;
    abi_ulong last_addr;
    int prot;
    int looped = 0;

228
    if (size > RESERVED_VA) {
229 230 231 232 233
        return (abi_ulong)-1;
    }

    last_addr = start;
    for (addr = start; last_addr + size != addr; addr += qemu_host_page_size) {
234
        if (last_addr + size >= RESERVED_VA
235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252
            || (abi_ulong)(last_addr + size) < last_addr) {
            if (looped) {
                return (abi_ulong)-1;
            }
            last_addr = qemu_host_page_size;
            addr = 0;
            looped = 1;
            continue;
        }
        prot = page_get_flags(addr);
        if (prot) {
            last_addr = addr + qemu_host_page_size;
        }
    }
    mmap_next_start = addr;
    return last_addr;
}

253 254 255 256 257 258
/*
 * Find and reserve a free memory area of size 'size'. The search
 * starts at 'start'.
 * It must be called with mmap_lock() held.
 * Return -1 if error.
 */
Riku Voipio's avatar
Riku Voipio committed
259
abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
260
{
261
    void *ptr, *prev;
262
    abi_ulong addr;
263
    int wrapped, repeat;
264 265

    /* If 'start' == 0, then a default start address is used. */
266
    if (start == 0) {
267
        start = mmap_next_start;
268 269 270 271 272
    } else {
        start &= qemu_host_page_mask;
    }

    size = HOST_PAGE_ALIGN(size);
273

274
    if (RESERVED_VA) {
275 276 277
        return mmap_find_vma_reserved(start, size);
    }

278
    addr = start;
279 280
    wrapped = repeat = 0;
    prev = 0;
281

282
    for (;; prev = ptr) {
283 284 285 286 287 288 289
        /*
         * Reserve needed memory area to avoid a race.
         * It should be discarded using:
         *  - mmap() with MAP_FIXED flag
         *  - mremap() with MREMAP_FIXED flag
         *  - shmat() with SHM_REMAP flag
         */
290
        ptr = mmap(g2h(addr), size, PROT_NONE,
291 292 293
                   MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);

        /* ENOMEM, if host address space has no memory */
294
        if (ptr == MAP_FAILED) {
295
            return (abi_ulong)-1;
296 297 298 299 300 301 302 303
        }

        /* Count the number of sequential returns of the same address.
           This is used to modify the search algorithm below.  */
        repeat = (ptr == prev ? repeat + 1 : 0);

        if (h2g_valid(ptr + size - 1)) {
            addr = h2g(ptr);
304

305 306 307 308 309 310 311
            if ((addr & ~TARGET_PAGE_MASK) == 0) {
                /* Success.  */
                if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
                    mmap_next_start = addr + size;
                }
                return addr;
            }
312

313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341
            /* The address is not properly aligned for the target.  */
            switch (repeat) {
            case 0:
                /* Assume the result that the kernel gave us is the
                   first with enough free space, so start again at the
                   next higher target page.  */
                addr = TARGET_PAGE_ALIGN(addr);
                break;
            case 1:
                /* Sometimes the kernel decides to perform the allocation
                   at the top end of memory instead.  */
                addr &= TARGET_PAGE_MASK;
                break;
            case 2:
                /* Start over at low memory.  */
                addr = 0;
                break;
            default:
                /* Fail.  This unaligned block must the last.  */
                addr = -1;
                break;
            }
        } else {
            /* Since the result the kernel gave didn't fit, start
               again at low memory.  If any repetition, fail.  */
            addr = (repeat ? -1 : 0);
        }

        /* Unmap and try again.  */
342 343
        munmap(ptr, size);

344
        /* ENOMEM if we checked the whole of the target address space.  */
345
        if (addr == (abi_ulong)-1) {
346
            return (abi_ulong)-1;
347 348 349 350 351 352 353 354 355 356 357 358 359
        } else if (addr == 0) {
            if (wrapped) {
                return (abi_ulong)-1;
            }
            wrapped = 1;
            /* Don't actually use 0 when wrapping, instead indicate
               that we'd truely like an allocation in low memory.  */
            addr = (mmap_min_addr > TARGET_PAGE_SIZE
                     ? TARGET_PAGE_ALIGN(mmap_min_addr)
                     : TARGET_PAGE_SIZE);
        } else if (wrapped && addr >= start) {
            return (abi_ulong)-1;
        }
360 361 362
    }
}

bellard's avatar
bellard committed
363
/* NOTE: all the constants are the HOST ones */
364 365
abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
                     int flags, int fd, abi_ulong offset)
bellard's avatar
bellard committed
366
{
367
    abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
368
    unsigned long host_start;
bellard's avatar
bellard committed
369

pbrook's avatar
pbrook committed
370
    mmap_lock();
bellard's avatar
bellard committed
371 372
#ifdef DEBUG_MMAP
    {
373 374
        printf("mmap: start=0x" TARGET_ABI_FMT_lx
               " len=0x" TARGET_ABI_FMT_lx " prot=%c%c%c flags=",
375
               start, len,
bellard's avatar
bellard committed
376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393
               prot & PROT_READ ? 'r' : '-',
               prot & PROT_WRITE ? 'w' : '-',
               prot & PROT_EXEC ? 'x' : '-');
        if (flags & MAP_FIXED)
            printf("MAP_FIXED ");
        if (flags & MAP_ANONYMOUS)
            printf("MAP_ANON ");
        switch(flags & MAP_TYPE) {
        case MAP_PRIVATE:
            printf("MAP_PRIVATE ");
            break;
        case MAP_SHARED:
            printf("MAP_SHARED ");
            break;
        default:
            printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
            break;
        }
394
        printf("fd=%d offset=" TARGET_ABI_FMT_lx "\n", fd, offset);
bellard's avatar
bellard committed
395 396 397
    }
#endif

398 399
    if (offset & ~TARGET_PAGE_MASK) {
        errno = EINVAL;
pbrook's avatar
pbrook committed
400
        goto fail;
401
    }
bellard's avatar
bellard committed
402 403 404

    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
pbrook's avatar
pbrook committed
405
        goto the_end;
406
    real_start = start & qemu_host_page_mask;
bellard's avatar
bellard committed
407

408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437
    /* When mapping files into a memory area larger than the file, accesses
       to pages beyond the file size will cause a SIGBUS. 

       For example, if mmaping a file of 100 bytes on a host with 4K pages
       emulating a target with 8K pages, the target expects to be able to
       access the first 8K. But the host will trap us on any access beyond
       4K.  

       When emulating a target with a larger page-size than the hosts, we
       may need to truncate file maps at EOF and add extra anonymous pages
       up to the targets page boundary.  */

    if ((qemu_real_host_page_size < TARGET_PAGE_SIZE)
        && !(flags & MAP_ANONYMOUS)) {
       struct stat sb;

       if (fstat (fd, &sb) == -1)
           goto fail;

       /* Are we trying to create a map beyond EOF?.  */
       if (offset + len > sb.st_size) {
           /* If so, truncate the file map at eof aligned with 
              the hosts real pagesize. Additional anonymous maps
              will be created beyond EOF.  */
           len = (sb.st_size - offset);
           len += qemu_real_host_page_size - 1;
           len &= ~(qemu_real_host_page_size - 1);
       }
    }

bellard's avatar
bellard committed
438
    if (!(flags & MAP_FIXED)) {
439 440 441 442 443 444 445 446
        abi_ulong mmap_start;
        void *p;
        host_offset = offset & qemu_host_page_mask;
        host_len = len + offset - host_offset;
        host_len = HOST_PAGE_ALIGN(host_len);
        mmap_start = mmap_find_vma(real_start, host_len);
        if (mmap_start == (abi_ulong)-1) {
            errno = ENOMEM;
pbrook's avatar
pbrook committed
447
            goto fail;
bellard's avatar
bellard committed
448
        }
449 450 451 452
        /* Note: we prefer to control the mapping address. It is
           especially important if qemu_host_page_size >
           qemu_real_host_page_size */
        p = mmap(g2h(mmap_start),
453
                 host_len, prot, flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
454
        if (p == MAP_FAILED)
pbrook's avatar
pbrook committed
455
            goto fail;
456 457
        /* update start so that it points to the file position at 'offset' */
        host_start = (unsigned long)p;
458 459 460
        if (!(flags & MAP_ANONYMOUS)) {
            p = mmap(g2h(mmap_start), len, prot, 
                     flags | MAP_FIXED, fd, host_offset);
461
            host_start += offset - host_offset;
462
        }
463 464 465
        start = h2g(host_start);
    } else {
        if (start & ~TARGET_PAGE_MASK) {
466
            errno = EINVAL;
pbrook's avatar
pbrook committed
467
            goto fail;
468
        }
469 470
        end = start + len;
        real_end = HOST_PAGE_ALIGN(end);
471

472 473 474 475 476 477 478 479 480 481
	/*
	 * Test if requested memory area fits target address space
	 * It can fail only on 64-bit host with 32-bit target.
	 * On any other target/host host mmap() handles this error correctly.
	 */
        if ((unsigned long)start + len - 1 > (abi_ulong) -1) {
            errno = EINVAL;
            goto fail;
        }

482 483 484 485 486 487 488 489 490
        /* worst case: we cannot map the file because the offset is not
           aligned, so we read it */
        if (!(flags & MAP_ANONYMOUS) &&
            (offset & ~qemu_host_page_mask) != (start & ~qemu_host_page_mask)) {
            /* msync() won't work here, so we return an error if write is
               possible while it is a shared mapping */
            if ((flags & MAP_TYPE) == MAP_SHARED &&
                (prot & PROT_WRITE)) {
                errno = EINVAL;
pbrook's avatar
pbrook committed
491
                goto fail;
492 493 494 495 496
            }
            retaddr = target_mmap(start, len, prot | PROT_WRITE,
                                  MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
                                  -1, 0);
            if (retaddr == -1)
pbrook's avatar
pbrook committed
497
                goto fail;
498 499
            if (pread(fd, g2h(start), len, offset) == -1)
                goto fail;
500 501
            if (!(prot & PROT_WRITE)) {
                ret = target_mprotect(start, len, prot);
pbrook's avatar
pbrook committed
502 503 504 505
                if (ret != 0) {
                    start = ret;
                    goto the_end;
                }
506 507
            }
            goto the_end;
bellard's avatar
bellard committed
508
        }
509 510 511 512 513 514 515 516
        
        /* handle the start of the mapping */
        if (start > real_start) {
            if (real_end == real_start + qemu_host_page_size) {
                /* one single host page */
                ret = mmap_frag(real_start, start, end,
                                prot, flags, fd, offset);
                if (ret == -1)
pbrook's avatar
pbrook committed
517
                    goto fail;
518 519 520
                goto the_end1;
            }
            ret = mmap_frag(real_start, start, real_start + qemu_host_page_size,
bellard's avatar
bellard committed
521 522
                            prot, flags, fd, offset);
            if (ret == -1)
pbrook's avatar
pbrook committed
523
                goto fail;
524 525 526 527 528 529 530 531 532
            real_start += qemu_host_page_size;
        }
        /* handle the end of the mapping */
        if (end < real_end) {
            ret = mmap_frag(real_end - qemu_host_page_size,
                            real_end - qemu_host_page_size, real_end,
                            prot, flags, fd,
                            offset + real_end - qemu_host_page_size - start);
            if (ret == -1)
pbrook's avatar
pbrook committed
533
                goto fail;
534
            real_end -= qemu_host_page_size;
bellard's avatar
bellard committed
535
        }
536

537 538 539 540 541 542 543 544 545 546 547
        /* map the middle (easier) */
        if (real_start < real_end) {
            void *p;
            unsigned long offset1;
            if (flags & MAP_ANONYMOUS)
                offset1 = 0;
            else
                offset1 = offset + real_start - start;
            p = mmap(g2h(real_start), real_end - real_start,
                     prot, flags, fd, offset1);
            if (p == MAP_FAILED)
pbrook's avatar
pbrook committed
548
                goto fail;
549
        }
bellard's avatar
bellard committed
550 551 552 553 554
    }
 the_end1:
    page_set_flags(start, start + len, prot | PAGE_VALID);
 the_end:
#ifdef DEBUG_MMAP
555
    printf("ret=0x" TARGET_ABI_FMT_lx "\n", start);
bellard's avatar
bellard committed
556 557 558
    page_dump(stdout);
    printf("\n");
#endif
pbrook's avatar
pbrook committed
559
    mmap_unlock();
bellard's avatar
bellard committed
560
    return start;
pbrook's avatar
pbrook committed
561 562 563
fail:
    mmap_unlock();
    return -1;
bellard's avatar
bellard committed
564 565
}

566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
static void mmap_reserve(abi_ulong start, abi_ulong size)
{
    abi_ulong real_start;
    abi_ulong real_end;
    abi_ulong addr;
    abi_ulong end;
    int prot;

    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(start + size);
    end = start + size;
    if (start > real_start) {
        /* handle host page containing start */
        prot = 0;
        for (addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (real_end == real_start + qemu_host_page_size) {
            for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
                prot |= page_get_flags(addr);
            }
            end = real_end;
        }
        if (prot != 0)
            real_start += qemu_host_page_size;
    }
    if (end < real_end) {
        prot = 0;
        for (addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
            real_end -= qemu_host_page_size;
    }
    if (real_start != real_end) {
        mmap(g2h(real_start), real_end - real_start, PROT_NONE,
                 MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE,
                 -1, 0);
    }
}

607
int target_munmap(abi_ulong start, abi_ulong len)
bellard's avatar
bellard committed
608
{
609
    abi_ulong end, real_start, real_end, addr;
bellard's avatar
bellard committed
610 611 612
    int prot, ret;

#ifdef DEBUG_MMAP
613 614 615
    printf("munmap: start=0x" TARGET_ABI_FMT_lx " len=0x"
           TARGET_ABI_FMT_lx "\n",
           start, len);
bellard's avatar
bellard committed
616 617 618 619 620 621
#endif
    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    if (len == 0)
        return -EINVAL;
pbrook's avatar
pbrook committed
622
    mmap_lock();
bellard's avatar
bellard committed
623
    end = start + len;
624 625
    real_start = start & qemu_host_page_mask;
    real_end = HOST_PAGE_ALIGN(end);
bellard's avatar
bellard committed
626

627
    if (start > real_start) {
bellard's avatar
bellard committed
628 629
        /* handle host page containing start */
        prot = 0;
630
        for(addr = real_start; addr < start; addr += TARGET_PAGE_SIZE) {
bellard's avatar
bellard committed
631 632
            prot |= page_get_flags(addr);
        }
633 634
        if (real_end == real_start + qemu_host_page_size) {
            for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
bellard's avatar
bellard committed
635 636
                prot |= page_get_flags(addr);
            }
637
            end = real_end;
bellard's avatar
bellard committed
638
        }
bellard's avatar
bellard committed
639
        if (prot != 0)
640
            real_start += qemu_host_page_size;
bellard's avatar
bellard committed
641
    }
642
    if (end < real_end) {
bellard's avatar
bellard committed
643
        prot = 0;
644
        for(addr = end; addr < real_end; addr += TARGET_PAGE_SIZE) {
bellard's avatar
bellard committed
645 646 647
            prot |= page_get_flags(addr);
        }
        if (prot != 0)
648
            real_end -= qemu_host_page_size;
bellard's avatar
bellard committed
649
    }
650

pbrook's avatar
pbrook committed
651
    ret = 0;
bellard's avatar
bellard committed
652
    /* unmap what we can */
653
    if (real_start < real_end) {
654
        if (RESERVED_VA) {
655 656 657 658
            mmap_reserve(real_start, real_end - real_start);
        } else {
            ret = munmap(g2h(real_start), real_end - real_start);
        }
bellard's avatar
bellard committed
659 660
    }

pbrook's avatar
pbrook committed
661 662 663 664
    if (ret == 0)
        page_set_flags(start, start + len, 0);
    mmap_unlock();
    return ret;
bellard's avatar
bellard committed
665 666
}

667 668 669
abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
                       abi_ulong new_size, unsigned long flags,
                       abi_ulong new_addr)
bellard's avatar
bellard committed
670 671
{
    int prot;
672
    void *host_addr;
bellard's avatar
bellard committed
673

pbrook's avatar
pbrook committed
674
    mmap_lock();
675

676
    if (flags & MREMAP_FIXED) {
677 678 679
        host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                     old_size, new_size,
                                     flags,
680 681
                                     g2h(new_addr));

682
        if (RESERVED_VA && host_addr != MAP_FAILED) {
683 684 685 686 687
            /* If new and old addresses overlap then the above mremap will
               already have failed with EINVAL.  */
            mmap_reserve(old_addr, old_size);
        }
    } else if (flags & MREMAP_MAYMOVE) {
688 689 690 691 692 693 694
        abi_ulong mmap_start;

        mmap_start = mmap_find_vma(0, new_size);

        if (mmap_start == -1) {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
695
        } else {
696 697 698 699
            host_addr = (void *) syscall(__NR_mremap, g2h(old_addr),
                                         old_size, new_size,
                                         flags | MREMAP_FIXED,
                                         g2h(mmap_start));
700 701
            mmap_reserve(old_addr, old_size);
        }
702
    } else {
703
        int prot = 0;
704
        if (RESERVED_VA && old_size < new_size) {
705 706 707 708 709 710 711 712 713
            abi_ulong addr;
            for (addr = old_addr + old_size;
                 addr < old_addr + new_size;
                 addr++) {
                prot |= page_get_flags(addr);
            }
        }
        if (prot == 0) {
            host_addr = mremap(g2h(old_addr), old_size, new_size, flags);
714
            if (host_addr != MAP_FAILED && RESERVED_VA && old_size > new_size) {
715 716 717 718 719 720
                mmap_reserve(old_addr + old_size, new_size - old_size);
            }
        } else {
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
721 722 723 724 725 726 727 728 729 730
        /* Check if address fits target address space */
        if ((unsigned long)host_addr + new_size > (abi_ulong)-1) {
            /* Revert mremap() changes */
            host_addr = mremap(g2h(old_addr), new_size, old_size, flags);
            errno = ENOMEM;
            host_addr = MAP_FAILED;
        }
    }

    if (host_addr == MAP_FAILED) {
pbrook's avatar
pbrook committed
731 732 733 734 735 736 737 738
        new_addr = -1;
    } else {
        new_addr = h2g(host_addr);
        prot = page_get_flags(old_addr);
        page_set_flags(old_addr, old_addr + old_size, 0);
        page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
    }
    mmap_unlock();
bellard's avatar
bellard committed
739 740 741
    return new_addr;
}

742
int target_msync(abi_ulong start, abi_ulong len, int flags)
bellard's avatar
bellard committed
743
{
744
    abi_ulong end;
bellard's avatar
bellard committed
745 746 747 748 749

    if (start & ~TARGET_PAGE_MASK)
        return -EINVAL;
    len = TARGET_PAGE_ALIGN(len);
    end = start + len;
bellard's avatar
bellard committed
750 751 752 753
    if (end < start)
        return -EINVAL;
    if (end == start)
        return 0;
754

755
    start &= qemu_host_page_mask;
756
    return msync(g2h(start), end - start, flags);
bellard's avatar
bellard committed
757
}