/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "qemu/timer.h"
#include "qemu/sockets.h"	// struct in_addr needed for libslirp.h
#include "sysemu/qtest.h"
#include "slirp/libslirp.h"
#include "qemu/main-loop.h"
#include "block/aio.h"

#ifndef _WIN32

#include "qemu/compatfd.h"

/* If we have signalfd, we mask out the signals we want to handle and then
 * use signalfd to listen for them.  We rely on whatever the current signal
 * handler is to dispatch the signals when we receive them.
 */
static void sigfd_handler(void *opaque)
{
    int fd = (intptr_t)opaque;
    struct qemu_signalfd_siginfo info;
    struct sigaction action;
    ssize_t len;

    while (1) {
        do {
            len = read(fd, &info, sizeof(info));
        } while (len == -1 && errno == EINTR);

        if (len == -1 && errno == EAGAIN) {
            break;
        }

        if (len != sizeof(info)) {
            printf("read from sigfd returned %zd: %m\n", len);
            return;
        }

        sigaction(info.ssi_signo, NULL, &action);
        if ((action.sa_flags & SA_SIGINFO) && action.sa_sigaction) {
            action.sa_sigaction(info.ssi_signo,
                                (siginfo_t *)&info, NULL);
        } else if (action.sa_handler) {
            action.sa_handler(info.ssi_signo);
        }
    }
}

static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

    /*
     * SIG_IPI must be blocked in the main thread and must not be caught
     * by sigwait() in the signal thread. Otherwise, the cpu thread will
     * not catch it reliably.
     */
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGBUS);
87 88 89 90 91
    /* SIGINT cannot be handled via signalfd, so that ^C can be used
     * to interrupt QEMU when it is being run under gdb.  SIGHUP and
     * SIGTERM are also handled asynchronously, even though it is not
     * strictly necessary, because they use the same handler as SIGINT.
     */
92 93
    pthread_sigmask(SIG_BLOCK, &set, NULL);

Lai Jiangshan's avatar
Lai Jiangshan committed
94
    sigdelset(&set, SIG_IPI);
95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110
    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}

#else /* _WIN32 */

/* Win32 has no signalfd; nothing to set up here. */
static int qemu_signal_init(void)
{
    return 0;
}
#endif

static AioContext *qemu_aio_context;

/* Accessor for the main loop's global AioContext (NULL until
 * qemu_init_main_loop() has run).
 */
AioContext *qemu_get_aio_context(void)
{
    return qemu_aio_context;
}

void qemu_notify_event(void)
{
126
    if (!qemu_aio_context) {
127 128
        return;
    }
129
    aio_notify(qemu_aio_context);
130 131
}

static GArray *gpollfds;

/* One-time main loop initialization: clocks, signal handling, the global
 * AioContext and its GSource, and the shared poll-fd array.
 *
 * @errp: set when AioContext creation fails.
 * Returns 0 on success or a negative errno value.
 */
int qemu_init_main_loop(Error **errp)
{
    int ret;
    GSource *src;
    Error *local_error = NULL;

    init_clocks();

    ret = qemu_signal_init();
    if (ret) {
        return ret;
    }

    qemu_aio_context = aio_context_new(&local_error);
    if (!qemu_aio_context) {
        error_propagate(errp, local_error);
        return -EMFILE;
    }
    gpollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    /* Attach the AioContext's GSource to the default GLib main context so
     * glib_pollfds_fill()/glib_pollfds_poll() service it.
     */
    src = aio_get_g_source(qemu_aio_context);
    g_source_attach(src, NULL);
    g_source_unref(src);
    return 0;
}

static int max_priority;

#ifndef _WIN32
static int glib_pollfds_idx;
static int glib_n_poll_fds;

static void glib_pollfds_fill(int64_t *cur_timeout)
166 167
{
    GMainContext *context = g_main_context_default();
168
    int timeout = 0;
169
    int64_t timeout_ns;
170
    int n;
171 172 173

    g_main_context_prepare(context, &max_priority);

174 175 176 177 178 179 180 181 182 183
    glib_pollfds_idx = gpollfds->len;
    n = glib_n_poll_fds;
    do {
        GPollFD *pfds;
        glib_n_poll_fds = n;
        g_array_set_size(gpollfds, glib_pollfds_idx + glib_n_poll_fds);
        pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);
        n = g_main_context_query(context, max_priority, &timeout, pfds,
                                 glib_n_poll_fds);
    } while (n != glib_n_poll_fds);
184

185 186 187 188
    if (timeout < 0) {
        timeout_ns = -1;
    } else {
        timeout_ns = (int64_t)timeout * (int64_t)SCALE_MS;
189
    }
190 191

    *cur_timeout = qemu_soonest_timeout(timeout_ns, *cur_timeout);
192 193
}

static void glib_pollfds_poll(void)
195 196
{
    GMainContext *context = g_main_context_default();
197
    GPollFD *pfds = &g_array_index(gpollfds, GPollFD, glib_pollfds_idx);
198

199
    if (g_main_context_check(context, max_priority, pfds, glib_n_poll_fds)) {
200 201 202 203
        g_main_context_dispatch(context);
    }
}

#define MAX_MAIN_LOOP_SPIN (1000)

static int os_host_main_loop_wait(int64_t timeout)
207 208
{
    int ret;
209
    static int spin_counter;
210

211
    glib_pollfds_fill(&timeout);
212

213 214 215 216 217 218
    /* If the I/O thread is very busy or we are incorrectly busy waiting in
     * the I/O thread, this can lead to starvation of the BQL such that the
     * VCPU threads never run.  To make sure we can detect the later case,
     * print a message to the screen.  If we run into this condition, create
     * a fake timeout in order to give the VCPU threads a chance to run.
     */
219
    if (!timeout && (spin_counter > MAX_MAIN_LOOP_SPIN)) {
220 221
        static bool notified;

222
        if (!notified && !qtest_enabled()) {
223 224 225 226 227 228
            fprintf(stderr,
                    "main-loop: WARNING: I/O thread spun for %d iterations\n",
                    MAX_MAIN_LOOP_SPIN);
            notified = true;
        }

229
        timeout = SCALE_MS;
230 231
    }

232
    if (timeout) {
233
        spin_counter = 0;
234
        qemu_mutex_unlock_iothread();
235 236
    } else {
        spin_counter++;
237 238
    }

239
    ret = qemu_poll_ns((GPollFD *)gpollfds->data, gpollfds->len, timeout);
240

241
    if (timeout) {
242 243 244
        qemu_mutex_lock_iothread();
    }

245
    glib_pollfds_poll();
246 247 248
    return ret;
}
#else
/***********************************************************/
/* Polling handling */

typedef struct PollingEntry {
    PollingFunc *func;
    void *opaque;
    struct PollingEntry *next;
} PollingEntry;

static PollingEntry *first_polling_entry;

/* Append a polling callback to the tail of the polling list.
 * Always succeeds and returns 0.
 */
int qemu_add_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry *entry = g_malloc0(sizeof(PollingEntry));
    PollingEntry **tail;

    entry->func = func;
    entry->opaque = opaque;
    /* Walk to the terminating NULL link and hook the new entry there. */
    for (tail = &first_polling_entry; *tail != NULL; tail = &(*tail)->next) {
        ;
    }
    *tail = entry;
    return 0;
}

/* Remove and free the first polling-list entry matching func/opaque,
 * if any such entry exists.
 */
void qemu_del_polling_cb(PollingFunc *func, void *opaque)
{
    PollingEntry **link = &first_polling_entry;

    while (*link != NULL) {
        PollingEntry *cur = *link;

        if (cur->func == func && cur->opaque == opaque) {
            *link = cur->next;
            g_free(cur);
            break;
        }
        link = &cur->next;
    }
}

/***********************************************************/
/* Wait objects support */
typedef struct WaitObjects {
    int num;
288
    int revents[MAXIMUM_WAIT_OBJECTS + 1];
289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    WaitObjectFunc *func[MAXIMUM_WAIT_OBJECTS + 1];
    void *opaque[MAXIMUM_WAIT_OBJECTS + 1];
} WaitObjects;

static WaitObjects wait_objects = {0};

/* Register a Win32 HANDLE with a callback invoked when it signals.
 * Returns 0 on success, -1 when the wait-object table is full.
 */
int qemu_add_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    WaitObjects *w = &wait_objects;
    if (w->num >= MAXIMUM_WAIT_OBJECTS) {
        return -1;
    }
    w->events[w->num] = handle;
    w->func[w->num] = func;
    w->opaque[w->num] = opaque;
    w->revents[w->num] = 0;
    w->num++;
    return 0;
}

/* Unregister @handle, shifting later entries down to keep the arrays
 * compact.  NOTE(review): @func and @opaque are accepted but matching
 * appears to be by handle only — confirm against callers.
 */
void qemu_del_wait_object(HANDLE handle, WaitObjectFunc *func, void *opaque)
{
    int i, found;
    WaitObjects *w = &wait_objects;

    found = 0;
    for (i = 0; i < w->num; i++) {
        if (w->events[i] == handle) {
            found = 1;
        }
        if (found) {
            /* Reading index i + 1 is safe: arrays have one spare slot. */
            w->events[i] = w->events[i + 1];
            w->func[i] = w->func[i + 1];
            w->opaque[i] = w->opaque[i + 1];
            w->revents[i] = w->revents[i + 1];
        }
    }
    if (found) {
        w->num--;
    }
}

void qemu_fd_register(int fd)
{
334 335
    WSAEventSelect(fd, event_notifier_get_handle(&qemu_aio_context->notifier),
                   FD_READ | FD_ACCEPT | FD_CLOSE |
336 337 338
                   FD_CONNECT | FD_WRITE | FD_OOB);
}

/* Translate GPollFD events into select() fd_sets.
 * Returns the highest fd added, or -1 when no fd was requested.
 */
static int pollfds_fill(GArray *pollfds, fd_set *rfds, fd_set *wfds,
                        fd_set *xfds)
{
    int nfds = -1;
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int events = pfd->events;
        if (events & G_IO_IN) {
            FD_SET(fd, rfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_OUT) {
            FD_SET(fd, wfds);
            nfds = MAX(nfds, fd);
        }
        if (events & G_IO_PRI) {
            FD_SET(fd, xfds);
            nfds = MAX(nfds, fd);
        }
    }
    return nfds;
}

/* Translate select() results back into GPollFD revents, masked by the
 * events each entry originally asked for.
 */
static void pollfds_poll(GArray *pollfds, int nfds, fd_set *rfds,
                         fd_set *wfds, fd_set *xfds)
{
    int i;

    for (i = 0; i < pollfds->len; i++) {
        GPollFD *pfd = &g_array_index(pollfds, GPollFD, i);
        int fd = pfd->fd;
        int revents = 0;

        if (FD_ISSET(fd, rfds)) {
            revents |= G_IO_IN;
        }
        if (FD_ISSET(fd, wfds)) {
            revents |= G_IO_OUT;
        }
        if (FD_ISSET(fd, xfds)) {
            revents |= G_IO_PRI;
        }
        pfd->revents = revents & pfd->events;
    }
}

static int os_host_main_loop_wait(int64_t timeout)
389
{
390
    GMainContext *context = g_main_context_default();
391
    GPollFD poll_fds[1024 * 2]; /* this is probably overkill */
392
    int select_ret = 0;
393
    int g_poll_ret, ret, i, n_poll_fds;
394
    PollingEntry *pe;
395
    WaitObjects *w = &wait_objects;
396
    gint poll_timeout;
397
    int64_t poll_timeout_ns;
398
    static struct timeval tv0;
399 400
    fd_set rfds, wfds, xfds;
    int nfds;
401 402 403 404 405 406

    /* XXX: need to suppress polling by better using win32 events */
    ret = 0;
    for (pe = first_polling_entry; pe != NULL; pe = pe->next) {
        ret |= pe->func(pe->opaque);
    }
407 408 409
    if (ret != 0) {
        return ret;
    }
410

411 412 413 414 415 416 417 418 419 420 421 422 423 424
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&xfds);
    nfds = pollfds_fill(gpollfds, &rfds, &wfds, &xfds);
    if (nfds >= 0) {
        select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
        if (select_ret != 0) {
            timeout = 0;
        }
        if (select_ret > 0) {
            pollfds_poll(gpollfds, nfds, &rfds, &wfds, &xfds);
        }
    }

425
    g_main_context_prepare(context, &max_priority);
426
    n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
427 428 429
                                      poll_fds, ARRAY_SIZE(poll_fds));
    g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds));

430
    for (i = 0; i < w->num; i++) {
431
        poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i];
432
        poll_fds[n_poll_fds + i].events = G_IO_IN;
433 434
    }

435 436 437 438
    if (poll_timeout < 0) {
        poll_timeout_ns = -1;
    } else {
        poll_timeout_ns = (int64_t)poll_timeout * (int64_t)SCALE_MS;
439 440
    }

441 442
    poll_timeout_ns = qemu_soonest_timeout(poll_timeout_ns, timeout);

443
    qemu_mutex_unlock_iothread();
444 445
    g_poll_ret = qemu_poll_ns(poll_fds, n_poll_fds + w->num, poll_timeout_ns);

446
    qemu_mutex_lock_iothread();
447
    if (g_poll_ret > 0) {
448
        for (i = 0; i < w->num; i++) {
449
            w->revents[i] = poll_fds[n_poll_fds + i].revents;
450
        }
451 452 453
        for (i = 0; i < w->num; i++) {
            if (w->revents[i] && w->func[i]) {
                w->func[i](w->opaque[i]);
454 455 456 457
            }
        }
    }

458 459 460 461
    if (g_main_context_check(context, max_priority, poll_fds, n_poll_fds)) {
        g_main_context_dispatch(context);
    }

462
    return select_ret || g_poll_ret;
463 464 465 466 467
}
#endif

int main_loop_wait(int nonblocking)
{
468 469
    int ret;
    uint32_t timeout = UINT32_MAX;
470
    int64_t timeout_ns;
471 472 473 474 475 476

    if (nonblocking) {
        timeout = 0;
    }

    /* poll any events */
477
    g_array_set_size(gpollfds, 0); /* reset for new iteration */
478 479
    /* XXX: separate device handlers from system ones */
#ifdef CONFIG_SLIRP
480
    slirp_pollfds_fill(gpollfds, &timeout);
481
#endif
482
    qemu_iohandler_fill(gpollfds);
483 484 485 486 487 488 489 490 491 492 493 494

    if (timeout == UINT32_MAX) {
        timeout_ns = -1;
    } else {
        timeout_ns = (uint64_t)timeout * (int64_t)(SCALE_MS);
    }

    timeout_ns = qemu_soonest_timeout(timeout_ns,
                                      timerlistgroup_deadline_ns(
                                          &main_loop_tlg));

    ret = os_host_main_loop_wait(timeout_ns);
495
    qemu_iohandler_poll(gpollfds, ret);
496
#ifdef CONFIG_SLIRP
497
    slirp_pollfds_poll(gpollfds, (ret < 0));
498 499
#endif

500
    qemu_clock_run_all_timers();
501 502 503

    return ret;
}

/* Functions to operate on the main QEMU AioContext.  */

/* Create a bottom half attached to the main loop's AioContext. */
QEMUBH *qemu_bh_new(QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh = aio_bh_new(qemu_aio_context, cb, opaque);
    return bh;
}