The data contained in this repository can be downloaded to your computer using one of several clients.
Please see the documentation of your version control software client for more information.

Please select the desired protocol below to get the URL.

This URL has Read-Only access.

Statistics
| Branch: | Revision:

main_repo / deps / libev / ev.c @ 90fc8d36

History | View | Annotate | Download (68 KB)

1 c5183738 Ryan
/*
2
 * libev event processing core, watcher management
3
 *
4
 * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann <libev@schmorp.de>
5
 * All rights reserved.
6
 *
7
 * Redistribution and use in source and binary forms, with or without modifica-
8
 * tion, are permitted provided that the following conditions are met:
9
 * 
10
 *   1.  Redistributions of source code must retain the above copyright notice,
11
 *       this list of conditions and the following disclaimer.
12
 * 
13
 *   2.  Redistributions in binary form must reproduce the above copyright
14
 *       notice, this list of conditions and the following disclaimer in the
15
 *       documentation and/or other materials provided with the distribution.
16
 * 
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
18
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
19
 * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
20
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
21
 * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
23
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
24
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
25
 * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
26
 * OF THE POSSIBILITY OF SUCH DAMAGE.
27
 *
28
 * Alternatively, the contents of this file may be used under the terms of
29
 * the GNU General Public License ("GPL") version 2 or any later version,
30
 * in which case the provisions of the GPL are applicable instead of
31
 * the above. If you wish to allow the use of your version of this file
32
 * only under the terms of the GPL and not to allow others to use your
33
 * version of this file under the BSD license, indicate your decision
34
 * by deleting the provisions above and replace them with the notice
35
 * and other provisions required by the GPL. If you do not delete the
36
 * provisions above, a recipient may use your version of this file under
37
 * either the BSD or the GPL.
38
 */
39
40
#ifdef __cplusplus
41
extern "C" {
42
#endif
43
44
/* this big block deduces configuration from config.h */
45
#ifndef EV_STANDALONE
46
# ifdef EV_CONFIG_H
47
#  include EV_CONFIG_H
48
# else
49
#  include "config.h"
50
# endif
51
52
# if HAVE_CLOCK_SYSCALL
53
#  ifndef EV_USE_CLOCK_SYSCALL
54
#   define EV_USE_CLOCK_SYSCALL 1
55
#   ifndef EV_USE_REALTIME
56
#    define EV_USE_REALTIME  0
57
#   endif
58
#   ifndef EV_USE_MONOTONIC
59
#    define EV_USE_MONOTONIC 1
60
#   endif
61
#  endif
62
# endif
63
64
# if HAVE_CLOCK_GETTIME
65
#  ifndef EV_USE_MONOTONIC
66
#   define EV_USE_MONOTONIC 1
67
#  endif
68
#  ifndef EV_USE_REALTIME
69
#   define EV_USE_REALTIME  0
70
#  endif
71
# else
72
#  ifndef EV_USE_MONOTONIC
73
#   define EV_USE_MONOTONIC 0
74
#  endif
75
#  ifndef EV_USE_REALTIME
76
#   define EV_USE_REALTIME  0
77
#  endif
78
# endif
79
80
# ifndef EV_USE_NANOSLEEP
81
#  if HAVE_NANOSLEEP
82
#   define EV_USE_NANOSLEEP 1
83
#  else
84
#   define EV_USE_NANOSLEEP 0
85
#  endif
86
# endif
87
88
# ifndef EV_USE_SELECT
89
#  if HAVE_SELECT && HAVE_SYS_SELECT_H
90
#   define EV_USE_SELECT 1
91
#  else
92
#   define EV_USE_SELECT 0
93
#  endif
94
# endif
95
96
# ifndef EV_USE_POLL
97
#  if HAVE_POLL && HAVE_POLL_H
98
#   define EV_USE_POLL 1
99
#  else
100
#   define EV_USE_POLL 0
101
#  endif
102
# endif
103
   
104
# ifndef EV_USE_EPOLL
105
#  if HAVE_EPOLL_CTL && HAVE_SYS_EPOLL_H
106
#   define EV_USE_EPOLL 1
107
#  else
108
#   define EV_USE_EPOLL 0
109
#  endif
110
# endif
111
   
112
# ifndef EV_USE_KQUEUE
113
#  if HAVE_KQUEUE && HAVE_SYS_EVENT_H && HAVE_SYS_QUEUE_H
114
#   define EV_USE_KQUEUE 1
115
#  else
116
#   define EV_USE_KQUEUE 0
117
#  endif
118
# endif
119
   
120
# ifndef EV_USE_PORT
121
#  if HAVE_PORT_H && HAVE_PORT_CREATE
122
#   define EV_USE_PORT 1
123
#  else
124
#   define EV_USE_PORT 0
125
#  endif
126
# endif
127
128
# ifndef EV_USE_INOTIFY
129
#  if HAVE_INOTIFY_INIT && HAVE_SYS_INOTIFY_H
130
#   define EV_USE_INOTIFY 1
131
#  else
132
#   define EV_USE_INOTIFY 0
133
#  endif
134
# endif
135
136
# ifndef EV_USE_EVENTFD
137
#  if HAVE_EVENTFD
138
#   define EV_USE_EVENTFD 1
139
#  else
140
#   define EV_USE_EVENTFD 0
141
#  endif
142
# endif
143
 
144
#endif
145
146
#include <math.h>
147
#include <stdlib.h>
148
#include <fcntl.h>
149
#include <stddef.h>
150
151
#include <stdio.h>
152
153
#include <assert.h>
154
#include <errno.h>
155
#include <sys/types.h>
156
#include <time.h>
157
158
#include <signal.h>
159
160
#ifdef EV_H
161
# include EV_H
162
#else
163
# include "ev.h"
164
#endif
165
166
#ifndef _WIN32
167
# include <sys/time.h>
168
# include <sys/wait.h>
169
# include <unistd.h>
170
#else
171
# include <io.h>
172
# define WIN32_LEAN_AND_MEAN
173
# include <windows.h>
174
# ifndef EV_SELECT_IS_WINSOCKET
175
#  define EV_SELECT_IS_WINSOCKET 1
176
# endif
177
#endif
178
179
/* this block tries to deduce configuration from header-defined symbols and defaults */
180
181
#ifndef EV_USE_CLOCK_SYSCALL
182
# if __linux && __GLIBC__ >= 2
183
#  define EV_USE_CLOCK_SYSCALL 1
184
# else
185
#  define EV_USE_CLOCK_SYSCALL 0
186
# endif
187
#endif
188
189
#ifndef EV_USE_MONOTONIC
190
# if defined (_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
191
#  define EV_USE_MONOTONIC 1
192
# else
193
#  define EV_USE_MONOTONIC 0
194
# endif
195
#endif
196
197
#ifndef EV_USE_REALTIME
198
# define EV_USE_REALTIME !EV_USE_CLOCK_SYSCALL
199
#endif
200
201
#ifndef EV_USE_NANOSLEEP
202
# if _POSIX_C_SOURCE >= 199309L
203
#  define EV_USE_NANOSLEEP 1
204
# else
205
#  define EV_USE_NANOSLEEP 0
206
# endif
207
#endif
208
209
#ifndef EV_USE_SELECT
210
# define EV_USE_SELECT 1
211
#endif
212
213
#ifndef EV_USE_POLL
214
# ifdef _WIN32
215
#  define EV_USE_POLL 0
216
# else
217
#  define EV_USE_POLL 1
218
# endif
219
#endif
220
221
#ifndef EV_USE_EPOLL
222
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
223
#  define EV_USE_EPOLL 1
224
# else
225
#  define EV_USE_EPOLL 0
226
# endif
227
#endif
228
229
#ifndef EV_USE_KQUEUE
230
# define EV_USE_KQUEUE 0
231
#endif
232
233
#ifndef EV_USE_PORT
234
# define EV_USE_PORT 0
235
#endif
236
237
#ifndef EV_USE_INOTIFY
238
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
239
#  define EV_USE_INOTIFY 1
240
# else
241
#  define EV_USE_INOTIFY 0
242
# endif
243
#endif
244
245
#ifndef EV_PID_HASHSIZE
246
# if EV_MINIMAL
247
#  define EV_PID_HASHSIZE 1
248
# else
249
#  define EV_PID_HASHSIZE 16
250
# endif
251
#endif
252
253
#ifndef EV_INOTIFY_HASHSIZE
254
# if EV_MINIMAL
255
#  define EV_INOTIFY_HASHSIZE 1
256
# else
257
#  define EV_INOTIFY_HASHSIZE 16
258
# endif
259
#endif
260
261
#ifndef EV_USE_EVENTFD
262
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 7))
263
#  define EV_USE_EVENTFD 1
264
# else
265
#  define EV_USE_EVENTFD 0
266
# endif
267
#endif
268
269
#if 0 /* debugging */
270
# define EV_VERIFY 3
271
# define EV_USE_4HEAP 1
272
# define EV_HEAP_CACHE_AT 1
273
#endif
274
275
#ifndef EV_VERIFY
276
# define EV_VERIFY !EV_MINIMAL
277
#endif
278
279
#ifndef EV_USE_4HEAP
280
# define EV_USE_4HEAP !EV_MINIMAL
281
#endif
282
283
#ifndef EV_HEAP_CACHE_AT
284
# define EV_HEAP_CACHE_AT !EV_MINIMAL
285
#endif
286
287
/* this block fixes any misconfiguration where we know we run into trouble otherwise */
288
289
#ifndef CLOCK_MONOTONIC
290
# undef EV_USE_MONOTONIC
291
# define EV_USE_MONOTONIC 0
292
#endif
293
294
#ifndef CLOCK_REALTIME
295
# undef EV_USE_REALTIME
296
# define EV_USE_REALTIME 0
297
#endif
298
299
#if !EV_STAT_ENABLE
300
# undef EV_USE_INOTIFY
301
# define EV_USE_INOTIFY 0
302
#endif
303
304
#if !EV_USE_NANOSLEEP
305
# ifndef _WIN32
306
#  include <sys/select.h>
307
# endif
308
#endif
309
310
#if EV_USE_INOTIFY
311
# include <sys/utsname.h>
312
# include <sys/statfs.h>
313
# include <sys/inotify.h>
314
/* some very old inotify.h headers don't have IN_DONT_FOLLOW */
315
# ifndef IN_DONT_FOLLOW
316
#  undef EV_USE_INOTIFY
317
#  define EV_USE_INOTIFY 0
318
# endif
319
#endif
320
321
#if EV_SELECT_IS_WINSOCKET
322
# include <winsock.h>
323
#endif
324
325
/* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
326
/* which makes programs even slower. might work on other unices, too. */
327
#if EV_USE_CLOCK_SYSCALL
328
# include <syscall.h>
329
# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
330
# undef EV_USE_MONOTONIC
331
# define EV_USE_MONOTONIC 1
332
#endif
333
334
#if EV_USE_EVENTFD
335
/* our minimum requirement is glibc 2.7 which has the stub, but not the header */
336
# include <stdint.h>
337
# ifdef __cplusplus
338
extern "C" {
339
# endif
340
int eventfd (unsigned int initval, int flags);
341
# ifdef __cplusplus
342
}
343
# endif
344
#endif
345
346
/**/
347
348
#if EV_VERIFY >= 3
349
# define EV_FREQUENT_CHECK ev_loop_verify (EV_A)
350
#else
351
# define EV_FREQUENT_CHECK do { } while (0)
352
#endif
353
354
/*
355
 * This is used to avoid floating point rounding problems.
356
 * It is added to ev_rt_now when scheduling periodics
357
 * to ensure progress, time-wise, even when rounding
358
 * errors are against us.
359
 * This value is good at least till the year 4000.
360
 * Better solutions welcome.
361
 */
362
#define TIME_EPSILON  0.0001220703125 /* 1/8192 */
363
364
#define MIN_TIMEJUMP  1. /* minimum timejump that gets detected (if monotonic clock available) */
365
#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
366
/*#define CLEANUP_INTERVAL (MAX_BLOCKTIME * 5.)*/ /* how often to try to free memory and re-check fds, TODO */
367
368
#if __GNUC__ >= 4
369
# define expect(expr,value)         __builtin_expect ((expr),(value))
370
# define noinline                   __attribute__ ((noinline))
371
#else
372
# define expect(expr,value)         (expr)
373
# define noinline
374
# if __STDC_VERSION__ < 199901L && __GNUC__ < 2
375
#  define inline
376
# endif
377
#endif
378
379
#define expect_false(expr) expect ((expr) != 0, 0)
380
#define expect_true(expr)  expect ((expr) != 0, 1)
381
#define inline_size        static inline
382
383
#if EV_MINIMAL
384
# define inline_speed      static noinline
385
#else
386
# define inline_speed      static inline
387
#endif
388
389
/* number of distinct watcher priorities, and conversion from a watcher's
 * priority to a zero-based array index */
#define NUMPRI    (EV_MAXPRI - EV_MINPRI + 1)
#define ABSPRI(w) (((W)w)->priority - EV_MINPRI)

#define EMPTY       /* required for microsofts broken pseudo-c compiler */
#define EMPTY2(a,b) /* used to suppress some warnings */

/* shorthands for the generic watcher base types */
typedef ev_watcher *W;
typedef ev_watcher_list *WL;
typedef ev_watcher_time *WT;

/* accessors for the common watcher fields, usable on any watcher pointer */
#define ev_active(w) ((W)(w))->active
#define ev_at(w) ((WT)(w))->at
401
402
#if EV_USE_REALTIME
403
/* sig_atomic_t is used to avoid per-thread variables or locking but still */
404
 * giving it a reasonably high chance of working on typical architectures */
405
static EV_ATOMIC_T have_realtime; /* did clock_gettime (CLOCK_REALTIME) work? */
406
#endif
407
408
#if EV_USE_MONOTONIC
409
static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work? */
410
#endif
411
412
#ifdef _WIN32
413
# include "ev_win32.c"
414
#endif
415
416
/*****************************************************************************/
417
418
/* the user-settable callback for fatal system errors (see ev_syserr below) */
static void (*syserr_cb)(const char *msg);

/* set the callback invoked on fatal system errors; passing 0 restores the
 * default behaviour of perror () followed by abort () (see ev_syserr) */
void
ev_set_syserr_cb (void (*cb)(const char *msg))
{
  syserr_cb = cb;
}
425
426
/* report a fatal system error: hand it to the user callback if one is set,
 * otherwise print it via perror and abort */
static void noinline
ev_syserr (const char *msg)
{
  if (!msg)
    msg = "(libev) system error";

  if (syserr_cb)
    syserr_cb (msg);
  else
    {
      perror (msg);
      abort ();
    }
}
440
441
static void *
ev_realloc_emul (void *ptr, long size)
{
  /* Emulate a fully conforming realloc: some systems, notably OpenBSD
   * and Darwin, fail to properly implement realloc (x, 0) (required by
   * both ANSI C and the Single Unix Specification to act as a free),
   * so handle the zero-size case explicitly here.
   */
  if (!size)
    {
      free (ptr);
      return 0;
    }

  return realloc (ptr, size);
}
455
456
/* the allocator used by ev_realloc; defaults to the conforming realloc emulation */
static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;

/* replace the memory allocator used by libev; cb must follow realloc
 * semantics (grow/shrink on nonzero size, free on zero size) */
void
ev_set_allocator (void *(*cb)(void *ptr, long size))
{
  alloc = cb;
}
463
464
/* allocate/resize/free memory via the user-settable allocator, aborting
 * the process when an allocation (nonzero size) fails */
inline_speed void *
ev_realloc (void *ptr, long size)
{
  void *mem = alloc (ptr, size);

  if (!mem && size)
    {
      fprintf (stderr, "libev: cannot allocate %ld bytes, aborting.", size);
      abort ();
    }

  return mem;
}

/* convenience wrappers mapping malloc/free onto the realloc-style allocator */
#define ev_malloc(size) ev_realloc (0, (size))
#define ev_free(ptr)    ev_realloc ((ptr), 0)
480
481
/*****************************************************************************/
482
483
/* per-file-descriptor bookkeeping: one ANFD per fd in the anfds array */
typedef struct
{
  WL head;              /* linked list of ev_io watchers for this fd */
  unsigned char events; /* union of the event bits the watchers are interested in */
  unsigned char reify;  /* nonzero while this fd is queued for a backend update (see fd_change/fd_reify) */
  unsigned char emask; /* the epoll backend stores the actual kernel mask in here */
  unsigned char unused;
#if EV_USE_EPOLL
  unsigned int egen;  /* generation counter to counter epoll bugs */
#endif
#if EV_SELECT_IS_WINSOCKET
  SOCKET handle;      /* the socket handle backing this fd, needed by winsock select */
#endif
} ANFD;

/* a pending event: the watcher plus the event bits to be delivered to it */
typedef struct
{
  W w;
  int events;
} ANPENDING;

#if EV_USE_INOTIFY
/* hash table entry per inotify-id */
typedef struct
{
  WL head;
} ANFS;
#endif
511
512
/* Heap Entry */
#if EV_HEAP_CACHE_AT
  /* cache the timestamp next to the watcher pointer, so heap comparisons
   * avoid chasing the watcher pointer (better cache locality) */
  typedef struct {
    ev_tstamp at;
    WT w;
  } ANHE;

  #define ANHE_w(he)        (he).w     /* access watcher, read-write */
  #define ANHE_at(he)       (he).at    /* access cached at, read-only */
  #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */
#else
  /* no caching: a heap entry is just the watcher pointer itself */
  typedef WT ANHE;

  #define ANHE_w(he)        (he)
  #define ANHE_at(he)       (he)->at
  #define ANHE_at_cache(he)
#endif
529
530
#if EV_MULTIPLICITY

  /* the event loop structure: all per-loop state lives here; the members
   * are generated from ev_vars.h so the same list can also be emitted as
   * static variables in the single-loop build below */
  struct ev_loop
  {
    ev_tstamp ev_rt_now;
    #define ev_rt_now ((loop)->ev_rt_now)
    #define VAR(name,decl) decl;
      #include "ev_vars.h"
    #undef VAR
  };
  #include "ev_wrap.h"

  /* the default loop and the pointer the public API hands out for it */
  static struct ev_loop default_loop_struct;
  struct ev_loop *ev_default_loop_ptr;

#else

  /* single-loop build: the loop state becomes file-scope static variables */
  ev_tstamp ev_rt_now;
  #define VAR(name,decl) static decl;
    #include "ev_vars.h"
  #undef VAR

  static int ev_default_loop_ptr;

#endif
555
556
/*****************************************************************************/
557
558
/* return the current wall-clock time in seconds since the epoch,
 * preferring clock_gettime (CLOCK_REALTIME) when it is known to work */
ev_tstamp
ev_time (void)
{
#if EV_USE_REALTIME
  if (expect_true (have_realtime))
    {
      struct timespec ts;
      clock_gettime (CLOCK_REALTIME, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  {
    /* fallback: gettimeofday is universally available */
    struct timeval now;
    gettimeofday (&now, 0);
    return now.tv_sec + now.tv_usec * 1e-6;
  }
}
574
575
/* return the current time, preferring the monotonic clock when it is
 * available and known to work, falling back to wall-clock time */
ev_tstamp inline_size
get_clock (void)
{
#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      struct timespec ts;
      clock_gettime (CLOCK_MONOTONIC, &ts);
      return ts.tv_sec + ts.tv_nsec * 1e-9;
    }
#endif

  return ev_time ();
}
589
590
#if EV_MULTIPLICITY
591
ev_tstamp
592
ev_now (EV_P)
593
{
594
  return ev_rt_now;
595
}
596
#endif
597
598
/* sleep for the given fractional number of seconds; nonpositive delays
 * return immediately */
void
ev_sleep (ev_tstamp delay)
{
  if (delay > 0.)
    {
#if EV_USE_NANOSLEEP
      struct timespec ts;

      ts.tv_sec  = (time_t)delay;
      ts.tv_nsec = (long)((delay - (ev_tstamp)(ts.tv_sec)) * 1e9);

      nanosleep (&ts, 0);
#elif defined(_WIN32)
      Sleep ((unsigned long)(delay * 1e3));
#else
      struct timeval tv;

      tv.tv_sec  = (time_t)delay;
      tv.tv_usec = (long)((delay - (ev_tstamp)(tv.tv_sec)) * 1e6);

      /* here we rely on sys/time.h + sys/types.h + unistd.h providing select */
      /* something not guaranteed by newer posix versions, but guaranteed */
      /* by older ones */
      select (0, 0, 0, 0, &tv);
#endif
    }
}
625
626
/*****************************************************************************/
627
628
#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */

/* compute the next capacity for a dynamic array of elem-byte elements:
 * grow geometrically from cur until at least cnt elements fit, then round
 * large allocations to malloc-friendly byte sizes */
int inline_size
array_nextsize (int elem, int cur, int cnt)
{
  /* at least double (cur + 1), then keep doubling until cnt fits */
  int ncur = (cur + 1) << 1;

  while (cnt > ncur)
    ncur <<= 1;

  /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
  if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
    {
      ncur *= elem;
      ncur = (ncur + elem + (MALLOC_ROUND - 1) + sizeof (void *) * 4) & ~(MALLOC_ROUND - 1);
      ncur = ncur - sizeof (void *) * 4;
      ncur /= elem;
    }

  return ncur;
}
650
651
/* grow base to hold at least cnt elements of elem bytes each,
 * updating *cur to the new capacity; returns the new base pointer */
static noinline void *
array_realloc (int elem, void *base, int *cur, int cnt)
{
  int ncur = array_nextsize (elem, *cur, cnt);

  *cur = ncur;
  return ev_realloc (base, elem * ncur);
}
657
658
/* zero-initialize count elements starting at base */
#define array_init_zero(base,count)        \
  memset ((void *)(base), 0, sizeof (*(base)) * (count))

/* ensure the dynamic array (base, capacity cur) can hold at least cnt
 * elements of the given type, running init on any newly added elements */
#define array_needsize(type,base,cur,cnt,init)                        \
  if (expect_false ((cnt) > (cur)))                                \
    {                                                                \
      int ocur_ = (cur);                                        \
      (base) = (type *)array_realloc                                \
         (sizeof (type), (base), &(cur), (cnt));                \
      init ((base) + (ocur_), (cur) - ocur_);                        \
    }

/* shrink an over-allocated array again -- currently unused */
#if 0
#define array_slim(type,stem)                                        \
  if (stem ## max < array_roundsize (stem ## cnt >> 2))                \
    {                                                                \
      stem ## max = array_roundsize (stem ## cnt >> 1);                \
      base = (type *)ev_realloc (base, sizeof (type) * (stem ## max));\
      fprintf (stderr, "slimmed down " # stem " to %d\n", stem ## max);/*D*/\
    }
#endif

/* free a dynamic array and reset its count/capacity to zero */
#define array_free(stem, idx) \
  ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0;
682
683
/*****************************************************************************/
684
685
/* queue an event for the given watcher: if the watcher is already pending,
 * merge the new event bits into its pending entry, otherwise append a new
 * entry to the per-priority pending array */
void noinline
ev_feed_event (EV_P_ void *w, int revents)
{
  W w_ = (W)w;
  int pri = ABSPRI (w_);

  if (expect_false (w_->pending))
    /* already pending: just OR in the additional event bits */
    pendings [pri][w_->pending - 1].events |= revents;
  else
    {
      /* pending is the 1-based index into the pending array */
      w_->pending = ++pendingcnt [pri];
      array_needsize (ANPENDING, pendings [pri], pendingmax [pri], w_->pending, EMPTY2);
      pendings [pri][w_->pending - 1].w      = w_;
      pendings [pri][w_->pending - 1].events = revents;
    }
}
701
702
void inline_speed
703
queue_events (EV_P_ W *events, int eventcnt, int type)
704
{
705
  int i;
706
707
  for (i = 0; i < eventcnt; ++i)
708
    ev_feed_event (EV_A_ events [i], type);
709
}
710
711
/*****************************************************************************/
712
713
/* deliver the given revents to every io watcher on fd that is interested
 * in at least one of them */
void inline_speed
fd_event (EV_P_ int fd, int revents)
{
  ANFD *anfd = anfds + fd;
  ev_io *w;

  for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
    {
      /* only feed the intersection of what happened and what was asked for */
      int ev = w->events & revents;

      if (ev)
        ev_feed_event (EV_A_ (W)w, ev);
    }
}
727
728
void
729
ev_feed_fd_event (EV_P_ int fd, int revents)
730
{
731
  if (fd >= 0 && fd < anfdmax)
732
    fd_event (EV_A_ fd, revents);
733
}
734
735
/* propagate all queued fd changes (the fdchanges array, filled by fd_change)
 * to the backend: recompute the wanted event set per fd and call
 * backend_modify where it differs from the currently registered set */
void inline_size
fd_reify (EV_P)
{
  int i;

  for (i = 0; i < fdchangecnt; ++i)
    {
      int fd = fdchanges [i];
      ANFD *anfd = anfds + fd;
      ev_io *w;

      unsigned char events = 0;

      /* union of the events all watchers on this fd are interested in */
      for (w = (ev_io *)anfd->head; w; w = (ev_io *)((WL)w)->next)
        events |= (unsigned char)w->events;

#if EV_SELECT_IS_WINSOCKET
      if (events)
        {
          /* winsock select needs the underlying SOCKET handle, not the fd */
          unsigned long arg;
          #ifdef EV_FD_TO_WIN32_HANDLE
            anfd->handle = EV_FD_TO_WIN32_HANDLE (fd);
          #else
            anfd->handle = _get_osfhandle (fd);
          #endif
          assert (("libev: only socket fds supported in this configuration", ioctlsocket (anfd->handle, FIONREAD, &arg) == 0));
        }
#endif

      {
        unsigned char o_events = anfd->events;
        unsigned char o_reify  = anfd->reify;

        anfd->reify  = 0;
        anfd->events = events;

        /* notify the backend only on a real change, or when a forced
         * re-registration was requested via the EV_IOFDSET flag */
        if (o_events != events || o_reify & EV_IOFDSET)
          backend_modify (EV_A_ fd, o_events, events);
      }
    }

  fdchangecnt = 0;
}
778
779
void inline_size
780
fd_change (EV_P_ int fd, int flags)
781
{
782
  unsigned char reify = anfds [fd].reify;
783
  anfds [fd].reify |= flags;
784
785
  if (expect_true (!reify))
786
    {
787
      ++fdchangecnt;
788
      array_needsize (int, fdchanges, fdchangemax, fdchangecnt, EMPTY2);
789
      fdchanges [fdchangecnt - 1] = fd;
790
    }
791
}
792
793
/* forcibly stop every watcher on fd and feed it an error event
 * (used when the fd itself has gone bad) */
void inline_speed
fd_kill (EV_P_ int fd)
{
  ev_io *w;

  while ((w = (ev_io *)anfds [fd].head))
    {
      ev_io_stop (EV_A_ w);
      ev_feed_event (EV_A_ (W)w, EV_ERROR | EV_READ | EV_WRITE);
    }
}
804
805
/* check whether the given fd refers to a valid (open) file descriptor */
int inline_size
fd_valid (int fd)
{
#ifdef _WIN32
  return _get_osfhandle (fd) != -1;
#else
  return fcntl (fd, F_GETFD) != -1;
#endif
}
814
815
/* called on EBADF to verify fds */
816
static void noinline
817
fd_ebadf (EV_P)
818
{
819
  int fd;
820
821
  for (fd = 0; fd < anfdmax; ++fd)
822
    if (anfds [fd].events)
823
      if (!fd_valid (fd) && errno == EBADF)
824
        fd_kill (EV_A_ fd);
825
}
826
827
/* called on ENOMEM in select/poll to kill some fds and retry */
828
static void noinline
829
fd_enomem (EV_P)
830
{
831
  int fd;
832
833
  for (fd = anfdmax; fd--; )
834
    if (anfds [fd].events)
835
      {
836
        fd_kill (EV_A_ fd);
837
        return;
838
      }
839
}
840
841
/* usually called after fork if backend needs to re-arm all fds from scratch */
static void noinline
fd_rearm_all (EV_P)
{
  int fd;

  for (fd = 0; fd < anfdmax; ++fd)
    if (anfds [fd].events)
      {
        /* forget the cached registration so fd_reify re-registers via
         * backend_modify (EV_IOFDSET forces it even if events match) */
        anfds [fd].events = 0;
        anfds [fd].emask  = 0;
        fd_change (EV_A_ fd, EV_IOFDSET | 1);
      }
}
855
856
/*****************************************************************************/
857
858
/*
 * the heap functions want a real array index. array index 0 is guaranteed to not
 * be in-use at any time. the first heap entry is at array [HEAP0]. DHEAP gives
 * the branching factor of the d-tree.
 */
863
864
/*
865
 * at the moment we allow libev the luxury of two heaps,
866
 * a small-code-size 2-heap one and a ~1.5kb larger 4-heap
867
 * which is more cache-efficient.
868
 * the difference is about 5% with 50000+ watchers.
869
 */
870
#if EV_USE_4HEAP
871
872
#define DHEAP 4
873
#define HEAP0 (DHEAP - 1) /* index of first element in heap */
874
#define HPARENT(k) ((((k) - HEAP0 - 1) / DHEAP) + HEAP0)
875
#define UPHEAP_DONE(p,k) ((p) == (k))
876
877
/* away from the root: move the element at index k towards the leaves of
 * the 4-heap until the heap (min-)property is restored, keeping each
 * watcher's active field in sync with its heap position */
void inline_speed
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];
  ANHE *E = heap + N + HEAP0; /* one past the last heap element */

  for (;;)
    {
      ev_tstamp minat;
      ANHE *minpos;
      ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1; /* first child of k */

      /* find minimum child */
      if (expect_true (pos + DHEAP - 1 < E))
        {
          /* fast path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (               ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else if (pos < E)
        {
          /* slow path */                               (minpos = pos + 0), (minat = ANHE_at (*minpos));
          if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
          if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
          if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
        }
      else
        break; /* k has no children: done */

      if (ANHE_at (he) <= minat)
        break; /* heap property holds here */

      /* pull the smallest child up and continue below it */
      heap [k] = *minpos;
      ev_active (ANHE_w (*minpos)) = k;

      k = minpos - heap;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
920
921
#else /* 4HEAP */
922
923
#define HEAP0 1
924
#define HPARENT(k) ((k) >> 1)
925
#define UPHEAP_DONE(p,k) (!(p))
926
927
/* away from the root: move the element at index k towards the leaves of
 * the 2-heap until the heap (min-)property is restored, keeping each
 * watcher's active field in sync with its heap position */
void inline_speed
downheap (ANHE *heap, int N, int k)
{
  ANHE he = heap [k];

  for (;;)
    {
      int c = k << 1; /* left child */

      if (c > N + HEAP0 - 1)
        break; /* no children: done */

      /* pick the smaller of the two children, if a right child exists */
      c += c + 1 < N + HEAP0 && ANHE_at (heap [c]) > ANHE_at (heap [c + 1])
           ? 1 : 0;

      if (ANHE_at (he) <= ANHE_at (heap [c]))
        break; /* heap property holds here */

      heap [k] = heap [c];
      ev_active (ANHE_w (heap [k])) = k;

      k = c;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
955
#endif
956
957
/* towards the root: move the element at index k towards the root until
 * the heap (min-)property is restored, keeping each watcher's active
 * field in sync with its heap position */
void inline_speed
upheap (ANHE *heap, int k)
{
  ANHE he = heap [k];

  for (;;)
    {
      int p = HPARENT (k);

      if (UPHEAP_DONE (p, k) || ANHE_at (heap [p]) <= ANHE_at (he))
        break; /* reached the root or the heap property holds */

      heap [k] = heap [p];
      ev_active (ANHE_w (heap [k])) = k;
      k = p;
    }

  heap [k] = he;
  ev_active (ANHE_w (he)) = k;
}
978
979
void inline_size
980
adjustheap (ANHE *heap, int N, int k)
981
{
982
  if (k > HEAP0 && ANHE_at (heap [HPARENT (k)]) >= ANHE_at (heap [k]))
983
    upheap (heap, k);
984
  else
985
    downheap (heap, N, k);
986
}
987
988
/* rebuild the heap: this function is used only once and executed rarely */
/* N is the number of elements; entries live at heap [HEAP0 .. HEAP0 + N - 1] */
void inline_size
reheap (ANHE *heap, int N)
{
  int i;

  /* we don't use floyds algorithm, upheap is simpler and is more cache-efficient */
  /* also, this is easy to implement and correct for both 2-heaps and 4-heaps */
  for (i = 0; i < N; ++i)
    upheap (heap, i + HEAP0);
}
999
1000
/*****************************************************************************/
1001
1002
/* per-signal bookkeeping (one entry per signal number) */
typedef struct
{
  WL head;            /* linked list of signal watchers for this signal */
  EV_ATOMIC_T gotsig; /* set from the signal handler, cleared when the event is fed */
} ANSIG;

static ANSIG *signals;
static int signalmax;

/* set from any signal handler to request a loop wakeup via the evpipe */
static EV_ATOMIC_T gotsig;
1012
1013
/*****************************************************************************/
1014
1015
/* make an fd suitable for libev-internal use: close-on-exec and
 * nonblocking (on win32 only nonblocking mode is set) */
void inline_speed
fd_intern (int fd)
{
#ifdef _WIN32
  unsigned long arg = 1;
  ioctlsocket (_get_osfhandle (fd), FIONBIO, &arg);
#else
  fcntl (fd, F_SETFD, FD_CLOEXEC);
  fcntl (fd, F_SETFL, O_NONBLOCK);
#endif
}
1026
1027
/* lazily create the internal wakeup mechanism (an eventfd where available,
 * otherwise a pipe) and start the pipeev watcher on its read end */
static void noinline
evpipe_init (EV_P)
{
  if (!ev_is_active (&pipeev))
    {
#if EV_USE_EVENTFD
      if ((evfd = eventfd (0, 0)) >= 0)
        {
          evpipe [0] = -1; /* mark the pipe fds as unused */
          fd_intern (evfd);
          ev_io_set (&pipeev, evfd, EV_READ);
        }
      else
#endif
        {
          /* eventfd unavailable or failed: fall back to a pipe */
          while (pipe (evpipe))
            ev_syserr ("(libev) error creating signal/async pipe");

          fd_intern (evpipe [0]);
          fd_intern (evpipe [1]);
          ev_io_set (&pipeev, evpipe [0], EV_READ);
        }

      ev_io_start (EV_A_ &pipeev);
      ev_unref (EV_A); /* watcher should not keep loop alive */
    }
}
1054
1055
/* wake up the event loop by writing to the eventfd/pipe; *flag is an
 * atomic request flag -- if it is already set, a wakeup is pending and
 * nothing needs to be written */
void inline_size
evpipe_write (EV_P_ EV_ATOMIC_T *flag)
{
  if (!*flag)
    {
      int old_errno = errno; /* save errno because write might clobber it */

      *flag = 1;

#if EV_USE_EVENTFD
      if (evfd >= 0)
        {
          uint64_t counter = 1;
          write (evfd, &counter, sizeof (uint64_t));
        }
      else
#endif
        /* the byte value written is irrelevant; only the wakeup matters */
        write (evpipe [1], &old_errno, 1);

      errno = old_errno;
    }
}
1077
1078
/* callback invoked when the internal eventfd/pipe becomes readable:
 * drain it, then dispatch any pending signal and async events */
static void
pipecb (EV_P_ ev_io *iow, int revents)
{
#if EV_USE_EVENTFD
  if (evfd >= 0)
    {
      uint64_t counter;
      read (evfd, &counter, sizeof (uint64_t));
    }
  else
#endif
    {
      char dummy;
      read (evpipe [0], &dummy, 1);
    }

  /* signals are only ever delivered to the default loop */
  if (gotsig && ev_is_default_loop (EV_A))
    {
      int signum;
      gotsig = 0;

      for (signum = signalmax; signum--; )
        if (signals [signum].gotsig)
          ev_feed_signal_event (EV_A_ signum + 1);
    }

#if EV_ASYNC_ENABLE
  /* dispatch all async watchers whose sent flag was set via ev_async_send */
  if (gotasync)
    {
      int i;
      gotasync = 0;

      for (i = asynccnt; i--; )
        if (asyncs [i]->sent)
          {
            asyncs [i]->sent = 0;
            ev_feed_event (EV_A_ asyncs [i], EV_ASYNC);
          }
    }
#endif
}
1119
1120
/*****************************************************************************/
1121
1122
/* the signal handler installed for libev-managed signals: record the
 * signal in the per-signal gotsig flag and wake up the default loop */
static void
ev_sighandler (int signum)
{
#if EV_MULTIPLICITY
  struct ev_loop *loop = &default_loop_struct;
#endif

#if _WIN32
  /* re-install the handler -- NOTE(review): presumably because signal
   * dispositions are reset on delivery here; verify on win32 */
  signal (signum, ev_sighandler);
#endif

  signals [signum - 1].gotsig = 1;
  evpipe_write (EV_A_ &gotsig);
}
1136
1137
/* feed an EV_SIGNAL event for the given (1-based) signal number to every
 * watcher registered for it; out-of-range signal numbers are ignored */
void noinline
ev_feed_signal_event (EV_P_ int signum)
{
  WL w;

#if EV_MULTIPLICITY
  assert (("libev: feeding signal events is only supported in the default loop", loop == ev_default_loop_ptr));
#endif

  --signum;

  if (signum < 0 || signum >= signalmax)
    return;

  signals [signum].gotsig = 0;

  for (w = signals [signum].head; w; w = w->next)
    ev_feed_event (EV_A_ (W)w, EV_SIGNAL);
}
1156
1157
/*****************************************************************************/
1158
1159
/* hash table of child watchers, indexed by pid & (EV_PID_HASHSIZE - 1)
 * (see child_reap) */
static WL childs [EV_PID_HASHSIZE];

#ifndef _WIN32

/* the internal signal watcher driving child watchers
 * (presumably hooked to SIGCHLD -- set up elsewhere in this file) */
static ev_signal childev;

#ifndef WIFCONTINUED
# define WIFCONTINUED(status) 0 /* some systems lack WIFCONTINUED */
#endif
1168
1169
/* feed EV_CHILD events to all watchers in the given hash chain that match
 * pid (or watch any pid, w->pid == 0); stop/continue notifications are
 * only delivered to watchers that asked for them (w->flags & 1) */
void inline_speed
child_reap (EV_P_ int chain, int pid, int status)
{
  ev_child *w;
  int traced = WIFSTOPPED (status) || WIFCONTINUED (status);

  for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next)
    {
      if ((w->pid == pid || !w->pid)
          && (!traced || (w->flags & 1)))
        {
          ev_set_priority (w, EV_MAXPRI); /* need to do it *now*, this *must* be the same prio as the signal watcher itself */
          w->rpid    = pid;
          w->rstatus = status;
          ev_feed_event (EV_A_ (W)w, EV_CHILD);
        }
    }
}
1187
1188
#ifndef WCONTINUED
1189
# define WCONTINUED 0
1190
#endif
1191
1192
/* SIGCHLD callback: reap one child via waitpid and dispatch EV_CHILD
 * events to the matching watchers; re-feeds itself so all pending
 * children get reaped one per invocation */
static void
childcb (EV_P_ ev_signal *sw, int revents)
{
  int pid, status;

  /* some systems define WCONTINUED but then fail to support it (linux 2.4) */
  if (0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED | WCONTINUED)))
    if (!WCONTINUED
        || errno != EINVAL
        || 0 >= (pid = waitpid (-1, &status, WNOHANG | WUNTRACED)))
      return;

  /* make sure we are called again until all children have been reaped */
  /* we need to do it this way so that the callback gets called before we continue */
  ev_feed_event (EV_A_ (W)sw, EV_SIGNAL);

  child_reap (EV_A_ pid, pid, status);
  if (EV_PID_HASHSIZE > 1)
    child_reap (EV_A_ 0, pid, status); /* this might trigger a watcher twice, but feed_event catches that */
}
1212
1213
#endif
1214
1215
/*****************************************************************************/
1216
1217
#if EV_USE_PORT
1218
# include "ev_port.c"
1219
#endif
1220
#if EV_USE_KQUEUE
1221
# include "ev_kqueue.c"
1222
#endif
1223
#if EV_USE_EPOLL
1224
# include "ev_epoll.c"
1225
#endif
1226
#if EV_USE_POLL
1227
# include "ev_poll.c"
1228
#endif
1229
#if EV_USE_SELECT
1230
# include "ev_select.c"
1231
#endif
1232
1233
/* compile-time ABI major version of this libev build */
int
ev_version_major (void)
{
  return EV_VERSION_MAJOR;
}
1238
1239
/* compile-time ABI minor version of this libev build */
int
ev_version_minor (void)
{
  return EV_VERSION_MINOR;
}
1244
1245
/* return true if we are running with elevated privileges and should ignore env variables */
1246
int inline_size
1247
enable_secure (void)
1248
{
1249
#ifdef _WIN32
1250
  return 0;
1251
#else
1252
  return getuid () != geteuid ()
1253
      || getgid () != getegid ();
1254
#endif
1255
}
1256
1257
unsigned int
1258
ev_supported_backends (void)
1259
{
1260
  unsigned int flags = 0;
1261
1262
  if (EV_USE_PORT  ) flags |= EVBACKEND_PORT;
1263
  if (EV_USE_KQUEUE) flags |= EVBACKEND_KQUEUE;
1264
  if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
1265
  if (EV_USE_POLL  ) flags |= EVBACKEND_POLL;
1266
  if (EV_USE_SELECT) flags |= EVBACKEND_SELECT;
1267
  
1268
  return flags;
1269
}
1270
1271
/* return the supported backends minus those known to misbehave on the
 * platform we were compiled for */
unsigned int
ev_recommended_backends (void)
{
  unsigned int flags = ev_supported_backends ();

#ifndef __NetBSD__
  /* kqueue is borked on everything but netbsd apparently */
  /* it usually doesn't work correctly on anything but sockets and pipes */
  flags &= ~EVBACKEND_KQUEUE;
#endif
#ifdef __APPLE__
  /* only select works correctly on that "unix-certified" platform */
  flags &= ~EVBACKEND_KQUEUE; /* horribly broken, even for sockets */
  flags &= ~EVBACKEND_POLL;   /* poll is based on kqueue from 10.5 onwards */
#endif

  return flags;
}
1289
1290
unsigned int
1291
ev_embeddable_backends (void)
1292
{
1293
  int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
1294
1295
  /* epoll embeddability broken on all linux versions up to at least 2.6.23 */
1296
  /* please fix it and tell me how to detect the fix */
1297
  flags &= ~EVBACKEND_EPOLL;
1298
1299
  return flags;
1300
}
1301
1302
/* return the EVBACKEND_* flag of the backend this loop is using */
unsigned int
ev_backend (EV_P)
{
  return backend;
}
1307
1308
/* number of backend_poll invocations performed so far on this loop */
unsigned int
ev_loop_count (EV_P)
{
  return loop_count;
}
1313
1314
/* set the minimum block time used to collect more I/O events per iteration */
void
ev_set_io_collect_interval (EV_P_ ev_tstamp interval)
{
  io_blocktime = interval;
}
1319
1320
/* set the minimum wait time used to collect more timeouts per iteration */
void
ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval)
{
  timeout_blocktime = interval;
}
1325
1326
/* initialise a (zero-initialised) loop structure: probe clocks, set up
 * timebases and defaults, pick a backend, and prepare the wakeup pipe
 * watcher; a no-op when the loop already has a backend */
static void noinline
loop_init (EV_P_ unsigned int flags)
{
  if (!backend)
    {
#if EV_USE_REALTIME
      /* probe CLOCK_REALTIME availability once, process-wide */
      if (!have_realtime)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_REALTIME, &ts))
            have_realtime = 1;
        }
#endif

#if EV_USE_MONOTONIC
      /* probe CLOCK_MONOTONIC availability once, process-wide */
      if (!have_monotonic)
        {
          struct timespec ts;

          if (!clock_gettime (CLOCK_MONOTONIC, &ts))
            have_monotonic = 1;
        }
#endif

      ev_rt_now         = ev_time ();
      mn_now            = get_clock ();
      now_floor         = mn_now;
      rtmn_diff         = ev_rt_now - mn_now; /* realtime-monotonic offset, kept current by time_update */

      io_blocktime      = 0.;
      timeout_blocktime = 0.;
      backend           = 0;
      backend_fd        = -1;
      gotasync          = 0;
#if EV_USE_INOTIFY
      fs_fd             = -2;
#endif

      /* pid check not overridable via env */
#ifndef _WIN32
      if (flags & EVFLAG_FORKCHECK)
        curpid = getpid ();
#endif

      /* allow LIBEV_FLAGS to override, unless forbidden or running set*id */
      if (!(flags & EVFLAG_NOENV)
          && !enable_secure ()
          && getenv ("LIBEV_FLAGS"))
        flags = atoi (getenv ("LIBEV_FLAGS"));

      if (!(flags & 0x0000ffffU))
        flags |= ev_recommended_backends ();

      /* try backends in decreasing order of preference */
#if EV_USE_PORT
      if (!backend && (flags & EVBACKEND_PORT  )) backend = port_init   (EV_A_ flags);
#endif
#if EV_USE_KQUEUE
      if (!backend && (flags & EVBACKEND_KQUEUE)) backend = kqueue_init (EV_A_ flags);
#endif
#if EV_USE_EPOLL
      if (!backend && (flags & EVBACKEND_EPOLL )) backend = epoll_init  (EV_A_ flags);
#endif
#if EV_USE_POLL
      if (!backend && (flags & EVBACKEND_POLL  )) backend = poll_init   (EV_A_ flags);
#endif
#if EV_USE_SELECT
      if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
#endif

      ev_init (&pipeev, pipecb);
      ev_set_priority (&pipeev, EV_MAXPRI);
    }
}
1399
1400
/* free all resources held by a loop: wakeup pipe/eventfd, inotify and
 * backend fds, the backend itself, and all watcher arrays */
static void noinline
loop_destroy (EV_P)
{
  int i;

  if (ev_is_active (&pipeev))
    {
      ev_ref (EV_A); /* signal watcher */
      ev_io_stop (EV_A_ &pipeev);

#if EV_USE_EVENTFD
      if (evfd >= 0)
        close (evfd);
#endif

      if (evpipe [0] >= 0)
        {
          close (evpipe [0]);
          close (evpipe [1]);
        }
    }

#if EV_USE_INOTIFY
  if (fs_fd >= 0)
    close (fs_fd);
#endif

  if (backend_fd >= 0)
    close (backend_fd);

#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_destroy   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_destroy (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_destroy  (EV_A);
#endif
#if EV_USE_POLL
  if (backend == EVBACKEND_POLL  ) poll_destroy   (EV_A);
#endif
#if EV_USE_SELECT
  if (backend == EVBACKEND_SELECT) select_destroy (EV_A);
#endif

  /* per-priority arrays */
  for (i = NUMPRI; i--; )
    {
      array_free (pending, [i]);
#if EV_IDLE_ENABLE
      array_free (idle, [i]);
#endif
    }

  ev_free (anfds); anfdmax = 0;

  /* have to use the microsoft-never-gets-it-right macro */
  array_free (fdchange, EMPTY);
  array_free (timer, EMPTY);
#if EV_PERIODIC_ENABLE
  array_free (periodic, EMPTY);
#endif
#if EV_FORK_ENABLE
  array_free (fork, EMPTY);
#endif
  array_free (prepare, EMPTY);
  array_free (check, EMPTY);
#if EV_ASYNC_ENABLE
  array_free (async, EMPTY);
#endif

  backend = 0; /* marks the loop as uninitialised */
}
1473
1474
#if EV_USE_INOTIFY
1475
void inline_size infy_fork (EV_P);
1476
#endif
1477
1478
/* re-establish kernel state in the child after a fork: let the backend
 * and inotify recover, then rebuild the wakeup pipe */
void inline_size
loop_fork (EV_P)
{
#if EV_USE_PORT
  if (backend == EVBACKEND_PORT  ) port_fork   (EV_A);
#endif
#if EV_USE_KQUEUE
  if (backend == EVBACKEND_KQUEUE) kqueue_fork (EV_A);
#endif
#if EV_USE_EPOLL
  if (backend == EVBACKEND_EPOLL ) epoll_fork  (EV_A);
#endif
#if EV_USE_INOTIFY
  infy_fork (EV_A);
#endif

  if (ev_is_active (&pipeev))
    {
      /* this "locks" the handlers against writing to the pipe */
      /* while we modify the fd vars */
      gotsig = 1;
#if EV_ASYNC_ENABLE
      gotasync = 1;
#endif

      ev_ref (EV_A);
      ev_io_stop (EV_A_ &pipeev);

#if EV_USE_EVENTFD
      if (evfd >= 0)
        close (evfd);
#endif

      if (evpipe [0] >= 0)
        {
          close (evpipe [0]);
          close (evpipe [1]);
        }

      evpipe_init (EV_A);
      /* now iterate over everything, in case we missed something */
      pipecb (EV_A_ &pipeev, EV_READ);
    }

  postfork = 0;
}
1524
1525
#if EV_MULTIPLICITY
1526
1527
struct ev_loop *
1528
ev_loop_new (unsigned int flags)
1529
{
1530
  struct ev_loop *loop = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
1531
1532
  memset (loop, 0, sizeof (struct ev_loop));
1533
1534
  loop_init (EV_A_ flags);
1535
1536
  if (ev_backend (EV_A))
1537
    return loop;
1538
1539
  return 0;
1540
}
1541
1542
/* destroy a loop created by ev_loop_new and free its memory */
void
ev_loop_destroy (EV_P)
{
  loop_destroy (EV_A);
  ev_free (loop);
}
1548
1549
/* flag this loop for post-fork reinitialisation on the next iteration */
void
ev_loop_fork (EV_P)
{
  postfork = 1; /* must be in line with ev_default_fork */
}
1554
1555
#if EV_VERIFY
1556
/* consistency checks on a single watcher: sane priority and, if marked
 * pending, actually present in the pending queue slot it claims */
static void noinline
verify_watcher (EV_P_ W w)
{
  assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));

  if (w->pending)
    assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
}
1564
1565
/* verify the structural invariants of a timer/periodic heap: active
 * indices, heap ordering, and the cached "at" values */
static void noinline
verify_heap (EV_P_ ANHE *heap, int N)
{
  int i;

  for (i = HEAP0; i < N + HEAP0; ++i)
    {
      assert (("libev: active index mismatch in heap", ev_active (ANHE_w (heap [i])) == i));
      assert (("libev: heap condition violated", i == HEAP0 || ANHE_at (heap [HPARENT (i)]) <= ANHE_at (heap [i])));
      assert (("libev: heap at cache mismatch", ANHE_at (heap [i]) == ev_at (ANHE_w (heap [i]))));

      verify_watcher (EV_A_ (W)ANHE_w (heap [i]));
    }
}
1579
1580
/* verify a flat watcher array: each watcher's active index must be its
 * 1-based position in the array */
static void noinline
array_verify (EV_P_ W *ws, int cnt)
{
  while (cnt--)
    {
      assert (("libev: active index mismatch", ev_active (ws [cnt]) == cnt + 1));
      verify_watcher (EV_A_ ws [cnt]);
    }
}
1589
#endif
1590
1591
/* run all internal consistency checks on the loop; compiles to a no-op
 * unless EV_VERIFY is enabled */
void
ev_loop_verify (EV_P)
{
#if EV_VERIFY
  int i;
  WL w;

  assert (activecnt >= -1);

  assert (fdchangemax >= fdchangecnt);
  for (i = 0; i < fdchangecnt; ++i)
    assert (("libev: negative fd in fdchanges", fdchanges [i] >= 0));

  assert (anfdmax >= 0);
  for (i = 0; i < anfdmax; ++i)
    for (w = anfds [i].head; w; w = w->next)
      {
        verify_watcher (EV_A_ (W)w);
        assert (("libev: inactive fd watcher on anfd list", ev_active (w) == 1));
        assert (("libev: fd mismatch between watcher and anfd", ((ev_io *)w)->fd == i));
      }

  assert (timermax >= timercnt);
  verify_heap (EV_A_ timers, timercnt);

#if EV_PERIODIC_ENABLE
  assert (periodicmax >= periodiccnt);
  verify_heap (EV_A_ periodics, periodiccnt);
#endif

  for (i = NUMPRI; i--; )
    {
      assert (pendingmax [i] >= pendingcnt [i]);
#if EV_IDLE_ENABLE
      assert (idleall >= 0);
      assert (idlemax [i] >= idlecnt [i]);
      array_verify (EV_A_ (W *)idles [i], idlecnt [i]);
#endif
    }

#if EV_FORK_ENABLE
  assert (forkmax >= forkcnt);
  array_verify (EV_A_ (W *)forks, forkcnt);
#endif

#if EV_ASYNC_ENABLE
  assert (asyncmax >= asynccnt);
  array_verify (EV_A_ (W *)asyncs, asynccnt);
#endif

  assert (preparemax >= preparecnt);
  array_verify (EV_A_ (W *)prepares, preparecnt);

  assert (checkmax >= checkcnt);
  array_verify (EV_A_ (W *)checks, checkcnt);

  /* disabled scaffolding for signal/child checks, kept for reference */
# if 0
  for (w = (ev_child *)childs [chain & (EV_PID_HASHSIZE - 1)]; w; w = (ev_child *)((WL)w)->next)
  for (signum = signalmax; signum--; ) if (signals [signum].gotsig)
# endif
#endif
}
1653
1654
#endif /* multiplicity */
1655
1656
/* initialise (once) and return the default loop; also installs the
 * SIGCHLD watcher used for ev_child support on unix */
#if EV_MULTIPLICITY
struct ev_loop *
ev_default_loop_init (unsigned int flags)
#else
int
ev_default_loop (unsigned int flags)
#endif
{
  if (!ev_default_loop_ptr)
    {
#if EV_MULTIPLICITY
      struct ev_loop *loop = ev_default_loop_ptr = &default_loop_struct;
#else
      ev_default_loop_ptr = 1;
#endif

      loop_init (EV_A_ flags);

      if (ev_backend (EV_A))
        {
#ifndef _WIN32
          ev_signal_init (&childev, childcb, SIGCHLD);
          ev_set_priority (&childev, EV_MAXPRI);
          ev_signal_start (EV_A_ &childev);
          ev_unref (EV_A); /* child watcher should not keep loop alive */
#endif
        }
      else
        ev_default_loop_ptr = 0; /* init failed, allow a retry */
    }

  return ev_default_loop_ptr;
}
1689
1690
/* tear down the default loop, undoing ev_default_loop_init */
void
ev_default_destroy (void)
{
#if EV_MULTIPLICITY
  struct ev_loop *loop = ev_default_loop_ptr;
#endif

  ev_default_loop_ptr = 0;

#ifndef _WIN32
  ev_ref (EV_A); /* child watcher */
  ev_signal_stop (EV_A_ &childev);
#endif

  loop_destroy (EV_A);
}
1706
1707
/* flag the default loop for post-fork reinitialisation */
void
ev_default_fork (void)
{
#if EV_MULTIPLICITY
  struct ev_loop *loop = ev_default_loop_ptr;
#endif

  postfork = 1; /* must be in line with ev_loop_fork */
}
1716
1717
/*****************************************************************************/
1718
1719
/* invoke a watcher's callback directly with the given revents */
void
ev_invoke (EV_P_ void *w, int revents)
{
  EV_CB_INVOKE ((W)w, revents);
}
1724
1725
/* drain the pending queues and invoke callbacks, highest priority
 * first; entries with a zero watcher were cancelled via clear_pending
 * and are skipped */
void inline_speed
call_pending (EV_P)
{
  int pri;

  for (pri = NUMPRI; pri--; )
    while (pendingcnt [pri])
      {
        ANPENDING *p = pendings [pri] + --pendingcnt [pri];

        if (expect_true (p->w))
          {
            /*assert (("libev: non-pending watcher on pending list", p->w->pending));*/

            p->w->pending = 0; /* clear before invoking, callback may re-feed */
            EV_CB_INVOKE (p->w, p->events);
            EV_FREQUENT_CHECK;
          }
      }
}
1745
1746
#if EV_IDLE_ENABLE
1747
/* queue idle watchers of the highest priority band that has no other
 * pending events; higher-priority pending events suppress idles */
void inline_size
idle_reify (EV_P)
{
  if (expect_false (idleall))
    {
      int pri;

      for (pri = NUMPRI; pri--; )
        {
          if (pendingcnt [pri])
            break;

          if (idlecnt [pri])
            {
              queue_events (EV_A_ (W *)idles [pri], idlecnt [pri], EV_IDLE);
              break;
            }
        }
    }
}
1767
#endif
1768
1769
/* make all expired relative timers pending: repeating timers are pushed
 * forward and re-heapified, one-shot timers are stopped */
void inline_size
timers_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  while (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
    {
      ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);

      /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/

      /* first reschedule or stop timer */
      if (w->repeat)
        {
          ev_at (w) += w->repeat;
          if (ev_at (w) < mn_now)
            ev_at (w) = mn_now; /* never schedule into the past */

          assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));

          ANHE_at_cache (timers [HEAP0]);
          downheap (timers, timercnt, HEAP0);
        }
      else
        ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */

      EV_FREQUENT_CHECK;
      ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
    }
}
1799
1800
#if EV_PERIODIC_ENABLE
1801
/* make all expired periodic (wall-clock) watchers pending, rescheduling
 * via the user callback, the interval formula, or stopping one-shots */
void inline_size
periodics_reify (EV_P)
{
  EV_FREQUENT_CHECK;

  while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
    {
      ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);

      /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/

      /* first reschedule or stop timer */
      if (w->reschedule_cb)
        {
          ev_at (w) = w->reschedule_cb (w, ev_rt_now);

          assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));

          ANHE_at_cache (periodics [HEAP0]);
          downheap (periodics, periodiccnt, HEAP0);
        }
      else if (w->interval)
        {
          ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
          /* if next trigger time is not sufficiently in the future, put it there */
          /* this might happen because of floating point inexactness */
          if (ev_at (w) - ev_rt_now < TIME_EPSILON)
            {
              ev_at (w) += w->interval;

              /* if interval is unreasonably low we might still have a time in the past */
              /* so correct this. this will make the periodic very inexact, but the user */
              /* has effectively asked to get triggered more often than possible */
              if (ev_at (w) < ev_rt_now)
                ev_at (w) = ev_rt_now;
            }

          ANHE_at_cache (periodics [HEAP0]);
          downheap (periodics, periodiccnt, HEAP0);
        }
      else
        ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */

      EV_FREQUENT_CHECK;
      ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
    }
}
1848
1849
/* recompute all periodic trigger times after a detected wall-clock time
 * jump, then rebuild the heap in one pass */
static void noinline
periodics_reschedule (EV_P)
{
  int i;

  /* adjust periodics after time jump */
  for (i = HEAP0; i < periodiccnt + HEAP0; ++i)
    {
      ev_periodic *w = (ev_periodic *)ANHE_w (periodics [i]);

      if (w->reschedule_cb)
        ev_at (w) = w->reschedule_cb (w, ev_rt_now);
      else if (w->interval)
        ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;

      ANHE_at_cache (periodics [i]);
    }

  reheap (periodics, periodiccnt);
}
1869
#endif
1870
1871
/* bring ev_rt_now/mn_now up to date and detect time jumps; max_block is
 * the longest we could have slept, used to distinguish a jump from a
 * legitimately long block when only the realtime clock is available */
void inline_speed
time_update (EV_P_ ev_tstamp max_block)
{
  int i;

#if EV_USE_MONOTONIC
  if (expect_true (have_monotonic))
    {
      ev_tstamp odiff = rtmn_diff;

      mn_now = get_clock ();

      /* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
      /* interpolate in the meantime */
      if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
        {
          ev_rt_now = rtmn_diff + mn_now;
          return;
        }

      now_floor = mn_now;
      ev_rt_now = ev_time ();

      /* loop a few times, before making important decisions.
       * on the choice of "4": one iteration isn't enough,
       * in case we get preempted during the calls to
       * ev_time and get_clock. a second call is almost guaranteed
       * to succeed in that case, though. and looping a few more times
       * doesn't hurt either as we only do this on time-jumps or
       * in the unlikely event of having been preempted here.
       */
      for (i = 4; --i; )
        {
          rtmn_diff = ev_rt_now - mn_now;

          if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
            return; /* all is well */

          ev_rt_now = ev_time ();
          mn_now    = get_clock ();
          now_floor = mn_now;
        }

      /* offset changed by more than MIN_TIMEJUMP: treat as a time jump */
# if EV_PERIODIC_ENABLE
      periodics_reschedule (EV_A);
# endif
      /* no timer adjustment, as the monotonic clock doesn't jump */
      /* timers_reschedule (EV_A_ rtmn_diff - odiff) */
    }
  else
#endif
    {
      ev_rt_now = ev_time ();

      if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
        {
#if EV_PERIODIC_ENABLE
          periodics_reschedule (EV_A);
#endif
          /* adjust timers. this is easy, as the offset is the same for all of them */
          for (i = 0; i < timercnt; ++i)
            {
              ANHE *he = timers + i + HEAP0;
              ANHE_w (*he)->at += ev_rt_now - mn_now;
              ANHE_at_cache (*he);
            }
        }

      mn_now = ev_rt_now;
    }
}
1942
1943
/* increment the loop's activity refcount (keeps ev_loop running) */
void
ev_ref (EV_P)
{
  ++activecnt;
}
1948
1949
/* decrement the loop's activity refcount */
void
ev_unref (EV_P)
{
  --activecnt;
}
1954
1955
/* refresh the loop's notion of the current time on user request */
void
ev_now_update (EV_P)
{
  time_update (EV_A_ 1e100);
}
1960
1961
static int loop_done;
1962
1963
/* the main event loop: run iterations of fork-check / prepare / poll /
 * timers / idle / check until stopped, out of work, or (with
 * EVLOOP_ONESHOT/EVLOOP_NONBLOCK) after at most one pass */
void
ev_loop (EV_P_ int flags)
{
  loop_done = EVUNLOOP_CANCEL;

  call_pending (EV_A); /* in case we recurse, ensure ordering stays nice and clean */

  do
    {
#if EV_VERIFY >= 2
      ev_loop_verify (EV_A);
#endif

#ifndef _WIN32
      if (expect_false (curpid)) /* penalise the forking check even more */
        if (expect_false (getpid () != curpid))
          {
            curpid = getpid ();
            postfork = 1;
          }
#endif

#if EV_FORK_ENABLE
      /* we might have forked, so queue fork handlers */
      if (expect_false (postfork))
        if (forkcnt)
          {
            queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
            call_pending (EV_A);
          }
#endif

      /* queue prepare watchers (and execute them) */
      if (expect_false (preparecnt))
        {
          queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
          call_pending (EV_A);
        }

      if (expect_false (!activecnt))
        break;

      /* we might have forked, so reify kernel state if necessary */
      if (expect_false (postfork))
        loop_fork (EV_A);

      /* update fd-related kernel structures */
      fd_reify (EV_A);

      /* calculate blocking time */
      {
        ev_tstamp waittime  = 0.;
        ev_tstamp sleeptime = 0.;

        if (expect_true (!(flags & EVLOOP_NONBLOCK || idleall || !activecnt)))
          {
            /* update time to cancel out callback processing overhead */
            time_update (EV_A_ 1e100);

            waittime = MAX_BLOCKTIME;

            if (timercnt)
              {
                ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge;
                if (waittime > to) waittime = to;
              }

#if EV_PERIODIC_ENABLE
            if (periodiccnt)
              {
                ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
                if (waittime > to) waittime = to;
              }
#endif

            /* honour the timeout-collect interval as a lower bound */
            if (expect_false (waittime < timeout_blocktime))
              waittime = timeout_blocktime;

            sleeptime = waittime - backend_fudge;

            if (expect_true (sleeptime > io_blocktime))
              sleeptime = io_blocktime;

            if (sleeptime)
              {
                ev_sleep (sleeptime);
                waittime -= sleeptime;
              }
          }

        ++loop_count;
        backend_poll (EV_A_ waittime);

        /* update ev_rt_now, do magic */
        time_update (EV_A_ waittime + sleeptime);
      }

      /* queue pending timers and reschedule them */
      timers_reify (EV_A); /* relative timers called last */
#if EV_PERIODIC_ENABLE
      periodics_reify (EV_A); /* absolute timers called first */
#endif

#if EV_IDLE_ENABLE
      /* queue idle watchers unless other events are pending */
      idle_reify (EV_A);
#endif

      /* queue check watchers, to be executed first */
      if (expect_false (checkcnt))
        queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);

      call_pending (EV_A);
    }
  while (expect_true (
    activecnt
    && !loop_done
    && !(flags & (EVLOOP_ONESHOT | EVLOOP_NONBLOCK))
  ));

  if (loop_done == EVUNLOOP_ONE)
    loop_done = EVUNLOOP_CANCEL; /* EVUNLOOP_ONE stops only this (innermost) call */
}
2086
2087
/* request ev_loop to return (EVUNLOOP_ONE or EVUNLOOP_ALL) */
void
ev_unloop (EV_P_ int how)
{
  loop_done = how;
}
2092
2093
/*****************************************************************************/
2094
2095
/* prepend a watcher to a singly-linked watcher list */
void inline_size
wlist_add (WL *head, WL elem)
{
  elem->next = *head;
  *head = elem;
}
2101
2102
void inline_size
2103
wlist_del (WL *head, WL elem)
2104
{
2105
  while (*head)
2106
    {
2107
      if (*head == elem)
2108
        {
2109
          *head = elem->next;
2110
          return;
2111
        }
2112
2113
      head = &(*head)->next;
2114
    }
2115
}
2116
2117
void inline_speed
2118
clear_pending (EV_P_ W w)
2119
{
2120
  if (w->pending)
2121
    {
2122
      pendings [ABSPRI (w)][w->pending - 1].w = 0;
2123
      w->pending = 0;
2124
    }
2125
}
2126
2127
/* cancel a watcher's pending event and return the revents that would
 * have been delivered, or 0 if nothing was pending */
int
ev_clear_pending (EV_P_ void *w)
{
  W w_ = (W)w;
  int pending = w_->pending;

  if (expect_true (pending))
    {
      ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
      w_->pending = 0;
      p->w = 0; /* call_pending skips zeroed entries */
      return p->events;
    }
  else
    return 0;
}
2143
2144
void inline_size
2145
pri_adjust (EV_P_ W w)
2146
{
2147
  int pri = w->priority;
2148
  pri = pri < EV_MINPRI ? EV_MINPRI : pri;
2149
  pri = pri > EV_MAXPRI ? EV_MAXPRI : pri;
2150
  w->priority = pri;
2151
}
2152
2153
/* common watcher start: clamp priority, record the active index/flag,
 * and keep the loop alive */
void inline_speed
ev_start (EV_P_ W w, int active)
{
  pri_adjust (EV_A_ w);
  w->active = active;
  ev_ref (EV_A);
}
2160
2161
/* common watcher stop: drop the loop reference and mark inactive */
void inline_size
ev_stop (EV_P_ W w)
{
  ev_unref (EV_A);
  w->active = 0;
}
2167
2168
/*****************************************************************************/
2169
2170
/* start an I/O watcher: register it on its fd's watcher list and flag
 * the fd for kernel-state reification */
void noinline
ev_io_start (EV_P_ ev_io *w)
{
  int fd = w->fd;

  if (expect_false (ev_is_active (w)))
    return;

  assert (("libev: ev_io_start called with negative fd", fd >= 0));
  assert (("libev: ev_io start called with illegal event mask", !(w->events & ~(EV_IOFDSET | EV_READ | EV_WRITE))));

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, 1);
  array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
  wlist_add (&anfds[fd].head, (WL)w);

  /* note: & binds tighter than |, i.e. (w->events & EV_IOFDSET) | 1 */
  fd_change (EV_A_ fd, w->events & EV_IOFDSET | 1);
  w->events &= ~EV_IOFDSET;

  EV_FREQUENT_CHECK;
}
2192
2193
/* stop an I/O watcher: unlink it from its fd and flag the fd so the
 * backend can drop interest if no watchers remain */
void noinline
ev_io_stop (EV_P_ ev_io *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));

  EV_FREQUENT_CHECK;

  wlist_del (&anfds[w->fd].head, (WL)w);
  ev_stop (EV_A_ (W)w);

  fd_change (EV_A_ w->fd, 1);

  EV_FREQUENT_CHECK;
}
2211
2212
/* start a relative timer: convert its "after" value to an absolute
 * monotonic deadline and insert it into the timer heap */
void noinline
ev_timer_start (EV_P_ ev_timer *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  ev_at (w) += mn_now; /* relative -> absolute monotonic time */

  assert (("libev: ev_timer_start called with negative timer repeat value", w->repeat >= 0.));

  EV_FREQUENT_CHECK;

  ++timercnt;
  ev_start (EV_A_ (W)w, timercnt + HEAP0 - 1); /* active index == heap slot */
  array_needsize (ANHE, timers, timermax, ev_active (w) + 1, EMPTY2);
  ANHE_w (timers [ev_active (w)]) = (WT)w;
  ANHE_at_cache (timers [ev_active (w)]);
  upheap (timers, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
}
2235
2236
/* stop a timer: remove it from the heap (filling the hole with the last
 * element) and convert its deadline back to a relative value */
void noinline
ev_timer_stop (EV_P_ ev_timer *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal timer heap corruption", ANHE_w (timers [active]) == (WT)w));

    --timercnt;

    if (expect_true (active < timercnt + HEAP0))
      {
        /* move the former last element into the vacated slot and re-sift */
        timers [active] = timers [timercnt + HEAP0];
        adjustheap (timers, timercnt, active);
      }
  }

  EV_FREQUENT_CHECK;

  ev_at (w) -= mn_now; /* absolute -> relative, so restart works */

  ev_stop (EV_A_ (W)w);
}
2265
2266
/* restart a timer as a repeat timer: re-arm to now+repeat if active,
 * start it if inactive with a repeat, or stop it if repeat is zero */
void noinline
ev_timer_again (EV_P_ ev_timer *w)
{
  EV_FREQUENT_CHECK;

  if (ev_is_active (w))
    {
      if (w->repeat)
        {
          ev_at (w) = mn_now + w->repeat;
          ANHE_at_cache (timers [ev_active (w)]);
          adjustheap (timers, timercnt, ev_active (w));
        }
      else
        ev_timer_stop (EV_A_ w);
    }
  else if (w->repeat)
    {
      ev_at (w) = w->repeat;
      ev_timer_start (EV_A_ w);
    }

  EV_FREQUENT_CHECK;
}
2290
2291
#if EV_PERIODIC_ENABLE
2292
/* start a periodic (wall-clock) watcher: compute its first absolute
 * trigger time and insert it into the periodics heap */
void noinline
ev_periodic_start (EV_P_ ev_periodic *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  if (w->reschedule_cb)
    ev_at (w) = w->reschedule_cb (w, ev_rt_now);
  else if (w->interval)
    {
      assert (("libev: ev_periodic_start called with negative interval value", w->interval >= 0.));
      /* this formula differs from the one in periodic_reify because we do not always round up */
      ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
    }
  else
    ev_at (w) = w->offset; /* absolute one-shot */

  EV_FREQUENT_CHECK;

  ++periodiccnt;
  ev_start (EV_A_ (W)w, periodiccnt + HEAP0 - 1); /* active index == heap slot */
  array_needsize (ANHE, periodics, periodicmax, ev_active (w) + 1, EMPTY2);
  ANHE_w (periodics [ev_active (w)]) = (WT)w;
  ANHE_at_cache (periodics [ev_active (w)]);
  upheap (periodics, ev_active (w));

  EV_FREQUENT_CHECK;

  /*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
}
2322
2323
/* stop a periodic watcher: remove it from the periodics heap, filling
 * the hole with the last element */
void noinline
ev_periodic_stop (EV_P_ ev_periodic *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    assert (("libev: internal periodic heap corruption", ANHE_w (periodics [active]) == (WT)w));

    --periodiccnt;

    if (expect_true (active < periodiccnt + HEAP0))
      {
        /* move the former last element into the vacated slot and re-sift */
        periodics [active] = periodics [periodiccnt + HEAP0];
        adjustheap (periodics, periodiccnt, active);
      }
  }

  EV_FREQUENT_CHECK;

  ev_stop (EV_A_ (W)w);
}
2350
2351
/* re-arm a periodic watcher by restarting it from scratch */
void noinline
ev_periodic_again (EV_P_ ev_periodic *w)
{
  /* TODO: use adjustheap and recalculation */
  ev_periodic_stop (EV_A_ w);
  ev_periodic_start (EV_A_ w);
}
2358
#endif
2359
2360
#ifndef SA_RESTART
2361
# define SA_RESTART 0
2362
#endif
2363
2364
void noinline
2365
ev_signal_start (EV_P_ ev_signal *w)
2366
{
2367
#if EV_MULTIPLICITY
2368
  assert (("libev: signal watchers are only supported in the default loop", loop == ev_default_loop_ptr));
2369
#endif
2370
  if (expect_false (ev_is_active (w)))
2371
    return;
2372
2373
  assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0));
2374
2375
  evpipe_init (EV_A);
2376
2377
  EV_FREQUENT_CHECK;
2378
2379
  {
2380
#ifndef _WIN32
2381
    sigset_t full, prev;
2382
    sigfillset (&full);
2383
    sigprocmask (SIG_SETMASK, &full, &prev);
2384
#endif
2385
2386
    array_needsize (ANSIG, signals, signalmax, w->signum, array_init_zero);
2387
2388
#ifndef _WIN32
2389
    sigprocmask (SIG_SETMASK, &prev, 0);
2390
#endif
2391
  }
2392
2393
  ev_start (EV_A_ (W)w, 1);
2394
  wlist_add (&signals [w->signum - 1].head, (WL)w);
2395
2396
  if (!((WL)w)->next)
2397
    {
2398
#if _WIN32
2399
      signal (w->signum, ev_sighandler);
2400
#else
2401
      struct sigaction sa;
2402
      sa.sa_handler = ev_sighandler;
2403
      sigfillset (&sa.sa_mask);
2404
      sa.sa_flags = SA_RESTART; /* if restarting works we save one iteration */
2405
      sigaction (w->signum, &sa, 0);
2406
#endif
2407
    }
2408
2409
  EV_FREQUENT_CHECK;
2410
}
2411
2412
void noinline
2413
ev_signal_stop (EV_P_ ev_signal *w)
2414
{
2415
  clear_pending (EV_A_ (W)w);
2416
  if (expect_false (!ev_is_active (w)))
2417
    return;
2418
2419
  EV_FREQUENT_CHECK;
2420
2421
  wlist_del (&signals [w->signum - 1].head, (WL)w);
2422
  ev_stop (EV_A_ (W)w);
2423
2424
  if (!signals [w->signum - 1].head)
2425
    signal (w->signum, SIG_DFL);
2426
2427
  EV_FREQUENT_CHECK;
2428
}
2429
2430
void
2431
ev_child_start (EV_P_ ev_child *w)
2432
{
2433
#if EV_MULTIPLICITY
2434
  assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
2435
#endif
2436
  if (expect_false (ev_is_active (w)))
2437
    return;
2438
2439
  EV_FREQUENT_CHECK;
2440
2441
  ev_start (EV_A_ (W)w, 1);
2442
  wlist_add (&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w);
2443
2444
  EV_FREQUENT_CHECK;
2445
}
2446
2447
void
2448
ev_child_stop (EV_P_ ev_child *w)
2449
{
2450
  clear_pending (EV_A_ (W)w);
2451
  if (expect_false (!ev_is_active (w)))
2452
    return;
2453
2454
  EV_FREQUENT_CHECK;
2455
2456
  wlist_del (&childs [w->pid & (EV_PID_HASHSIZE - 1)], (WL)w);
2457
  ev_stop (EV_A_ (W)w);
2458
2459
  EV_FREQUENT_CHECK;
2460
}
2461
2462
#if EV_STAT_ENABLE

# ifdef _WIN32
#  undef lstat
#  define lstat(a,b) _stati64 (a,b)
# endif

/* polling intervals for stat watchers; the odd fractional values are
 * presumably chosen to avoid aligning with common whole-second timers --
 * NOTE(review): rationale inferred, confirm against libev documentation */
#define DEF_STAT_INTERVAL  5.0074891
#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
#define MIN_STAT_INTERVAL  0.1074891

static void noinline stat_timer_cb (EV_P_ ev_timer *w_, int revents);

#if EV_USE_INOTIFY
# define EV_INOTIFY_BUFSIZE 8192
static void noinline
2479
infy_add (EV_P_ ev_stat *w)
2480
{
2481
  w->wd = inotify_add_watch (fs_fd, w->path, IN_ATTRIB | IN_DELETE_SELF | IN_MOVE_SELF | IN_MODIFY | IN_DONT_FOLLOW | IN_MASK_ADD);
2482
2483
  if (w->wd < 0)
2484
    {
2485
      w->timer.repeat = w->interval ? w->interval : DEF_STAT_INTERVAL;
2486
      ev_timer_again (EV_A_ &w->timer); /* this is not race-free, so we still need to recheck periodically */
2487
2488
      /* monitor some parent directory for speedup hints */
2489
      /* note that exceeding the hardcoded path limit is not a correctness issue, */
2490
      /* but an efficiency issue only */
2491
      if ((errno == ENOENT || errno == EACCES) && strlen (w->path) < 4096)
2492
        {
2493
          char path [4096];
2494
          strcpy (path, w->path);
2495
2496
          do
2497
            {
2498
              int mask = IN_MASK_ADD | IN_DELETE_SELF | IN_MOVE_SELF
2499
                       | (errno == EACCES ? IN_ATTRIB : IN_CREATE | IN_MOVED_TO);
2500
2501
              char *pend = strrchr (path, '/');
2502
2503
              if (!pend || pend == path)
2504
                break;
2505
2506
              *pend = 0;
2507
              w->wd = inotify_add_watch (fs_fd, path, mask);
2508
            } 
2509
          while (w->wd < 0 && (errno == ENOENT || errno == EACCES));
2510
        }
2511
    }
2512
2513
  if (w->wd >= 0)
2514
    {
2515
      wlist_add (&fs_hash [w->wd & (EV_INOTIFY_HASHSIZE - 1)].head, (WL)w);
2516
2517
      /* now local changes will be tracked by inotify, but remote changes won't */
2518
      /* unless the filesystem it known to be local, we therefore still poll */
2519
      /* also do poll on <2.6.25, but with normal frequency */
2520
      struct statfs sfs;
2521
2522
      if (fs_2625 && !statfs (w->path, &sfs))
2523
        if (sfs.f_type == 0x1373 /* devfs */
2524
            || sfs.f_type == 0xEF53 /* ext2/3 */
2525
            || sfs.f_type == 0x3153464a /* jfs */
2526
            || sfs.f_type == 0x52654973 /* reiser3 */
2527
            || sfs.f_type == 0x01021994 /* tempfs */
2528
            || sfs.f_type == 0x58465342 /* xfs */)
2529
          return;
2530
2531
      w->timer.repeat = w->interval ? w->interval : fs_2625 ? NFS_STAT_INTERVAL : DEF_STAT_INTERVAL;
2532
      ev_timer_again (EV_A_ &w->timer);
2533
    }
2534
}
2535
2536
static void noinline
2537
infy_del (EV_P_ ev_stat *w)
2538
{
2539
  int slot;
2540
  int wd = w->wd;
2541
2542
  if (wd < 0)
2543
    return;
2544
2545
  w->wd = -2;
2546
  slot = wd & (EV_INOTIFY_HASHSIZE - 1);
2547
  wlist_del (&fs_hash [slot].head, (WL)w);
2548
2549
  /* remove this watcher, if others are watching it, they will rearm */
2550
  inotify_rm_watch (fs_fd, wd);
2551
}
2552
2553
static void noinline
2554
infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
2555
{
2556
  if (slot < 0)
2557
    /* overflow, need to check for all hash slots */
2558
    for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot)
2559
      infy_wd (EV_A_ slot, wd, ev);
2560
  else
2561
    {
2562
      WL w_;
2563
2564
      for (w_ = fs_hash [slot & (EV_INOTIFY_HASHSIZE - 1)].head; w_; )
2565
        {
2566
          ev_stat *w = (ev_stat *)w_;
2567
          w_ = w_->next; /* lets us remove this watcher and all before it */
2568
2569
          if (w->wd == wd || wd == -1)
2570
            {
2571
              if (ev->mask & (IN_IGNORED | IN_UNMOUNT | IN_DELETE_SELF))
2572
                {
2573
                  wlist_del (&fs_hash [slot & (EV_INOTIFY_HASHSIZE - 1)].head, (WL)w);
2574
                  w->wd = -1;
2575
                  infy_add (EV_A_ w); /* re-add, no matter what */
2576
                }
2577
2578
              stat_timer_cb (EV_A_ &w->timer, 0);
2579
            }
2580
        }
2581
    }
2582
}
2583
2584
static void
2585
infy_cb (EV_P_ ev_io *w, int revents)
2586
{
2587
  char buf [EV_INOTIFY_BUFSIZE];
2588
  struct inotify_event *ev = (struct inotify_event *)buf;
2589
  int ofs;
2590
  int len = read (fs_fd, buf, sizeof (buf));
2591
2592
  for (ofs = 0; ofs < len; ofs += sizeof (struct inotify_event) + ev->len)
2593
    infy_wd (EV_A_ ev->wd, ev->wd, ev);
2594
}
2595
2596
void inline_size
2597
check_2625 (EV_P)
2598
{
2599
  /* kernels < 2.6.25 are borked
2600
   * http://www.ussg.indiana.edu/hypermail/linux/kernel/0711.3/1208.html
2601
   */
2602
  struct utsname buf;
2603
  int major, minor, micro;
2604
2605
  if (uname (&buf))
2606
    return;
2607
2608
  if (sscanf (buf.release, "%d.%d.%d", &major, &minor, &micro) != 3)
2609
    return;
2610
2611
  if (major < 2
2612
      || (major == 2 && minor < 6)
2613
      || (major == 2 && minor == 6 && micro < 25))
2614
    return;
2615
2616
  fs_2625 = 1;
2617
}
2618
2619
void inline_size
2620
infy_init (EV_P)
2621
{
2622
  if (fs_fd != -2)
2623
    return;
2624
2625
  fs_fd = -1;
2626
2627
  check_2625 (EV_A);
2628
2629
  fs_fd = inotify_init ();
2630
2631
  if (fs_fd >= 0)
2632
    {
2633
      ev_io_init (&fs_w, infy_cb, fs_fd, EV_READ);
2634
      ev_set_priority (&fs_w, EV_MAXPRI);
2635
      ev_io_start (EV_A_ &fs_w);
2636
    }
2637
}
2638
2639
void inline_size
2640
infy_fork (EV_P)
2641
{
2642
  int slot;
2643
2644
  if (fs_fd < 0)
2645
    return;
2646
2647
  close (fs_fd);
2648
  fs_fd = inotify_init ();
2649
2650
  for (slot = 0; slot < EV_INOTIFY_HASHSIZE; ++slot)
2651
    {
2652
      WL w_ = fs_hash [slot].head;
2653
      fs_hash [slot].head = 0;
2654
2655
      while (w_)
2656
        {
2657
          ev_stat *w = (ev_stat *)w_;
2658
          w_ = w_->next; /* lets us add this watcher */
2659
2660
          w->wd = -1;
2661
2662
          if (fs_fd >= 0)
2663
            infy_add (EV_A_ w); /* re-add, no matter what */
2664
          else
2665
            ev_timer_again (EV_A_ &w->timer);
2666
        }
2667
    }
2668
}
2669
2670
#endif
2671
2672
#ifdef _WIN32
2673
# define EV_LSTAT(p,b) _stati64 (p, b)
2674
#else
2675
# define EV_LSTAT(p,b) lstat (p, b)
2676
#endif
2677
2678
void
2679
ev_stat_stat (EV_P_ ev_stat *w)
2680
{
2681
  if (lstat (w->path, &w->attr) < 0)
2682
    w->attr.st_nlink = 0;
2683
  else if (!w->attr.st_nlink)
2684
    w->attr.st_nlink = 1;
2685
}
2686
2687
static void noinline
2688
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
2689
{
2690
  ev_stat *w = (ev_stat *)(((char *)w_) - offsetof (ev_stat, timer));
2691
2692
  /* we copy this here each the time so that */
2693
  /* prev has the old value when the callback gets invoked */
2694
  w->prev = w->attr;
2695
  ev_stat_stat (EV_A_ w);
2696
2697
  /* memcmp doesn't work on netbsd, they.... do stuff to their struct stat */
2698
  if (
2699
    w->prev.st_dev      != w->attr.st_dev
2700
    || w->prev.st_ino   != w->attr.st_ino
2701
    || w->prev.st_mode  != w->attr.st_mode
2702
    || w->prev.st_nlink != w->attr.st_nlink
2703
    || w->prev.st_uid   != w->attr.st_uid
2704
    || w->prev.st_gid   != w->attr.st_gid
2705
    || w->prev.st_rdev  != w->attr.st_rdev
2706
    || w->prev.st_size  != w->attr.st_size
2707
    || w->prev.st_atime != w->attr.st_atime
2708
    || w->prev.st_mtime != w->attr.st_mtime
2709
    || w->prev.st_ctime != w->attr.st_ctime
2710
  ) {
2711
      #if EV_USE_INOTIFY
2712
        if (fs_fd >= 0)
2713
          {
2714
            infy_del (EV_A_ w);
2715
            infy_add (EV_A_ w);
2716
            ev_stat_stat (EV_A_ w); /* avoid race... */
2717
          }
2718
      #endif
2719
2720
      ev_feed_event (EV_A_ w, EV_STAT);
2721
    }
2722
}
2723
2724
void
2725
ev_stat_start (EV_P_ ev_stat *w)
2726
{
2727
  if (expect_false (ev_is_active (w)))
2728
    return;
2729
2730
  ev_stat_stat (EV_A_ w);
2731
2732
  if (w->interval < MIN_STAT_INTERVAL && w->interval)
2733
    w->interval = MIN_STAT_INTERVAL;
2734
2735
  ev_timer_init (&w->timer, stat_timer_cb, 0., w->interval ? w->interval : DEF_STAT_INTERVAL);
2736
  ev_set_priority (&w->timer, ev_priority (w));
2737
2738
#if EV_USE_INOTIFY
2739
  infy_init (EV_A);
2740
2741
  if (fs_fd >= 0)
2742
    infy_add (EV_A_ w);
2743
  else
2744
#endif
2745
    ev_timer_again (EV_A_ &w->timer);
2746
2747
  ev_start (EV_A_ (W)w, 1);
2748
2749
  EV_FREQUENT_CHECK;
2750
}
2751
2752
void
2753
ev_stat_stop (EV_P_ ev_stat *w)
2754
{
2755
  clear_pending (EV_A_ (W)w);
2756
  if (expect_false (!ev_is_active (w)))
2757
    return;
2758
2759
  EV_FREQUENT_CHECK;
2760
2761
#if EV_USE_INOTIFY
2762
  infy_del (EV_A_ w);
2763
#endif
2764
  ev_timer_stop (EV_A_ &w->timer);
2765
2766
  ev_stop (EV_A_ (W)w);
2767
2768
  EV_FREQUENT_CHECK;
2769
}
2770
#endif
2771
2772
#if EV_IDLE_ENABLE
/* start an idle watcher: append it to the per-priority idle array */
void
ev_idle_start (EV_P_ ev_idle *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  pri_adjust (EV_A_ (W)w);

  EV_FREQUENT_CHECK;

  {
    int active = ++idlecnt [ABSPRI (w)];

    ++idleall; /* global count across all priorities */
    ev_start (EV_A_ (W)w, active);

    array_needsize (ev_idle *, idles [ABSPRI (w)], idlemax [ABSPRI (w)], active, EMPTY2);
    idles [ABSPRI (w)][active - 1] = w;
  }

  EV_FREQUENT_CHECK;
}

/* stop an idle watcher: swap the last array element into its slot */
void
ev_idle_stop (EV_P_ ev_idle *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    idles [ABSPRI (w)][active - 1] = idles [ABSPRI (w)][--idlecnt [ABSPRI (w)]];
    ev_active (idles [ABSPRI (w)][active - 1]) = active;

    ev_stop (EV_A_ (W)w);
    --idleall;
  }

  EV_FREQUENT_CHECK;
}
#endif
void
2820
ev_prepare_start (EV_P_ ev_prepare *w)
2821
{
2822
  if (expect_false (ev_is_active (w)))
2823
    return;
2824
2825
  EV_FREQUENT_CHECK;
2826
2827
  ev_start (EV_A_ (W)w, ++preparecnt);
2828
  array_needsize (ev_prepare *, prepares, preparemax, preparecnt, EMPTY2);
2829
  prepares [preparecnt - 1] = w;
2830
2831
  EV_FREQUENT_CHECK;
2832
}
2833
2834
void
2835
ev_prepare_stop (EV_P_ ev_prepare *w)
2836
{
2837
  clear_pending (EV_A_ (W)w);
2838
  if (expect_false (!ev_is_active (w)))
2839
    return;
2840
2841
  EV_FREQUENT_CHECK;
2842
2843
  {
2844
    int active = ev_active (w);
2845
2846
    prepares [active - 1] = prepares [--preparecnt];
2847
    ev_active (prepares [active - 1]) = active;
2848
  }
2849
2850
  ev_stop (EV_A_ (W)w);
2851
2852
  EV_FREQUENT_CHECK;
2853
}
2854
2855
void
2856
ev_check_start (EV_P_ ev_check *w)
2857
{
2858
  if (expect_false (ev_is_active (w)))
2859
    return;
2860
2861
  EV_FREQUENT_CHECK;
2862
2863
  ev_start (EV_A_ (W)w, ++checkcnt);
2864
  array_needsize (ev_check *, checks, checkmax, checkcnt, EMPTY2);
2865
  checks [checkcnt - 1] = w;
2866
2867
  EV_FREQUENT_CHECK;
2868
}
2869
2870
void
2871
ev_check_stop (EV_P_ ev_check *w)
2872
{
2873
  clear_pending (EV_A_ (W)w);
2874
  if (expect_false (!ev_is_active (w)))
2875
    return;
2876
2877
  EV_FREQUENT_CHECK;
2878
2879
  {
2880
    int active = ev_active (w);
2881
2882
    checks [active - 1] = checks [--checkcnt];
2883
    ev_active (checks [active - 1]) = active;
2884
  }
2885
2886
  ev_stop (EV_A_ (W)w);
2887
2888
  EV_FREQUENT_CHECK;
2889
}
2890
2891
#if EV_EMBED_ENABLE
/* run one non-blocking iteration of the embedded loop */
void noinline
ev_embed_sweep (EV_P_ ev_embed *w)
{
  ev_loop (w->other, EVLOOP_NONBLOCK);
}

/* io callback: the embedded loop's backend fd became readable */
static void
embed_io_cb (EV_P_ ev_io *io, int revents)
{
  ev_embed *w = (ev_embed *)(((char *)io) - offsetof (ev_embed, io));

  /* with a user callback, notify it; otherwise sweep the embedded loop ourselves */
  if (ev_cb (w))
    ev_feed_event (EV_A_ (W)w, EV_EMBED);
  else
    ev_loop (w->other, EVLOOP_NONBLOCK);
}

/* prepare callback: flush pending fd changes of the embedded loop
 * before the outer loop blocks */
static void
embed_prepare_cb (EV_P_ ev_prepare *prepare, int revents)
{
  ev_embed *w = (ev_embed *)(((char *)prepare) - offsetof (ev_embed, prepare));

  {
    /* shadow "loop" so the EV_A/EV_P macros operate on the embedded loop */
    struct ev_loop *loop = w->other;

    while (fdchangecnt)
      {
        fd_reify (EV_A);
        ev_loop (EV_A_ EVLOOP_NONBLOCK);
      }
  }
}

/* fork callback: restart embedding after the embedded loop re-arms post-fork */
static void
embed_fork_cb (EV_P_ ev_fork *fork_w, int revents)
{
  ev_embed *w = (ev_embed *)(((char *)fork_w) - offsetof (ev_embed, fork));

  ev_embed_stop (EV_A_ w);

  {
    /* shadow "loop" so the EV_A/EV_P macros operate on the embedded loop */
    struct ev_loop *loop = w->other;

    ev_loop_fork (EV_A);
    ev_loop (EV_A_ EVLOOP_NONBLOCK);
  }

  ev_embed_start (EV_A_ w);
}

#if 0
static void
embed_idle_cb (EV_P_ ev_idle *idle, int revents)
{
  ev_idle_stop (EV_A_ idle);
}
#endif
void
2951
ev_embed_start (EV_P_ ev_embed *w)
2952
{
2953
  if (expect_false (ev_is_active (w)))
2954
    return;
2955
2956
  {
2957
    struct ev_loop *loop = w->other;
2958
    assert (("libev: loop to be embedded is not embeddable", backend & ev_embeddable_backends ()));
2959
    ev_io_init (&w->io, embed_io_cb, backend_fd, EV_READ);
2960
  }
2961
2962
  EV_FREQUENT_CHECK;
2963
2964
  ev_set_priority (&w->io, ev_priority (w));
2965
  ev_io_start (EV_A_ &w->io);
2966
2967
  ev_prepare_init (&w->prepare, embed_prepare_cb);
2968
  ev_set_priority (&w->prepare, EV_MINPRI);
2969
  ev_prepare_start (EV_A_ &w->prepare);
2970
2971
  ev_fork_init (&w->fork, embed_fork_cb);
2972
  ev_fork_start (EV_A_ &w->fork);
2973
2974
  /*ev_idle_init (&w->idle, e,bed_idle_cb);*/
2975
2976
  ev_start (EV_A_ (W)w, 1);
2977
2978
  EV_FREQUENT_CHECK;
2979
}
2980
2981
void
2982
ev_embed_stop (EV_P_ ev_embed *w)
2983
{
2984
  clear_pending (EV_A_ (W)w);
2985
  if (expect_false (!ev_is_active (w)))
2986
    return;
2987
2988
  EV_FREQUENT_CHECK;
2989
2990
  ev_io_stop      (EV_A_ &w->io);
2991
  ev_prepare_stop (EV_A_ &w->prepare);
2992
  ev_fork_stop    (EV_A_ &w->fork);
2993
2994
  EV_FREQUENT_CHECK;
2995
}
2996
#endif
2997
2998
#if EV_FORK_ENABLE
/* start a fork watcher: append it to the forks array */
void
ev_fork_start (EV_P_ ev_fork *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++forkcnt);
  array_needsize (ev_fork *, forks, forkmax, forkcnt, EMPTY2);
  forks [forkcnt - 1] = w;

  EV_FREQUENT_CHECK;
}

/* stop a fork watcher: swap the last array element into its slot */
void
ev_fork_stop (EV_P_ ev_fork *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    forks [active - 1] = forks [--forkcnt];
    ev_active (forks [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}
#endif
#if EV_ASYNC_ENABLE
/* start an async watcher: append it to the asyncs array; the event pipe
 * is needed so ev_async_send can wake the loop from another thread */
void
ev_async_start (EV_P_ ev_async *w)
{
  if (expect_false (ev_is_active (w)))
    return;

  evpipe_init (EV_A);

  EV_FREQUENT_CHECK;

  ev_start (EV_A_ (W)w, ++asynccnt);
  array_needsize (ev_async *, asyncs, asyncmax, asynccnt, EMPTY2);
  asyncs [asynccnt - 1] = w;

  EV_FREQUENT_CHECK;
}

/* stop an async watcher: swap the last array element into its slot */
void
ev_async_stop (EV_P_ ev_async *w)
{
  clear_pending (EV_A_ (W)w);
  if (expect_false (!ev_is_active (w)))
    return;

  EV_FREQUENT_CHECK;

  {
    int active = ev_active (w);

    asyncs [active - 1] = asyncs [--asynccnt];
    ev_active (asyncs [active - 1]) = active;
  }

  ev_stop (EV_A_ (W)w);

  EV_FREQUENT_CHECK;
}

/* flag the watcher and poke the event pipe to wake up the loop */
void
ev_async_send (EV_P_ ev_async *w)
{
  w->sent = 1;
  evpipe_write (EV_A_ &gotasync);
}
#endif

/*****************************************************************************/
struct ev_once
3086
{
3087
  ev_io io;
3088
  ev_timer to;
3089
  void (*cb)(int revents, void *arg);
3090
  void *arg;
3091
};
3092
3093
static void
3094
once_cb (EV_P_ struct ev_once *once, int revents)
3095
{
3096
  void (*cb)(int revents, void *arg) = once->cb;
3097
  void *arg = once->arg;
3098
3099
  ev_io_stop    (EV_A_ &once->io);
3100
  ev_timer_stop (EV_A_ &once->to);
3101
  ev_free (once);
3102
3103
  cb (revents, arg);
3104
}
3105
3106
static void
3107
once_cb_io (EV_P_ ev_io *w, int revents)
3108
{
3109
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, io));
3110
3111
  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->to));
3112
}
3113
3114
static void
3115
once_cb_to (EV_P_ ev_timer *w, int revents)
3116
{
3117
  struct ev_once *once = (struct ev_once *)(((char *)w) - offsetof (struct ev_once, to));
3118
3119
  once_cb (EV_A_ once, revents | ev_clear_pending (EV_A_ &once->io));
3120
}
3121
3122
void
3123
ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
3124
{
3125
  struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
3126
3127
  if (expect_false (!once))
3128
    {
3129
      cb (EV_ERROR | EV_READ | EV_WRITE | EV_TIMEOUT, arg);
3130
      return;
3131
    }
3132
3133
  once->cb  = cb;
3134
  once->arg = arg;
3135
3136
  ev_init (&once->io, once_cb_io);
3137
  if (fd >= 0)
3138
    {
3139
      ev_io_set (&once->io, fd, events);
3140
      ev_io_start (EV_A_ &once->io);
3141
    }
3142
3143
  ev_init (&once->to, once_cb_to);
3144
  if (timeout >= 0.)
3145
    {
3146
      ev_timer_set (&once->to, timeout, 0.);
3147
      ev_timer_start (EV_A_ &once->to);
3148
    }
3149
}
3150
3151
#if EV_MULTIPLICITY
3152
  #include "ev_wrap.h"
3153
#endif
3154
3155
#ifdef __cplusplus
3156
}
3157
#endif