diff --git a/third_party/libev/CVS/Entries b/third_party/libev/CVS/Entries
index 58e81559256bbb08f9a7a1954b0cd1dfeaf0b619..581f3dc8fd22d8b2dfaa0359cb6f7361bf43ab78 100644
--- a/third_party/libev/CVS/Entries
+++ b/third_party/libev/CVS/Entries
@@ -1,30 +1,31 @@
-/Changes/1.224/Wed Feb 16 11:00:32 2011//
-/LICENSE/1.9/Wed Feb 16 11:00:01 2011//
-/Makefile.am/1.7/Wed Feb 16 11:00:32 2011//
-/README/1.20/Wed Feb 16 11:00:01 2011//
-/README.embed/1.29/Wed Feb 16 11:00:01 2011//
-/Symbols.ev/1.14/Wed Feb 16 11:00:32 2011//
-/Symbols.event/1.3/Wed Feb 16 11:00:01 2011//
-/autogen.sh/1.2/Wed Feb 16 11:00:32 2011//
-/configure.ac/1.31/Wed Feb 16 11:00:32 2011//
-/ev++.h/1.57/Wed Feb 16 11:00:32 2011//
-/ev.3/1.87/Wed Feb 16 11:00:32 2011//
-/ev.c/1.372/Wed Feb 16 11:00:32 2011//
-/ev.h/1.160/Wed Feb 16 11:00:32 2011//
-/ev.pod/1.366/Wed Feb 16 11:00:32 2011//
-/ev_epoll.c/1.60/Wed Feb 16 11:00:32 2011//
-/ev_kqueue.c/1.48/Wed Feb 16 11:00:32 2011//
-/ev_poll.c/1.36/Wed Feb 16 11:00:32 2011//
-/ev_port.c/1.26/Wed Feb 16 11:00:32 2011//
-/ev_select.c/1.52/Wed Feb 16 11:00:32 2011//
-/ev_vars.h/1.46/Wed Feb 16 11:00:32 2011//
-/ev_win32.c/1.14/Wed Feb 16 11:00:32 2011//
-/ev_wrap.h/1.32/Wed Feb 16 11:00:32 2011//
-/event.c/1.50/Wed Feb 16 11:00:32 2011//
-/event.h/1.24/Wed Feb 16 11:00:32 2011//
-/event_compat.h/1.8/Wed Feb 16 11:00:32 2011//
-/import_libevent/1.29/Wed Feb 16 11:00:01 2011//
-/libev.m4/1.14/Wed Feb 16 11:00:32 2011//
-/update_ev_wrap/1.4/Wed Feb 16 11:00:01 2011//
-/update_symbols/1.1/Wed Feb 16 11:00:01 2011//
+/Changes/1.262/Sat Apr 28 19:20:35 2012//
+/LICENSE/1.10/Sat Apr 28 19:20:35 2012//
+/Makefile.am/1.9/Sat Apr 28 19:20:35 2012//
+/README/1.21/Sat Apr 28 19:20:35 2012//
+/README.embed/1.29/Thu Apr 26 19:51:22 2012//
+/Symbols.ev/1.14/Thu Apr 26 19:51:22 2012//
+/Symbols.event/1.3/Thu Apr 26 19:51:22 2012//
+/autogen.sh/1.3/Sat Apr 28 19:20:35 2012//
+/configure.ac/1.34/Sat Apr 28 19:20:35 2012//
+/ev++.h/1.58/Sat Apr 28 19:20:35 2012//
+/ev.3/1.92/Sat Apr 28 19:20:36 2012//
+/ev.c/1.423/Sat Apr 28 19:20:36 2012//
+/ev.h/1.169/Sat Apr 28 19:20:36 2012//
+/ev.pod/1.404/Sat Apr 28 19:20:36 2012//
+/ev_epoll.c/1.66/Sat Apr 28 19:20:36 2012//
+/ev_kqueue.c/1.54/Sat Apr 28 19:20:36 2012//
+/ev_poll.c/1.39/Sat Apr 28 19:20:36 2012//
+/ev_port.c/1.28/Sat Apr 28 19:20:36 2012//
+/ev_select.c/1.55/Sat Apr 28 19:20:36 2012//
+/ev_vars.h/1.51/Sat Apr 28 19:20:36 2012//
+/ev_win32.c/1.15/Sat Apr 28 19:20:36 2012//
+/ev_wrap.h/1.36/Sat Apr 28 19:20:36 2012//
+/event.c/1.52/Sat Apr 28 19:20:36 2012//
+/event.h/1.26/Sat Apr 28 19:20:36 2012//
+/event_compat.h/1.8/Thu Apr 26 19:51:22 2012//
+/import_libevent/1.29/Thu Apr 26 19:51:22 2012//
+/libev.m4/1.15/Sat Apr 28 19:20:36 2012//
+/update_ev_c/1.2/Wed Jan 18 12:13:14 2012//
+/update_ev_wrap/1.4/Thu Apr 26 19:51:22 2012//
+/update_symbols/1.1/Thu Apr 26 19:51:22 2012//
 D
diff --git a/third_party/libev/Changes b/third_party/libev/Changes
index d6fca2f67cde91d0c45c9d2e4fbf450d7fb95c6c..a4ea069053b4742390436783e48646e71172e545 100644
--- a/third_party/libev/Changes
+++ b/third_party/libev/Changes
@@ -1,5 +1,58 @@
 Revision history for libev, a high-performance and full-featured event loop.
 
+TODO: ev_loop_wakeup
+TODO: EV_STANDALONE == NO_HASSEL (do not use clock_gettime in ev_standalone)
+
+TODO: document WSA_EV_USE_SOCKET in win32 part
+TODO: ^ OR use WSASend/WSARecv on the handle, which always works
+TODO: assert on fd watcher linked list pointing to itself
+	- add throw() to all libev functions that cannot throw exceptions, for
+          further code size decrease when compiling for C++.
+        - add throw () to callbacks that must not throw exceptions (allocator,
+          syserr, loop acquire/release, periodic reschedule cbs).
+	- fix event_base_loop return code, add event_get_callback, event_base_new,
+          event_base_get_method calls to improve libevent 1.x emulation and add
+          some libevent 2.x functionality (based on a patch by Jeff Davey).
+	- ev_run now returns a boolean status (true meaning watchers are
+          still active).
+	- ev_once: undef EV_ERROR in ev_kqueue.c, to avoid clashing with
+          libev's EV_ERROR (reported by 191919).
+	- (ecb) add memory fence support for xlC (Darin McBride).
+	- (ecb) add memory fence support for gcc-mips (Anton Kirilov).
+	- (ecb) add memory fence support for gcc-alpha (Christian Weisgerber).
+        - work around some kernels losing file descriptors by leaking
+          the kqueue descriptor in the child.
+        - include sys/syscall.h instead of plain syscall.h.
+
+4.11 Sat Feb  4 19:52:39 CET 2012
+	- INCOMPATIBLE CHANGE: ev_timer_again now clears the pending status, as
+          was documented already, but not implemented in the repeating case.
+        - new compiletime symbols: EV_NO_SMP and EV_NO_THREADS.
+	- fix a race where the workaround against the epoll fork bugs
+          caused signals to not be handled anymore.
+	- correct backend_fudge for most backends, and implement a windows
+          specific workaround to avoid looping because we call both
+          select and Sleep, both with different time resolutions.
+        - document range and guarantees of ev_sleep.
+        - document reasonable ranges for periodics interval and offset.
+        - rename backend_fudge to backend_mintime to avoid future confusion :)
+	- change the default periodic reschedule function to hopefully be more
+          exact and correct even in corner cases or in the far future.
+        - do not rely on -lm anymore: use it when available but use our
+          own floor () if it is missing. This should make it easier to embed,
+          as no external libraries are required.
+        - strategically import macros from libecb and mark rarely-used functions
+          as cache-cold (saving almost 2k code size on typical amd64 setups).
+        - add Symbols.ev and Symbols.event files, that were missing.
+        - fix backend_mintime value for epoll (was 1/1024, is 1/1000 now).
+        - fix #3 "be smart about timeouts" to not "deadlock" when
+          timeout == now, also improve the section overall.
+        - avoid "AVOIDING FINISHING BEFORE RETURNING" idiom.
+        - support new EV_API_STATIC mode to make all libev symbols
+          static.
+        - supply default CFLAGS of -g -O3 with gcc when original CFLAGS
+          were empty.
+
 4.04 Wed Feb 16 09:01:51 CET 2011
 	- fix two problems in the native win32 backend, where reuse of fd's
           with different underlying handles caused handles not to be removed
@@ -94,7 +147,7 @@ Revision history for libev, a high-performance and full-featured event loop.
           that this is a race condition regardless of EV_SIGNALFD.
 	- backport inotify code to C89.
         - inotify file descriptors could leak into child processes.
-        - ev_stat watchers could keep an errornous extra ref on the loop,
+        - ev_stat watchers could keep an erroneous extra ref on the loop,
           preventing exit when unregistering all watchers (testcases
           provided by ry@tinyclouds.org).
         - implement EV_WIN32_HANDLE_TO_FD and EV_WIN32_CLOSE_FD configuration
@@ -162,7 +215,7 @@ Revision history for libev, a high-performance and full-featured event loop.
           Malek Hadj-Ali).
         - implement ev_suspend and ev_resume.
         - new EV_CUSTOM revents flag for use by applications.
-        - add documentation section about priorites.
+        - add documentation section about priorities.
         - add a glossary to the dcoumentation.
         - extend the ev_fork description slightly.
         - optimize a jump out of call_pending.
diff --git a/third_party/libev/LICENSE b/third_party/libev/LICENSE
index 7fa0e9f844f0bcc1f34a48c63b2388b02ac5d9e6..777d67caa660f83f3dfff4f06a355321012c2590 100644
--- a/third_party/libev/LICENSE
+++ b/third_party/libev/LICENSE
@@ -1,4 +1,5 @@
-All files in libev are Copyright (C)2007,2008,2009 Marc Alexander Lehmann.
+All files in libev are
+Copyright (c)2007,2008,2009,2010,2011,2012 Marc Alexander Lehmann.
 
 Redistribution and use in source and binary forms, with or without
 modification, are permitted provided that the following conditions are
diff --git a/third_party/libev/Makefile.am b/third_party/libev/Makefile.am
index 058c2cb0c1efc542c9539bcde4499524d6eaa102..059305bc3d013f715345d962b2d9f0ab221336fe 100644
--- a/third_party/libev/Makefile.am
+++ b/third_party/libev/Makefile.am
@@ -5,7 +5,7 @@ VERSION_INFO = 4:0:0
 EXTRA_DIST = LICENSE Changes libev.m4 autogen.sh \
 	     ev_vars.h ev_wrap.h \
 	     ev_epoll.c ev_select.c ev_poll.c ev_kqueue.c ev_port.c ev_win32.c \
-	     ev.3 ev.pod
+	     ev.3 ev.pod Symbols.ev Symbols.event
 
 man_MANS = ev.3
 
@@ -16,3 +16,5 @@ lib_LTLIBRARIES = libev.la
 libev_la_SOURCES = ev.c event.c
 libev_la_LDFLAGS = -version-info $(VERSION_INFO)
 
+ev.3: ev.pod
+	pod2man -n LIBEV -r "libev-$(VERSION)" -c "libev - high performance full featured event loop" -s3 <$< >$@
diff --git a/third_party/libev/README b/third_party/libev/README
index ca403c6f470494320d5de96f75f16f99946c68ac..31f619387dec681abc6b26a11672cf07a51aafd6 100644
--- a/third_party/libev/README
+++ b/third_party/libev/README
@@ -24,23 +24,23 @@ ABOUT
    - relative timers/timeouts (handle time jumps).
    - fast intra-thread communication between multiple
      event loops (with optional fast linux eventfd backend).
-   - extremely easy to embed.
-   - very small codebase, no bloated library.
+   - extremely easy to embed (fully documented, no dependencies,
+     autoconf supported but optional).
+   - very small codebase, no bloated library, simple code.
    - fully extensible by being able to plug into the event loop,
      integrate other event loops, integrate other event loop users.
    - very little memory use (small watchers, small event loop data).
    - optional C++ interface allowing method and function callbacks
      at no extra memory or runtime overhead.
    - optional Perl interface with similar characteristics (capable
-     of running Glib/Gtk2 on libev, interfaces with Net::SNMP and
-     libadns).
+     of running Glib/Gtk2 on libev).
    - support for other languages (multiple C++ interfaces, D, Ruby,
      Python) available from third-parties.
 
-   Examples of programs that embed libev: the EV perl module,
-   rxvt-unicode, gvpe (GNU Virtual Private Ethernet), the Deliantra MMORPG
-   server (http://www.deliantra.net/), Rubinius (a next-generation Ruby
-   VM), the Ebb web server, the Rev event toolkit.
+   Examples of programs that embed libev: the EV perl module, node.js,
+   auditd, rxvt-unicode, gvpe (GNU Virtual Private Ethernet), the
+   Deliantra MMORPG server (http://www.deliantra.net/), Rubinius (a
+   next-generation Ruby VM), the Ebb web server, the Rev event toolkit.
 
 
 CONTRIBUTORS
diff --git a/third_party/libev/autogen.sh b/third_party/libev/autogen.sh
index 087d2aa4e963b767adc7a08d828852875e436686..8056ee7f9be66c09300ec241e922550f7aab9d34 100644
--- a/third_party/libev/autogen.sh
+++ b/third_party/libev/autogen.sh
@@ -1,6 +1,3 @@
 #!/bin/sh
 
-libtoolize --force
-automake --add-missing --force-missing
-autoreconf
-
+autoreconf --install --symlink --force
diff --git a/third_party/libev/configure.ac b/third_party/libev/configure.ac
index 03a784f2eba9bcc262637747cc3984b3d4eb2c8f..31d0a25fe4e847203600ba9054254f4f5dec76e0 100644
--- a/third_party/libev/configure.ac
+++ b/third_party/libev/configure.ac
@@ -1,17 +1,24 @@
+orig_CFLAGS="$CFLAGS"
+
 AC_INIT
 AC_CONFIG_SRCDIR([ev_epoll.c])
 
-AM_INIT_AUTOMAKE(libev,4.04) dnl also update ev.h!
+AM_INIT_AUTOMAKE(libev,4.11) dnl also update ev.h!
 AC_CONFIG_HEADERS([config.h])
 AM_MAINTAINER_MODE
 
-AC_PROG_INSTALL
-AC_PROG_LIBTOOL
+AC_PROG_CC
 
-if test "x$GCC" = xyes ; then
-  CFLAGS="-O3 $CFLAGS"
+dnl Supply default CFLAGS, if not specified
+if test -z "$orig_CFLAGS"; then
+  if test x$GCC = xyes; then
+    CFLAGS="-g -O3"
+  fi
 fi
 
+AC_PROG_INSTALL
+AC_PROG_LIBTOOL
+
 m4_include([libev.m4])
 
 AC_CONFIG_FILES([Makefile])
diff --git a/third_party/libev/ev++.h b/third_party/libev/ev++.h
index ce42b5f2de0ef0241d258db609f26eab72f4b2eb..c5a0896bfd5f81b78d570178dc70791aa186ce8b 100644
--- a/third_party/libev/ev++.h
+++ b/third_party/libev/ev++.h
@@ -517,9 +517,9 @@ namespace ev {
     }
   };
 
-  inline tstamp now () throw ()
+  inline tstamp now (EV_P) throw ()
   {
-    return ev_time ();
+    return ev_now (EV_A);
   }
 
   inline void delay (tstamp interval) throw ()
diff --git a/third_party/libev/ev.3 b/third_party/libev/ev.3
index f2a4514582eb89bd724adb6e3dae9e0d49006f44..d2c36f3b6476a7e4aad7d0e87adf5220a47e1869 100644
--- a/third_party/libev/ev.3
+++ b/third_party/libev/ev.3
@@ -1,4 +1,4 @@
-.\" Automatically generated by Pod::Man 2.22 (Pod::Simple 3.07)
+.\" Automatically generated by Pod::Man 2.23 (Pod::Simple 3.14)
 .\"
 .\" Standard preamble:
 .\" ========================================================================
@@ -124,7 +124,7 @@
 .\" ========================================================================
 .\"
 .IX Title "LIBEV 3"
-.TH LIBEV 3 "2011-02-16" "libev-4.04" "libev - high performance full featured event loop"
+.TH LIBEV 3 "2012-04-19" "libev-4.11" "libev - high performance full featured event loop"
 .\" For nroff, turn off justification.  Always turn off hyphenation; it makes
 .\" way too many mistakes in technical documents.
 .if n .ad l
@@ -246,7 +246,7 @@ loop mechanism itself (\f(CW\*(C`ev_idle\*(C'\fR, \f(CW\*(C`ev_embed\*(C'\fR, \f
 limited support for fork events (\f(CW\*(C`ev_fork\*(C'\fR).
 .PP
 It also is quite fast (see this
-<benchmark> comparing it to libevent
+benchmark <http://libev.schmorp.de/bench.html> comparing it to libevent
 for example).
 .SS "\s-1CONVENTIONS\s0"
 .IX Subsection "CONVENTIONS"
@@ -296,12 +296,18 @@ library in any way.
 Returns the current time as libev would use it. Please note that the
 \&\f(CW\*(C`ev_now\*(C'\fR function is usually faster and also often returns the timestamp
 you actually want to know. Also interesting is the combination of
-\&\f(CW\*(C`ev_update_now\*(C'\fR and \f(CW\*(C`ev_now\*(C'\fR.
+\&\f(CW\*(C`ev_now_update\*(C'\fR and \f(CW\*(C`ev_now\*(C'\fR.
 .IP "ev_sleep (ev_tstamp interval)" 4
 .IX Item "ev_sleep (ev_tstamp interval)"
-Sleep for the given interval: The current thread will be blocked until
-either it is interrupted or the given time interval has passed. Basically
-this is a sub-second-resolution \f(CW\*(C`sleep ()\*(C'\fR.
+Sleep for the given interval: The current thread will be blocked
+until either it is interrupted or the given time interval has
+passed (approximately \- it might return a bit earlier even if not
+interrupted). Returns immediately if \f(CW\*(C`interval <= 0\*(C'\fR.
+.Sp
+Basically this is a sub-second-resolution \f(CW\*(C`sleep ()\*(C'\fR.
+.Sp
+The range of the \f(CW\*(C`interval\*(C'\fR is limited \- libev only guarantees to work
+with sleep times of up to one day (\f(CW\*(C`interval <= 86400\*(C'\fR).
 .IP "int ev_version_major ()" 4
 .IX Item "int ev_version_major ()"
 .PD 0
@@ -363,8 +369,8 @@ the current system, you would need to look at \f(CW\*(C`ev_embeddable_backends (
 & ev_supported_backends ()\*(C'\fR, likewise for recommended ones.
 .Sp
 See the description of \f(CW\*(C`ev_embed\*(C'\fR watchers for more info.
-.IP "ev_set_allocator (void *(*cb)(void *ptr, long size))" 4
-.IX Item "ev_set_allocator (void *(*cb)(void *ptr, long size))"
+.IP "ev_set_allocator (void *(*cb)(void *ptr, long size) throw ())" 4
+.IX Item "ev_set_allocator (void *(*cb)(void *ptr, long size) throw ())"
 Sets the allocation function to use (the prototype is similar \- the
 semantics are identical to the \f(CW\*(C`realloc\*(C'\fR C89/SuS/POSIX function). It is
 used to allocate and free memory (no surprises here). If it returns zero
@@ -400,8 +406,8 @@ retries (example requires a standards-compliant \f(CW\*(C`realloc\*(C'\fR).
 \&   ...
 \&   ev_set_allocator (persistent_realloc);
 .Ve
-.IP "ev_set_syserr_cb (void (*cb)(const char *msg))" 4
-.IX Item "ev_set_syserr_cb (void (*cb)(const char *msg))"
+.IP "ev_set_syserr_cb (void (*cb)(const char *msg) throw ())" 4
+.IX Item "ev_set_syserr_cb (void (*cb)(const char *msg) throw ())"
 Set the callback function to call on a retryable system call error (such
 as failed select, poll, epoll_wait). The message is a printable string
 indicating the system call or subsystem causing the problem. If this
@@ -555,7 +561,7 @@ example) that can't properly initialise their signal masks.
 .el .IP "\f(CWEVFLAG_NOSIGMASK\fR" 4
 .IX Item "EVFLAG_NOSIGMASK"
 When this flag is specified, then libev will avoid to modify the signal
-mask. Specifically, this means you ahve to make sure signals are unblocked
+mask. Specifically, this means you have to make sure signals are unblocked
 when you want to receive them.
 .Sp
 This behaviour is useful when you want to do your own signal handling, or
@@ -603,10 +609,10 @@ This backend maps \f(CW\*(C`EV_READ\*(C'\fR to \f(CW\*(C`POLLIN | POLLERR | POLL
 Use the linux-specific \fIepoll\fR\|(7) interface (for both pre\- and post\-2.6.9
 kernels).
 .Sp
-For few fds, this backend is a bit little slower than poll and select,
-but it scales phenomenally better. While poll and select usually scale
-like O(total_fds) where n is the total number of fds (or the highest fd),
-epoll scales either O(1) or O(active_fds).
+For few fds, this backend is a bit little slower than poll and select, but
+it scales phenomenally better. While poll and select usually scale like
+O(total_fds) where total_fds is the total number of fds (or the highest
+fd), epoll scales either O(1) or O(active_fds).
 .Sp
 The epoll mechanism deserves honorable mention as the most misdesigned
 of the more advanced event mechanisms: mere annoyances include silently
@@ -619,19 +625,22 @@ forks then \fIboth\fR parent and child process have to recreate the epoll
 set, which can take considerable time (one syscall per file descriptor)
 and is of course hard to detect.
 .Sp
-Epoll is also notoriously buggy \- embedding epoll fds \fIshould\fR work, but
-of course \fIdoesn't\fR, and epoll just loves to report events for totally
-\&\fIdifferent\fR file descriptors (even already closed ones, so one cannot
-even remove them from the set) than registered in the set (especially
-on \s-1SMP\s0 systems). Libev tries to counter these spurious notifications by
-employing an additional generation counter and comparing that against the
-events to filter out spurious ones, recreating the set when required. Last
+Epoll is also notoriously buggy \- embedding epoll fds \fIshould\fR work,
+but of course \fIdoesn't\fR, and epoll just loves to report events for
+totally \fIdifferent\fR file descriptors (even already closed ones, so
+one cannot even remove them from the set) than registered in the set
+(especially on \s-1SMP\s0 systems). Libev tries to counter these spurious
+notifications by employing an additional generation counter and comparing
+that against the events to filter out spurious ones, recreating the set
+when required. Epoll also erroneously rounds down timeouts, but gives you
+no way to know when and by how much, so sometimes you have to busy-wait
+because epoll returns immediately despite a nonzero timeout. And last
 not least, it also refuses to work with some file descriptors which work
 perfectly fine with \f(CW\*(C`select\*(C'\fR (files, many character devices...).
 .Sp
-Epoll is truly the train wreck analog among event poll mechanisms,
-a frankenpoll, cobbled together in a hurry, no thought to design or
-interaction with others.
+Epoll is truly the train wreck among event poll mechanisms, a frankenpoll,
+cobbled together in a hurry, no thought to design or interaction with
+others. Oh, the pain, will it ever stop...
 .Sp
 While stopping, setting and starting an I/O watcher in the same iteration
 will result in some caching, there is still a system call per such
@@ -678,9 +687,9 @@ It scales in the same way as the epoll backend, but the interface to the
 kernel is more efficient (which says nothing about its actual speed, of
 course). While stopping, setting and starting an I/O watcher does never
 cause an extra system call as with \f(CW\*(C`EVBACKEND_EPOLL\*(C'\fR, it still adds up to
-two event changes per incident. Support for \f(CW\*(C`fork ()\*(C'\fR is very bad (but
-sane, unlike epoll) and it drops fds silently in similarly hard-to-detect
-cases
+two event changes per incident. Support for \f(CW\*(C`fork ()\*(C'\fR is very bad (you
+might have to leak fd's on fork, but it's more sane than epoll) and it
+drops fds silently in similarly hard-to-detect cases
 .Sp
 This backend usually performs well under most conditions.
 .Sp
@@ -719,11 +728,11 @@ hacks).
 .Sp
 On the negative side, the interface is \fIbizarre\fR \- so bizarre that
 even sun itself gets it wrong in their code examples: The event polling
-function sometimes returning events to the caller even though an error
+function sometimes returns events to the caller even though an error
 occurred, but with no indication whether it has done so or not (yes, it's
-even documented that way) \- deadly for edge-triggered interfaces where
-you absolutely have to know whether an event occurred or not because you
-have to re-arm the watcher.
+even documented that way) \- deadly for edge-triggered interfaces where you
+absolutely have to know whether an event occurred or not because you have
+to re-arm the watcher.
 .Sp
 Fortunately libev seems to be able to work around these idiocies.
 .Sp
@@ -902,18 +911,22 @@ without a previous call to \f(CW\*(C`ev_suspend\*(C'\fR.
 .Sp
 Calling \f(CW\*(C`ev_suspend\*(C'\fR/\f(CW\*(C`ev_resume\*(C'\fR has the side effect of updating the
 event loop time (see \f(CW\*(C`ev_now_update\*(C'\fR).
-.IP "ev_run (loop, int flags)" 4
-.IX Item "ev_run (loop, int flags)"
+.IP "bool ev_run (loop, int flags)" 4
+.IX Item "bool ev_run (loop, int flags)"
 Finally, this is it, the event handler. This function usually is called
 after you have initialised all your watchers and you want to start
 handling events. It will ask the operating system for any new events, call
-the watcher callbacks, an then repeat the whole process indefinitely: This
+the watcher callbacks, and then repeat the whole process indefinitely: This
 is why event loops are called \fIloops\fR.
 .Sp
 If the flags argument is specified as \f(CW0\fR, it will keep handling events
 until either no event watchers are active anymore or \f(CW\*(C`ev_break\*(C'\fR was
 called.
 .Sp
+The return value is false if there are no more active watchers (which
+usually means \*(L"all jobs done\*(R" or \*(L"deadlock\*(R"), and true in all other cases
+(which usually means " you should call \f(CW\*(C`ev_run\*(C'\fR again").
+.Sp
 Please note that an explicit \f(CW\*(C`ev_break\*(C'\fR is usually better than
 relying on all watchers to be stopped when deciding when a program has
 finished (especially in interactive programs), but having a program
@@ -921,8 +934,8 @@ that automatically loops as long as it has to and no longer by virtue
 of relying on its watchers stopping correctly, that is truly a thing of
 beauty.
 .Sp
-This function is also \fImostly\fR exception-safe \- you can break out of
-a \f(CW\*(C`ev_run\*(C'\fR call by calling \f(CW\*(C`longjmp\*(C'\fR in a callback, throwing a \*(C+
+This function is \fImostly\fR exception-safe \- you can break out of a
+\&\f(CW\*(C`ev_run\*(C'\fR call by calling \f(CW\*(C`longjmp\*(C'\fR in a callback, throwing a \*(C+
 exception and so on. This does not decrement the \f(CW\*(C`ev_depth\*(C'\fR value, nor
 will it clear any outstanding \f(CW\*(C`EVBREAK_ONE\*(C'\fR breaks.
 .Sp
@@ -944,7 +957,9 @@ with something not expressible using other libev watchers (i.e. "roll your
 own \f(CW\*(C`ev_run\*(C'\fR"). However, a pair of \f(CW\*(C`ev_prepare\*(C'\fR/\f(CW\*(C`ev_check\*(C'\fR watchers is
 usually a better approach for this kind of thing.
 .Sp
-Here are the gory details of what \f(CW\*(C`ev_run\*(C'\fR does:
+Here are the gory details of what \f(CW\*(C`ev_run\*(C'\fR does (this is for your
+understanding, not a guarantee that things will work exactly like this in
+future versions):
 .Sp
 .Vb 10
 \&   \- Increment loop depth.
@@ -1069,10 +1084,11 @@ overhead for the actual polling but can deliver many events at once.
 By setting a higher \fIio collect interval\fR you allow libev to spend more
 time collecting I/O events, so you can handle more events per iteration,
 at the cost of increasing latency. Timeouts (both \f(CW\*(C`ev_periodic\*(C'\fR and
-\&\f(CW\*(C`ev_timer\*(C'\fR) will be not affected. Setting this to a non-null value will
+\&\f(CW\*(C`ev_timer\*(C'\fR) will not be affected. Setting this to a non-null value will
 introduce an additional \f(CW\*(C`ev_sleep ()\*(C'\fR call into most loop iterations. The
 sleep time ensures that libev will not poll for I/O events more often then
-once per this interval, on average.
+once per this interval, on average (as long as the host time resolution is
+good enough).
 .Sp
 Likewise, by setting a higher \fItimeout collect interval\fR you allow libev
 to spend more time collecting timeouts, at the expense of increased
@@ -1126,15 +1142,15 @@ invoke the actual watchers inside another context (another thread etc.).
 .Sp
 If you want to reset the callback, use \f(CW\*(C`ev_invoke_pending\*(C'\fR as new
 callback.
-.IP "ev_set_loop_release_cb (loop, void (*release)(\s-1EV_P\s0), void (*acquire)(\s-1EV_P\s0))" 4
-.IX Item "ev_set_loop_release_cb (loop, void (*release)(EV_P), void (*acquire)(EV_P))"
+.IP "ev_set_loop_release_cb (loop, void (*release)(\s-1EV_P\s0) throw (), void (*acquire)(\s-1EV_P\s0) throw ())" 4
+.IX Item "ev_set_loop_release_cb (loop, void (*release)(EV_P) throw (), void (*acquire)(EV_P) throw ())"
 Sometimes you want to share the same loop between multiple threads. This
 can be done relatively simply by putting mutex_lock/unlock calls around
 each call to a libev function.
 .Sp
 However, \f(CW\*(C`ev_run\*(C'\fR can run an indefinite time, so it is not feasible
 to wait for it to return. One way around this is to wake up the event
-loop via \f(CW\*(C`ev_break\*(C'\fR and \f(CW\*(C`av_async_send\*(C'\fR, another way is to set these
+loop via \f(CW\*(C`ev_break\*(C'\fR and \f(CW\*(C`ev_async_send\*(C'\fR, another way is to set these
 \&\fIrelease\fR and \fIacquire\fR callbacks on the loop.
 .Sp
 When set, then \f(CW\*(C`release\*(C'\fR will be called just before the thread is
@@ -1491,7 +1507,7 @@ transition between them will be described in more detail \- and while these
 rules might look complicated, they usually do \*(L"the right thing\*(R".
 .IP "initialiased" 4
 .IX Item "initialiased"
-Before a watcher can be registered with the event looop it has to be
+Before a watcher can be registered with the event loop it has to be
 initialised. This can be done with a call to \f(CW\*(C`ev_TYPE_init\*(C'\fR, or calls to
 \&\f(CW\*(C`ev_init\*(C'\fR followed by the watcher-specific \f(CW\*(C`ev_TYPE_set\*(C'\fR function.
 .Sp
@@ -1873,10 +1889,11 @@ monotonic clock option helps a lot here).
 .PP
 The callback is guaranteed to be invoked only \fIafter\fR its timeout has
 passed (not \fIat\fR, so on systems with very low-resolution clocks this
-might introduce a small delay). If multiple timers become ready during the
-same loop iteration then the ones with earlier time-out values are invoked
-before ones of the same priority with later time-out values (but this is
-no longer true when a callback calls \f(CW\*(C`ev_run\*(C'\fR recursively).
+might introduce a small delay, see \*(L"the special problem of being too
+early\*(R", below). If multiple timers become ready during the same loop
+iteration then the ones with earlier time-out values are invoked before
+ones of the same priority with later time-out values (but this is no
+longer true when a callback calls \f(CW\*(C`ev_run\*(C'\fR recursively).
 .PP
 \fIBe smart about timeouts\fR
 .IX Subsection "Be smart about timeouts"
@@ -1968,68 +1985,84 @@ In this case, it would be more efficient to leave the \f(CW\*(C`ev_timer\*(C'\fR
 but remember the time of last activity, and check for a real timeout only
 within the callback:
 .Sp
-.Vb 1
+.Vb 3
+\&   ev_tstamp timeout = 60.;
 \&   ev_tstamp last_activity; // time of last activity
+\&   ev_timer timer;
 \&
 \&   static void
 \&   callback (EV_P_ ev_timer *w, int revents)
 \&   {
-\&     ev_tstamp now     = ev_now (EV_A);
-\&     ev_tstamp timeout = last_activity + 60.;
+\&     // calculate when the timeout would happen
+\&     ev_tstamp after = last_activity \- ev_now (EV_A) + timeout;
 \&
-\&     // if last_activity + 60. is older than now, we did time out
-\&     if (timeout < now)
+\&     // if negative, it means the timeout already occurred
+\&     if (after < 0.)
 \&       {
 \&         // timeout occurred, take action
 \&       }
 \&     else
 \&       {
-\&         // callback was invoked, but there was some activity, re\-arm
-\&         // the watcher to fire in last_activity + 60, which is
-\&         // guaranteed to be in the future, so "again" is positive:
-\&         w\->repeat = timeout \- now;
-\&         ev_timer_again (EV_A_ w);
+\&         // callback was invoked, but there was some recent 
+\&         // activity. simply restart the timer to time out
+\&         // after "after" seconds, which is the earliest time
+\&         // the timeout can occur.
+\&         ev_timer_set (w, after, 0.);
+\&         ev_timer_start (EV_A_ w);
 \&       }
 \&   }
 .Ve
 .Sp
-To summarise the callback: first calculate the real timeout (defined
-as \*(L"60 seconds after the last activity\*(R"), then check if that time has
-been reached, which means something \fIdid\fR, in fact, time out. Otherwise
-the callback was invoked too early (\f(CW\*(C`timeout\*(C'\fR is in the future), so
-re-schedule the timer to fire at that future time, to see if maybe we have
-a timeout then.
+To summarise the callback: first calculate in how many seconds the
+timeout will occur (by calculating the absolute time when it would occur,
+\&\f(CW\*(C`last_activity + timeout\*(C'\fR, and subtracting the current time, \f(CW\*(C`ev_now
+(EV_A)\*(C'\fR from that).
 .Sp
-Note how \f(CW\*(C`ev_timer_again\*(C'\fR is used, taking advantage of the
-\&\f(CW\*(C`ev_timer_again\*(C'\fR optimisation when the timer is already running.
+If this value is negative, then we are already past the timeout, i.e. we
+timed out, and need to do whatever is needed in this case.
+.Sp
+Otherwise, we now know the earliest time at which the timeout would trigger,
+and simply start the timer with this timeout value.
+.Sp
+In other words, each time the callback is invoked it will check whether
+the timeout occurred. If not, it will simply reschedule itself to check
+again at the earliest time it could time out. Rinse. Repeat.
 .Sp
 This scheme causes more callback invocations (about one every 60 seconds
 minus half the average time between activity), but virtually no calls to
 libev to change the timeout.
 .Sp
-To start the timer, simply initialise the watcher and set \f(CW\*(C`last_activity\*(C'\fR
-to the current time (meaning we just have some activity :), then call the
-callback, which will \*(L"do the right thing\*(R" and start the timer:
+To start the machinery, simply initialise the watcher and set
+\&\f(CW\*(C`last_activity\*(C'\fR to the current time (meaning there was some activity just
+now), then call the callback, which will \*(L"do the right thing\*(R" and start
+the timer:
 .Sp
 .Vb 3
-\&   ev_init (timer, callback);
-\&   last_activity = ev_now (loop);
-\&   callback (loop, timer, EV_TIMER);
+\&   last_activity = ev_now (EV_A);
+\&   ev_init (&timer, callback);
+\&   callback (EV_A_ &timer, 0);
 .Ve
 .Sp
-And when there is some activity, simply store the current time in
+When there is some activity, simply store the current time in
 \&\f(CW\*(C`last_activity\*(C'\fR, no libev calls at all:
 .Sp
-.Vb 1
-\&   last_activity = ev_now (loop);
+.Vb 2
+\&   if (activity detected)
+\&     last_activity = ev_now (EV_A);
+.Ve
+.Sp
+When your timeout value changes, then the timeout can be changed by simply
+providing a new value, stopping the timer and calling the callback, which
+will again do the right thing (for example, time out immediately :).
+.Sp
+.Vb 3
+\&   timeout = new_value;
+\&   ev_timer_stop (EV_A_ &timer);
+\&   callback (EV_A_ &timer, 0);
 .Ve
 .Sp
 This technique is slightly more complex, but in most cases where the
 time-out is unlikely to be triggered, much more efficient.
-.Sp
-Changing the timeout is trivial as well (if it isn't hard-coded in the
-callback :) \- just change the timeout and invoke the callback, which will
-fix things for you.
 .IP "4. Wee, just use a double-linked list for your timeouts." 4
 .IX Item "4. Wee, just use a double-linked list for your timeouts."
 If there is not one request, but many thousands (millions...), all
@@ -2063,11 +2096,49 @@ rather complicated, but extremely efficient, something that really pays
 off after the first million or so of active timers, i.e. it's usually
 overkill :)
 .PP
+\fIThe special problem of being too early\fR
+.IX Subsection "The special problem of being too early"
+.PP
+If you ask a timer to call your callback after three seconds, then
+you expect it to be invoked after three seconds \- but of course, this
+cannot be guaranteed to infinite precision. Less obviously, it cannot be
+guaranteed to any precision by libev \- imagine somebody suspending the
+process with a \s-1STOP\s0 signal for a few hours for example.
+.PP
+So, libev tries to invoke your callback as soon as possible \fIafter\fR the
+delay has occurred, but cannot guarantee this.
+.PP
+A less obvious failure mode is calling your callback too early: many event
+loops compare timestamps with an \*(L"elapsed delay >= requested delay\*(R", but
+this can cause your callback to be invoked much earlier than you would
+expect.
+.PP
+To see why, imagine a system with a clock that only offers full second
+resolution (think windows if you can't come up with a broken enough \s-1OS\s0
+yourself). If you schedule a one-second timer at the time 500.9, then the
+event loop will schedule your timeout to elapse at a system time of 500
+(500.9 truncated to the resolution) + 1, or 501.
+.PP
+If an event library looks at the timeout 0.1s later, it will see \*(L"501 >=
+501\*(R" and invoke the callback 0.1s after it was started, even though a
+one-second delay was requested \- this is being \*(L"too early\*(R", despite best
+intentions.
+.PP
+This is the reason why libev will never invoke the callback if the elapsed
+delay equals the requested delay, but only when the elapsed delay is
+larger than the requested delay. In the example above, libev would only invoke
+the callback at system time 502, or 1.1s after the timer was started.
+.PP
+So, while libev cannot guarantee that your callback will be invoked
+exactly when requested, it \fIcan\fR and \fIdoes\fR guarantee that the requested
+delay has actually elapsed, or in other words, it always errs on the \*(L"too
+late\*(R" side of things.
+.PP
 \fIThe special problem of time updates\fR
 .IX Subsection "The special problem of time updates"
 .PP
-Establishing the current time is a costly operation (it usually takes at
-least two system calls): \s-1EV\s0 therefore updates its idea of the current
+Establishing the current time is a costly operation (it usually takes
+at least one system call): \s-1EV\s0 therefore updates its idea of the current
 time only before and after \f(CW\*(C`ev_run\*(C'\fR collects new events, which causes a
 growing difference between \f(CW\*(C`ev_now ()\*(C'\fR and \f(CW\*(C`ev_time ()\*(C'\fR when handling
 lots of events in one iteration.
@@ -2086,6 +2157,40 @@ If the event loop is suspended for a long time, you can also force an
 update of the time returned by \f(CW\*(C`ev_now ()\*(C'\fR by calling \f(CW\*(C`ev_now_update
 ()\*(C'\fR.
 .PP
+\fIThe special problem of unsynchronised clocks\fR
+.IX Subsection "The special problem of unsynchronised clocks"
+.PP
+Modern systems have a variety of clocks \- libev itself uses the normal
+\&\*(L"wall clock\*(R" clock and, if available, the monotonic clock (to avoid time
+jumps).
+.PP
+Neither of these clocks is synchronised with each other or any other clock
+on the system, so \f(CW\*(C`ev_time ()\*(C'\fR might return a considerably different time
+than \f(CW\*(C`gettimeofday ()\*(C'\fR or \f(CW\*(C`time ()\*(C'\fR. On a GNU/Linux system, for example,
+a call to \f(CW\*(C`gettimeofday\*(C'\fR might return a second count that is one higher
+than a directly following call to \f(CW\*(C`time\*(C'\fR.
+.PP
+The moral of this is to only compare libev-related timestamps with
+\&\f(CW\*(C`ev_time ()\*(C'\fR and \f(CW\*(C`ev_now ()\*(C'\fR, at least if you want better precision than
+a second or so.
+.PP
+One more problem arises due to this lack of synchronisation: if libev uses
+the system monotonic clock and you compare timestamps from \f(CW\*(C`ev_time\*(C'\fR
+or \f(CW\*(C`ev_now\*(C'\fR from when you started your timer and when your callback is
+invoked, you will find that sometimes the callback is a bit \*(L"early\*(R".
+.PP
+This is because \f(CW\*(C`ev_timer\*(C'\fRs work in real time, not wall clock time, so
+libev makes sure your callback is not invoked before the delay happened,
+\&\fImeasured according to the real time\fR, not the system clock.
+.PP
+If your timeouts are based on a physical timescale (e.g. \*(L"time out this
+connection after 100 seconds\*(R") then this shouldn't bother you as it is
+exactly the right behaviour.
+.PP
+If you want to compare wall clock/system timestamps to your timers, then
+you need to use \f(CW\*(C`ev_periodic\*(C'\fRs, as these are based on the wall clock
+time, where your comparisons will always generate correct results.
+.PP
 \fIThe special problems of suspended animation\fR
 .IX Subsection "The special problems of suspended animation"
 .PP
@@ -2138,18 +2243,28 @@ keep up with the timer (because it takes longer than those 10 seconds to
 do stuff) the timer will not fire more than once per event loop iteration.
 .IP "ev_timer_again (loop, ev_timer *)" 4
 .IX Item "ev_timer_again (loop, ev_timer *)"
-This will act as if the timer timed out and restart it again if it is
-repeating. The exact semantics are:
-.Sp
-If the timer is pending, its pending status is cleared.
-.Sp
-If the timer is started but non-repeating, stop it (as if it timed out).
+This will act as if the timer timed out, and restarts it again if it is
+repeating. It basically works like calling \f(CW\*(C`ev_timer_stop\*(C'\fR, updating the
+timeout to the \f(CW\*(C`repeat\*(C'\fR value and calling \f(CW\*(C`ev_timer_start\*(C'\fR.
 .Sp
-If the timer is repeating, either start it if necessary (with the
-\&\f(CW\*(C`repeat\*(C'\fR value), or reset the running timer to the \f(CW\*(C`repeat\*(C'\fR value.
+The exact semantics are as in the following rules, all of which will be
+applied to the watcher:
+.RS 4
+.IP "If the timer is pending, the pending status is always cleared." 4
+.IX Item "If the timer is pending, the pending status is always cleared."
+.PD 0
+.IP "If the timer is started but non-repeating, stop it (as if it timed out, without invoking it)." 4
+.IX Item "If the timer is started but non-repeating, stop it (as if it timed out, without invoking it)."
+.ie n .IP "If the timer is repeating, make the ""repeat"" value the new timeout and start the timer, if necessary." 4
+.el .IP "If the timer is repeating, make the \f(CWrepeat\fR value the new timeout and start the timer, if necessary." 4
+.IX Item "If the timer is repeating, make the repeat value the new timeout and start the timer, if necessary."
+.RE
+.RS 4
+.PD
 .Sp
 This sounds a bit complicated, see \*(L"Be smart about timeouts\*(R", above, for a
 usage example.
+.RE
 .IP "ev_tstamp ev_timer_remaining (loop, ev_timer *)" 4
 .IX Item "ev_tstamp ev_timer_remaining (loop, ev_timer *)"
 Returns the remaining time until a timer fires. If the timer is active,
@@ -2279,9 +2394,12 @@ Another way to think about it (for the mathematically inclined) is that
 \&\f(CW\*(C`ev_periodic\*(C'\fR will try to run the callback in this mode at the next possible
 time where \f(CW\*(C`time = offset (mod interval)\*(C'\fR, regardless of any time jumps.
 .Sp
-For numerical stability it is preferable that the \f(CW\*(C`offset\*(C'\fR value is near
-\&\f(CW\*(C`ev_now ()\*(C'\fR (the current time), but there is no range requirement for
-this value, and in fact is often specified as zero.
+The \f(CW\*(C`interval\*(C'\fR \fI\s-1MUST\s0\fR be positive, and for numerical stability, the
+interval value should be higher than \f(CW\*(C`1/8192\*(C'\fR (which is around 100
+microseconds) and \f(CW\*(C`offset\*(C'\fR should be higher than \f(CW0\fR and should have
+at most a similar magnitude as the current time (say, within a factor of
+ten). Typical values for offset are, in fact, \f(CW0\fR or something between
+\&\f(CW0\fR and \f(CW\*(C`interval\*(C'\fR, which is also the recommended range.
 .Sp
 Note also that there is an upper limit to how often a timer can fire (\s-1CPU\s0
 speed for example), so if \f(CW\*(C`interval\*(C'\fR is very small then timing stability
@@ -3333,9 +3451,6 @@ of \*(L"global async watchers\*(R" by using a watcher on an otherwise unused
 signal, and \f(CW\*(C`ev_feed_signal\*(C'\fR to signal this watcher from another thread,
 even without knowing which loop owns the signal.
 .PP
-Unlike \f(CW\*(C`ev_signal\*(C'\fR watchers, \f(CW\*(C`ev_async\*(C'\fR works with any event loop, not
-just the default loop.
-.PP
 \fIQueueing\fR
 .IX Subsection "Queueing"
 .PP
@@ -3439,13 +3554,16 @@ signal or similar contexts (see the discussion of \f(CW\*(C`EV_ATOMIC_T\*(C'\fR
 embedding section below on what exactly this means).
 .Sp
 Note that, as with other watchers in libev, multiple events might get
-compressed into a single callback invocation (another way to look at this
-is that \f(CW\*(C`ev_async\*(C'\fR watchers are level-triggered, set on \f(CW\*(C`ev_async_send\*(C'\fR,
-reset when the event loop detects that).
-.Sp
-This call incurs the overhead of a system call only once per event loop
-iteration, so while the overhead might be noticeable, it doesn't apply to
-repeated calls to \f(CW\*(C`ev_async_send\*(C'\fR for the same event loop.
+compressed into a single callback invocation (another way to look at
+this is that \f(CW\*(C`ev_async\*(C'\fR watchers are level-triggered: they are set on
+\&\f(CW\*(C`ev_async_send\*(C'\fR, reset when the event loop detects that).
+.Sp
+This call incurs the overhead of at most one extra system call per event
+loop iteration, if the event loop is blocked, and no syscall at all if
+the event loop (or your program) is processing events. That means that
+repeated calls are basically free (there is no need to avoid calls for
+performance reasons) and that the overhead becomes smaller (typically
+zero) under load.
 .IP "bool = ev_async_pending (ev_async *)" 4
 .IX Item "bool = ev_async_pending (ev_async *)"
 Returns a non-zero value when \f(CW\*(C`ev_async_send\*(C'\fR has been called on the
@@ -3503,7 +3621,7 @@ Example: wait up to ten seconds for data to appear on \s-1STDIN_FILENO\s0.
 .IP "ev_feed_fd_event (loop, int fd, int revents)" 4
 .IX Item "ev_feed_fd_event (loop, int fd, int revents)"
 Feed an event on the given fd, as if a file descriptor backend detected
-the given events it.
+the given events.
 .IP "ev_feed_signal_event (loop, int signum)" 4
 .IX Item "ev_feed_signal_event (loop, int signum)"
 Feed an event as if the given signal occurred. See also \f(CW\*(C`ev_feed_signal\*(C'\fR,
@@ -3587,6 +3705,49 @@ real programmers):
 \&       (((char *)w) \- offsetof (struct my_biggy, t2));
 \&   }
 .Ve
+.SS "\s-1AVOIDING\s0 \s-1FINISHING\s0 \s-1BEFORE\s0 \s-1RETURNING\s0"
+.IX Subsection "AVOIDING FINISHING BEFORE RETURNING"
+Often you have structures like this in event-based programs:
+.PP
+.Vb 4
+\&  callback ()
+\&  {
+\&    free (request);
+\&  }
+\&
+\&  request = start_new_request (..., callback);
+.Ve
+.PP
+The intent is to start some \*(L"lengthy\*(R" operation. The \f(CW\*(C`request\*(C'\fR could be
+used to cancel the operation, or do other things with it.
+.PP
+It's not uncommon to have code paths in \f(CW\*(C`start_new_request\*(C'\fR that
+immediately invoke the callback, for example, to report errors. Or you add
+some caching layer that finds that it can skip the lengthy aspects of the
+operation and simply invoke the callback with the result.
+.PP
+The problem here is that this will happen \fIbefore\fR \f(CW\*(C`start_new_request\*(C'\fR
+has returned, so \f(CW\*(C`request\*(C'\fR is not set.
+.PP
+Even if you pass the request by some safer means to the callback, you
+might want to do something to the request after starting it, such as
+canceling it, which probably isn't working so well when the callback has
+already been invoked.
+.PP
+A common way around all these issues is to make sure that
+\&\f(CW\*(C`start_new_request\*(C'\fR \fIalways\fR returns before the callback is invoked. If
+\&\f(CW\*(C`start_new_request\*(C'\fR immediately knows the result, it can artificially
+delay invoking the callback by e.g. using a \f(CW\*(C`prepare\*(C'\fR or \f(CW\*(C`idle\*(C'\fR watcher
+for example, or more sneakily, by reusing an existing (stopped) watcher
+and pushing it into the pending queue:
+.PP
+.Vb 2
+\&   ev_set_cb (watcher, callback);
+\&   ev_feed_event (EV_A_ watcher, 0);
+.Ve
+.PP
+This way, \f(CW\*(C`start_new_request\*(C'\fR can safely return before the callback is
+invoked, while not delaying callback invocation too much.
 .SS "\s-1MODEL/NESTED\s0 \s-1EVENT\s0 \s-1LOOP\s0 \s-1INVOCATIONS\s0 \s-1AND\s0 \s-1EXIT\s0 \s-1CONDITIONS\s0"
 .IX Subsection "MODEL/NESTED EVENT LOOP INVOCATIONS AND EXIT CONDITIONS"
 Often (especially in \s-1GUI\s0 toolkits) there are places where you have
@@ -3610,7 +3771,7 @@ triggered, using \f(CW\*(C`EVRUN_ONCE\*(C'\fR:
 \&   while (!exit_main_loop)
 \&     ev_run (EV_DEFAULT_ EVRUN_ONCE);
 \&
-\&   // in a model watcher
+\&   // in a modal watcher
 \&   int exit_nested_loop = 0;
 \&
 \&   while (!exit_nested_loop)
@@ -3819,7 +3980,7 @@ called):
 .PP
 That basically suspends the coroutine inside \f(CW\*(C`wait_for_event\*(C'\fR and
 continues the libev coroutine, which, when appropriate, switches back to
-this or any other coroutine. I am sure if you sue this your own :)
+this or any other coroutine.
 .PP
 You can do similar tricks if you have, say, threads with an event queue \-
 instead of storing a coroutine, you store the queue object and instead of
@@ -3875,6 +4036,40 @@ The libev emulation is \fInot\fR \s-1ABI\s0 compatible to libevent, you need
 to use the libev header file and library.
 .SH "\*(C+ SUPPORT"
 .IX Header " SUPPORT"
+.SS "C \s-1API\s0"
+.IX Subsection "C API"
+The normal C \s-1API\s0 should work fine when used from \*(C+: both ev.h and the
+libev sources can be compiled as \*(C+. Therefore, code that uses the C \s-1API\s0
+will work fine.
+.PP
+Proper exception specifications might have to be added to callbacks passed
+to libev: exceptions may be thrown only from watcher callbacks, all
+other callbacks (allocator, syserr, loop acquire/release and periodic
+reschedule callbacks) must not throw exceptions, and might need a \f(CW\*(C`throw
+()\*(C'\fR specification. If you have code that needs to be compiled as both C
+and \*(C+ you can use the \f(CW\*(C`EV_THROW\*(C'\fR macro for this:
+.PP
+.Vb 6
+\&   static void
+\&   fatal_error (const char *msg) EV_THROW
+\&   {
+\&     perror (msg);
+\&     abort ();
+\&   }
+\&
+\&   ...
+\&   ev_set_syserr_cb (fatal_error);
+.Ve
+.PP
+The only \s-1API\s0 functions that can currently throw exceptions are \f(CW\*(C`ev_run\*(C'\fR,
+\&\f(CW\*(C`ev_invoke\*(C'\fR, \f(CW\*(C`ev_invoke_pending\*(C'\fR and \f(CW\*(C`ev_loop_destroy\*(C'\fR (the latter
+because it runs cleanup watchers).
+.PP
+Throwing exceptions in watcher callbacks is only supported if libev itself
+is compiled with a \*(C+ compiler or your C and \*(C+ environments allow
+throwing exceptions through C libraries (most do).
+.SS "\*(C+ \s-1API\s0"
+.IX Subsection " API"
 Libev comes with some simplistic wrapper classes for \*(C+ that mainly allow
 you to use some convenience methods to start/stop watchers and also change
 the callback model to a model using method callbacks on objects.
@@ -3901,6 +4096,10 @@ to add as long as they only need one additional pointer for context. If
 you need support for other types of functors please contact the author
 (preferably after implementing it).
 .PP
+For all this to work, your \*(C+ compiler either has to use the same calling
+conventions as your C compiler (for static member functions), or you have
+to embed libev and compile libev itself as \*(C+.
+.PP
 Here is a list of things available in the \f(CW\*(C`ev\*(C'\fR namespace:
 .ie n .IP """ev::READ"", ""ev::WRITE"" etc." 4
 .el .IP "\f(CWev::READ\fR, \f(CWev::WRITE\fR etc." 4
@@ -3917,7 +4116,7 @@ Aliases to the same types/functions as with the \f(CW\*(C`ev_\*(C'\fR prefix.
 For each \f(CW\*(C`ev_TYPE\*(C'\fR watcher in \fIev.h\fR there is a corresponding class of
 the same name in the \f(CW\*(C`ev\*(C'\fR namespace, with the exception of \f(CW\*(C`ev_signal\*(C'\fR
 which is called \f(CW\*(C`ev::sig\*(C'\fR to avoid clashes with the \f(CW\*(C`signal\*(C'\fR macro
-defines by many implementations.
+defined by many implementations.
 .Sp
 All of those classes have these methods:
 .RS 4
@@ -4058,7 +4257,7 @@ watchers in the constructor.
 \&   class myclass
 \&   {
 \&     ev::io   io  ; void io_cb   (ev::io   &w, int revents);
-\&     ev::io2  io2 ; void io2_cb  (ev::io   &w, int revents);
+\&     ev::io   io2 ; void io2_cb  (ev::io   &w, int revents);
 \&     ev::idle idle; void idle_cb (ev::idle &w, int revents);
 \&
 \&     myclass (int fd)
@@ -4107,20 +4306,20 @@ makes rev work even on mingw.
 .IP "Haskell" 4
 .IX Item "Haskell"
 A haskell binding to libev is available at
-<http://hackage.haskell.org/cgi\-bin/hackage\-scripts/package/hlibev>.
+http://hackage.haskell.org/cgi\-bin/hackage\-scripts/package/hlibev <http://hackage.haskell.org/cgi-bin/hackage-scripts/package/hlibev>.
 .IP "D" 4
 .IX Item "D"
 Leandro Lucarella has written a D language binding (\fIev.d\fR) for libev, to
-be found at <http://proj.llucax.com.ar/wiki/evd>.
+be found at <http://www.llucax.com.ar/proj/ev.d/index.html>.
 .IP "Ocaml" 4
 .IX Item "Ocaml"
 Erkki Seppala has written Ocaml bindings for libev, to be found at
-<http://modeemi.cs.tut.fi/~flux/software/ocaml\-ev/>.
+http://modeemi.cs.tut.fi/~flux/software/ocaml\-ev/ <http://modeemi.cs.tut.fi/~flux/software/ocaml-ev/>.
 .IP "Lua" 4
 .IX Item "Lua"
 Brian Maher has written a partial interface to libev for lua (at the
 time of this writing, only \f(CW\*(C`ev_io\*(C'\fR and \f(CW\*(C`ev_timer\*(C'\fR), to be found at
-<http://github.com/brimworks/lua\-ev>.
+http://github.com/brimworks/lua\-ev <http://github.com/brimworks/lua-ev>.
 .SH "MACRO MAGIC"
 .IX Header "MACRO MAGIC"
 Libev can be compiled with a variety of options, the most fundamental
@@ -4165,7 +4364,11 @@ suitable for use with \f(CW\*(C`EV_A\*(C'\fR.
 .el .IP "\f(CWEV_DEFAULT\fR, \f(CWEV_DEFAULT_\fR" 4
 .IX Item "EV_DEFAULT, EV_DEFAULT_"
 Similar to the other two macros, this gives you the value of the default
-loop, if multiple loops are supported (\*(L"ev loop default\*(R").
+loop, if multiple loops are supported (\*(L"ev loop default\*(R"). The default loop
+will be initialised if it isn't already initialised.
+.Sp
+For non-multiplicity builds, these macros do nothing, so you always have
+to initialise the loop somewhere.
 .ie n .IP """EV_DEFAULT_UC"", ""EV_DEFAULT_UC_""" 4
 .el .IP "\f(CWEV_DEFAULT_UC\fR, \f(CWEV_DEFAULT_UC_\fR" 4
 .IX Item "EV_DEFAULT_UC, EV_DEFAULT_UC_"
@@ -4330,6 +4533,14 @@ supported). It will also not define any of the structs usually found in
 .Sp
 In standalone mode, libev will still try to automatically deduce the
 configuration, but has to be more conservative.
+.IP "\s-1EV_USE_FLOOR\s0" 4
+.IX Item "EV_USE_FLOOR"
+If defined to be \f(CW1\fR, libev will use the \f(CW\*(C`floor ()\*(C'\fR function for its
+periodic reschedule calculations, otherwise libev will fall back on a
+portable (slower) implementation. If you enable this, you usually have to
+link against libm or something equivalent. Enabling this when the \f(CW\*(C`floor\*(C'\fR
+function is not available will fail, so the safe default is to not enable
+this.
 .IP "\s-1EV_USE_MONOTONIC\s0" 4
 .IX Item "EV_USE_MONOTONIC"
 If defined to be \f(CW1\fR, libev will try to detect the availability of the
@@ -4451,16 +4662,30 @@ If defined to be \f(CW1\fR, libev will compile in support for the Linux inotify
 interface to speed up \f(CW\*(C`ev_stat\*(C'\fR watchers. Its actual availability will
 be detected at runtime. If undefined, it will be enabled if the headers
 indicate GNU/Linux + Glibc 2.4 or newer, otherwise disabled.
+.IP "\s-1EV_NO_SMP\s0" 4
+.IX Item "EV_NO_SMP"
+If defined to be \f(CW1\fR, libev will assume that memory is always coherent
+between threads, that is, threads can be used, but threads never run on
+different cpus (or different cpu cores). This reduces dependencies
+and makes libev faster.
+.IP "\s-1EV_NO_THREADS\s0" 4
+.IX Item "EV_NO_THREADS"
+If defined to be \f(CW1\fR, libev will assume that it will never be called
+from different threads, which is a stronger assumption than \f(CW\*(C`EV_NO_SMP\*(C'\fR,
+above. This reduces dependencies and makes libev faster.
 .IP "\s-1EV_ATOMIC_T\s0" 4
 .IX Item "EV_ATOMIC_T"
 Libev requires an integer type (suitable for storing \f(CW0\fR or \f(CW1\fR) whose
-access is atomic with respect to other threads or signal contexts. No such
-type is easily found in the C language, so you can provide your own type
-that you know is safe for your purposes. It is used both for signal handler \*(L"locking\*(R"
-as well as for signal and thread safety in \f(CW\*(C`ev_async\*(C'\fR watchers.
+access is atomic and serialised with respect to other threads or signal
+contexts. No such type is easily found in the C language, so you can
+provide your own type that you know is safe for your purposes. It is used
+both for signal handler \*(L"locking\*(R" as well as for signal and thread safety
+in \f(CW\*(C`ev_async\*(C'\fR watchers.
 .Sp
 In the absence of this define, libev will use \f(CW\*(C`sig_atomic_t volatile\*(C'\fR
-(from \fIsignal.h\fR), which is usually good enough on most platforms.
+(from \fIsignal.h\fR), which is usually good enough on most platforms,
+although strictly speaking using a type that also implies a memory fence
+is required.
 .IP "\s-1EV_H\s0 (h)" 4
 .IX Item "EV_H (h)"
 The name of the \fIev.h\fR header file used to include it. The default if
@@ -4488,6 +4713,10 @@ will have the \f(CW\*(C`struct ev_loop *\*(C'\fR as first argument, and you can
 additional independent event loops. Otherwise there will be no support
 for multiple event loops and there is no first event loop pointer
 argument. Instead, all functions act on the single default loop.
+.Sp
+Note that \f(CW\*(C`EV_DEFAULT\*(C'\fR and \f(CW\*(C`EV_DEFAULT_\*(C'\fR will no longer provide a
+default loop when multiplicity is switched off \- you always have to
+initialise the loop manually in this case.
 .IP "\s-1EV_MINPRI\s0" 4
 .IX Item "EV_MINPRI"
 .PD 0
@@ -4533,7 +4762,7 @@ backend, use this:
 .Ve
 .Sp
 The actual value is a bitset, it can be a combination of the following
-values:
+values (by default, all of these are enabled):
 .RS 4
 .ie n .IP "1 \- faster/larger code" 4
 .el .IP "\f(CW1\fR \- faster/larger code" 4
@@ -4546,6 +4775,9 @@ code size by roughly 30% on amd64).
 When optimising for size, use of compiler flags such as \f(CW\*(C`\-Os\*(C'\fR with
 gcc is recommended, as well as \f(CW\*(C`\-DNDEBUG\*(C'\fR, as libev contains a number of
 assertions.
+.Sp
+The default is off when \f(CW\*(C`_\|_OPTIMIZE_SIZE_\|_\*(C'\fR is defined by your compiler
+(e.g. gcc with \f(CW\*(C`\-Os\*(C'\fR).
 .ie n .IP "2 \- faster/larger data structures" 4
 .el .IP "\f(CW2\fR \- faster/larger data structures" 4
 .IX Item "2 - faster/larger data structures"
@@ -4553,6 +4785,9 @@ Replaces the small 2\-heap for timer management by a faster 4\-heap, larger
 hash table sizes and so on. This will usually further increase code size
 and can additionally have an effect on the size of data structures at
 runtime.
+.Sp
+The default is off when \f(CW\*(C`_\|_OPTIMIZE_SIZE_\|_\*(C'\fR is defined by your compiler
+(e.g. gcc with \f(CW\*(C`\-Os\*(C'\fR).
 .ie n .IP "4 \- full \s-1API\s0 configuration" 4
 .el .IP "\f(CW4\fR \- full \s-1API\s0 configuration" 4
 .IX Item "4 - full API configuration"
@@ -4594,6 +4829,19 @@ when you use \f(CW\*(C`\-Wl,\-\-gc\-sections \-ffunction\-sections\*(C'\fR) func
 your program might be left out as well \- a binary starting a timer and an
 I/O watcher then might come out at only 5Kb.
 .RE
+.IP "\s-1EV_API_STATIC\s0" 4
+.IX Item "EV_API_STATIC"
+If this symbol is defined (by default it is not), then all identifiers
+will have static linkage. This means that libev will not export any
+identifiers, and you cannot link against libev anymore. This can be useful
+when you embed libev, only want to use libev functions in a single file,
+and do not want its identifiers to be visible.
+.Sp
+To use this, define \f(CW\*(C`EV_API_STATIC\*(C'\fR and include \fIev.c\fR in the file that
+wants to use libev.
+.Sp
+This option only works when libev is compiled with a C compiler, as \*(C+
+doesn't support the required declaration syntax.
 .IP "\s-1EV_AVOID_STDIO\s0" 4
 .IX Item "EV_AVOID_STDIO"
 If this is set to \f(CW1\fR at compiletime, then libev will avoid using stdio
@@ -4980,7 +5228,7 @@ model. Libev still offers limited functionality on this platform in
 the form of the \f(CW\*(C`EVBACKEND_SELECT\*(C'\fR backend, and only supports socket
 descriptors. This only applies when using Win32 natively, not when using
 e.g. cygwin. Actually, it only applies to the microsofts own compilers,
-as every compielr comes with a slightly differently broken/incompatible
+as every compiler comes with a slightly differently broken/incompatible
 environment.
 .PP
 Lifting these limitations would basically require the full
@@ -5126,8 +5374,12 @@ The type \f(CW\*(C`double\*(C'\fR is used to represent timestamps. It is require
 have at least 51 bits of mantissa (and 9 bits of exponent), which is
 good enough for at least into the year 4000 with millisecond accuracy
 (the design goal for libev). This requirement is overfulfilled by
-implementations using \s-1IEEE\s0 754, which is basically all existing ones. With
-\&\s-1IEEE\s0 754 doubles, you get microsecond accuracy until at least 2200.
+implementations using \s-1IEEE\s0 754, which is basically all existing ones.
+.Sp
+With \s-1IEEE\s0 754 doubles, you get microsecond accuracy until at least the
+year 2255 (and millisecond accuracy till the year 287396 \- by then, libev
+is either obsolete or somebody patched it to use \f(CW\*(C`long double\*(C'\fR or
+something like that, just kidding).
 .PP
 If you know of other additional requirements drop me a note.
 .SH "ALGORITHMIC COMPLEXITIES"
@@ -5191,8 +5443,9 @@ watchers becomes O(1) with respect to priority handling.
 .IX Item "Processing signals: O(max_signal_number)"
 .PD
 Sending involves a system call \fIiff\fR there were no other \f(CW\*(C`ev_async_send\*(C'\fR
-calls in the current loop iteration. Checking for async and signal events
-involves iterating over all running async watchers or all signal numbers.
+calls in the current loop iteration and the loop is currently
+blocked. Checking for async and signal events involves iterating over all
+running async watchers or all signal numbers.
 .SH "PORTING FROM LIBEV 3.X TO 4.X"
 .IX Header "PORTING FROM LIBEV 3.X TO 4.X"
 The major version 4 introduced some incompatible changes to the \s-1API\s0.
diff --git a/third_party/libev/ev.c b/third_party/libev/ev.c
index 4187b18af730dd72179d902f3cc6d0ac2637ce41..449556d4e6c79ec133ce996c481ee8722bdab379 100644
--- a/third_party/libev/ev.c
+++ b/third_party/libev/ev.c
@@ -1,7 +1,7 @@
 /*
  * libev event processing core, watcher management
  *
- * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2012 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -45,6 +45,12 @@
 #  include "config.h"
 # endif
 
+#if HAVE_FLOOR
+# ifndef EV_USE_FLOOR
+#  define EV_USE_FLOOR 1
+# endif
+#endif
+
 # if HAVE_CLOCK_SYSCALL
 #  ifndef EV_USE_CLOCK_SYSCALL
 #   define EV_USE_CLOCK_SYSCALL 1
@@ -55,7 +61,7 @@
 #    define EV_USE_MONOTONIC 1
 #   endif
 #  endif
-# elif !defined(EV_USE_CLOCK_SYSCALL)
+# elif !defined EV_USE_CLOCK_SYSCALL
 #  define EV_USE_CLOCK_SYSCALL 0
 # endif
 
@@ -158,7 +164,6 @@
  
 #endif
 
-#include <math.h>
 #include <stdlib.h>
 #include <string.h>
 #include <fcntl.h>
@@ -180,7 +185,16 @@
 # include "ev.h"
 #endif
 
-EV_CPP(extern "C" {)
+#if EV_NO_THREADS
+# undef EV_NO_SMP
+# define EV_NO_SMP 1
+# undef ECB_NO_THREADS
+# define ECB_NO_THREADS 1
+#endif
+#if EV_NO_SMP
+# undef EV_NO_SMP
+# define ECB_NO_SMP 1
+#endif
 
 #ifndef _WIN32
 # include <sys/time.h>
@@ -207,25 +221,25 @@ EV_CPP(extern "C" {)
 /* this block tries to deduce configuration from header-defined symbols and defaults */
 
 /* try to deduce the maximum number of signals on this platform */
-#if defined (EV_NSIG)
+#if defined EV_NSIG
 /* use what's provided */
-#elif defined (NSIG)
+#elif defined NSIG
 # define EV_NSIG (NSIG)
-#elif defined(_NSIG)
+#elif defined _NSIG
 # define EV_NSIG (_NSIG)
-#elif defined (SIGMAX)
+#elif defined SIGMAX
 # define EV_NSIG (SIGMAX+1)
-#elif defined (SIG_MAX)
+#elif defined SIG_MAX
 # define EV_NSIG (SIG_MAX+1)
-#elif defined (_SIG_MAX)
+#elif defined _SIG_MAX
 # define EV_NSIG (_SIG_MAX+1)
-#elif defined (MAXSIG)
+#elif defined MAXSIG
 # define EV_NSIG (MAXSIG+1)
-#elif defined (MAX_SIG)
+#elif defined MAX_SIG
 # define EV_NSIG (MAX_SIG+1)
-#elif defined (SIGARRAYSIZE)
+#elif defined SIGARRAYSIZE
 # define EV_NSIG (SIGARRAYSIZE) /* Assume ary[SIGARRAYSIZE] */
-#elif defined (_sys_nsig)
+#elif defined _sys_nsig
 # define EV_NSIG (_sys_nsig) /* Solaris 2.5 */
 #else
 # error "unable to find value for NSIG, please report"
@@ -234,6 +248,10 @@ EV_CPP(extern "C" {)
 # define EV_NSIG 65
 #endif
 
+#ifndef EV_USE_FLOOR
+# define EV_USE_FLOOR 0
+#endif
+
 #ifndef EV_USE_CLOCK_SYSCALL
 # if __linux && __GLIBC__ >= 2
 #  define EV_USE_CLOCK_SYSCALL EV_FEATURE_OS
@@ -243,7 +261,7 @@ EV_CPP(extern "C" {)
 #endif
 
 #ifndef EV_USE_MONOTONIC
-# if defined (_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0
+# if defined _POSIX_MONOTONIC_CLOCK && _POSIX_MONOTONIC_CLOCK >= 0
 #  define EV_USE_MONOTONIC EV_FEATURE_OS
 # else
 #  define EV_USE_MONOTONIC 0
@@ -343,7 +361,7 @@ EV_CPP(extern "C" {)
 /* on linux, we can use a (slow) syscall to avoid a dependency on pthread, */
 /* which makes programs even slower. might work on other unices, too. */
 #if EV_USE_CLOCK_SYSCALL
-# include <syscall.h>
+# include <sys/syscall.h>
 # ifdef SYS_clock_gettime
 #  define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
 #  undef EV_USE_MONOTONIC
@@ -379,7 +397,7 @@ EV_CPP(extern "C" {)
 
 #if !EV_USE_NANOSLEEP
 /* hp-ux has it in sys/time.h, which we unconditionally include above */
-# if !defined(_WIN32) && !defined(__hpux)
+# if !defined _WIN32 && !defined __hpux
 #  include <sys/select.h>
 # endif
 #endif
@@ -445,14 +463,11 @@ struct signalfd_siginfo
 #endif
 
 /*
- * This is used to avoid floating point rounding problems.
- * It is added to ev_rt_now when scheduling periodics
- * to ensure progress, time-wise, even when rounding
- * errors are against us.
+ * This is used to work around floating point rounding problems.
  * This value is good at least till the year 4000.
- * Better solutions welcome.
  */
-#define TIME_EPSILON  0.0001220703125 /* 1/8192 */
+#define MIN_INTERVAL  0.0001220703125 /* 1/2**13, good till 4000 */
+/*#define MIN_INTERVAL  0.00000095367431640625 /* 1/2**20, good till 2200 */
 
 #define MIN_TIMEJUMP  1. /* minimum timejump that gets detected (if monotonic clock available) */
 #define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
@@ -460,23 +475,492 @@ struct signalfd_siginfo
 #define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
 #define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
 
-#if __GNUC__ >= 4
-# define expect(expr,value)         __builtin_expect ((expr),(value))
-# define noinline                   __attribute__ ((noinline))
+/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
+/* ECB.H BEGIN */
+/*
+ * libecb - http://software.schmorp.de/pkg/libecb
+ *
+ * Copyright (©) 2009-2012 Marc Alexander Lehmann <libecb@schmorp.de>
+ * Copyright (©) 2011 Emanuele Giaquinta
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modifica-
+ * tion, are permitted provided that the following conditions are met:
+ *
+ *   1.  Redistributions of source code must retain the above copyright notice,
+ *       this list of conditions and the following disclaimer.
+ *
+ *   2.  Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ECB_H
+#define ECB_H
+
+#ifdef _WIN32
+  typedef   signed char   int8_t;
+  typedef unsigned char  uint8_t;
+  typedef   signed short  int16_t;
+  typedef unsigned short uint16_t;
+  typedef   signed int    int32_t;
+  typedef unsigned int   uint32_t;
+  #if __GNUC__
+    typedef   signed long long int64_t;
+    typedef unsigned long long uint64_t;
+  #else /* _MSC_VER || __BORLANDC__ */
+    typedef   signed __int64   int64_t;
+    typedef unsigned __int64   uint64_t;
+  #endif
 #else
-# define expect(expr,value)         (expr)
-# define noinline
-# if __STDC_VERSION__ < 199901L && __GNUC__ < 2
-#  define inline
-# endif
+  #include <inttypes.h>
+#endif
+
+/* many compilers define _GNUC_ to some versions but then only implement
+ * what their idiot authors think are the "more important" extensions,
+ * causing enormous grief in return for some better fake benchmark numbers.
+ * or so.
+ * we try to detect these and simply assume they are not gcc - if they have
+ * an issue with that they should have done it right in the first place.
+ */
+#ifndef ECB_GCC_VERSION
+  #if !defined __GNUC_MINOR__ || defined __INTEL_COMPILER || defined __SUNPRO_C || defined __SUNPRO_CC || defined __llvm__ || defined __clang__
+    #define ECB_GCC_VERSION(major,minor) 0
+  #else
+    #define ECB_GCC_VERSION(major,minor) (__GNUC__ > (major) || (__GNUC__ == (major) && __GNUC_MINOR__ >= (minor)))
+  #endif
+#endif
+
+/*****************************************************************************/
+
+/* ECB_NO_THREADS - ecb is not used by multiple threads, ever */
+/* ECB_NO_SMP     - ecb might be used in multiple threads, but only on a single cpu */
+
+#if ECB_NO_THREADS
+# define ECB_NO_SMP 1
+#endif
+
+#if ECB_NO_THREADS || ECB_NO_SMP
+  #define ECB_MEMORY_FENCE do { } while (0)
+#endif
+
+#ifndef ECB_MEMORY_FENCE
+  #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
+    #if __i386 || __i386__
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
+      #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE /* non-lock xchg might be enough */
+      #define ECB_MEMORY_FENCE_RELEASE do { } while (0) /* unlikely to change in future cpus */
+    #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence" : : : "memory")
+      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("lfence" : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("sfence") /* play safe - not needed in any current cpu */
+    #elif __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync" : : : "memory")
+    #elif defined __ARM_ARCH_6__  || defined __ARM_ARCH_6J__  \
+       || defined __ARM_ARCH_6K__ || defined __ARM_ARCH_6ZK__
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mcr p15,0,%0,c7,c10,5" : : "r" (0) : "memory")
+    #elif defined __ARM_ARCH_7__  || defined __ARM_ARCH_7A__  \
+       || defined __ARM_ARCH_7M__ || defined __ARM_ARCH_7R__ 
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb" : : : "memory")
+    #elif __sparc || __sparc__
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad | " : : : "memory")
+      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad"                               : : : "memory")
+      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore |             #StoreStore")
+    #elif defined __s390__ || defined __s390x__
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("bcr 15,0" : : : "memory")
+    #elif defined __mips__
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("sync" : : : "memory")
+    #elif defined __alpha__
+      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mb" : : : "memory")
+    #endif
+  #endif
+#endif
+
+#ifndef ECB_MEMORY_FENCE
+  #if ECB_GCC_VERSION(4,4) || defined __INTEL_COMPILER || defined __clang__
+    #define ECB_MEMORY_FENCE         __sync_synchronize ()
+    /*#define ECB_MEMORY_FENCE_ACQUIRE ({ char dummy = 0; __sync_lock_test_and_set (&dummy, 1); }) */
+    /*#define ECB_MEMORY_FENCE_RELEASE ({ char dummy = 1; __sync_lock_release      (&dummy   ); }) */
+  #elif _MSC_VER >= 1400 /* VC++ 2005 */
+    #pragma intrinsic(_ReadBarrier,_WriteBarrier,_ReadWriteBarrier)
+    #define ECB_MEMORY_FENCE         _ReadWriteBarrier ()
+    #define ECB_MEMORY_FENCE_ACQUIRE _ReadWriteBarrier () /* according to msdn, _ReadBarrier is not a load fence */
+    #define ECB_MEMORY_FENCE_RELEASE _WriteBarrier ()
+  #elif defined _WIN32
+    #include <WinNT.h>
+    #define ECB_MEMORY_FENCE         MemoryBarrier () /* actually just xchg on x86... scary */
+  #elif __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
+    #include <mbarrier.h>
+    #define ECB_MEMORY_FENCE         __machine_rw_barrier ()
+    #define ECB_MEMORY_FENCE_ACQUIRE __machine_r_barrier  ()
+    #define ECB_MEMORY_FENCE_RELEASE __machine_w_barrier  ()
+  #elif __xlC__
+    #define ECB_MEMORY_FENCE         __sync ()
+  #endif
+#endif
+
+#ifndef ECB_MEMORY_FENCE
+  #if !ECB_AVOID_PTHREADS
+    /*
+     * if you get undefined symbol references to pthread_mutex_lock,
+     * or failure to find pthread.h, then you should implement
+     * the ECB_MEMORY_FENCE operations for your cpu/compiler
+     * OR provide pthread.h and link against the posix thread library
+     * of your system.
+     */
+    #include <pthread.h>
+    #define ECB_NEEDS_PTHREADS 1
+    #define ECB_MEMORY_FENCE_NEEDS_PTHREADS 1
+
+    static pthread_mutex_t ecb_mf_lock = PTHREAD_MUTEX_INITIALIZER;
+    #define ECB_MEMORY_FENCE do { pthread_mutex_lock (&ecb_mf_lock); pthread_mutex_unlock (&ecb_mf_lock); } while (0)
+  #endif
+#endif
+
+#if !defined ECB_MEMORY_FENCE_ACQUIRE && defined ECB_MEMORY_FENCE
+  #define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
+#endif
+
+#if !defined ECB_MEMORY_FENCE_RELEASE && defined ECB_MEMORY_FENCE
+  #define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
+#endif
+
+/*****************************************************************************/
+
+#define ECB_C99 (__STDC_VERSION__ >= 199901L)
+
+#if __cplusplus
+  #define ecb_inline static inline
+#elif ECB_GCC_VERSION(2,5)
+  #define ecb_inline static __inline__
+#elif ECB_C99
+  #define ecb_inline static inline
+#else
+  #define ecb_inline static
+#endif
+
+#if ECB_GCC_VERSION(3,3)
+  #define ecb_restrict __restrict__
+#elif ECB_C99
+  #define ecb_restrict restrict
+#else
+  #define ecb_restrict
+#endif
+
+typedef int ecb_bool;
+
+#define ECB_CONCAT_(a, b) a ## b
+#define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
+#define ECB_STRINGIFY_(a) # a
+#define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
+
+#define ecb_function_ ecb_inline
+
+#if ECB_GCC_VERSION(3,1)
+  #define ecb_attribute(attrlist)        __attribute__(attrlist)
+  #define ecb_is_constant(expr)          __builtin_constant_p (expr)
+  #define ecb_expect(expr,value)         __builtin_expect ((expr),(value))
+  #define ecb_prefetch(addr,rw,locality) __builtin_prefetch (addr, rw, locality)
+#else
+  #define ecb_attribute(attrlist)
+  #define ecb_is_constant(expr)          0
+  #define ecb_expect(expr,value)         (expr)
+  #define ecb_prefetch(addr,rw,locality)
+#endif
+
+/* no emulation for ecb_decltype */
+#if ECB_GCC_VERSION(4,5)
+  #define ecb_decltype(x) __decltype(x)
+#elif ECB_GCC_VERSION(3,0)
+  #define ecb_decltype(x) __typeof(x)
+#endif
+
+#define ecb_noinline   ecb_attribute ((__noinline__))
+#define ecb_noreturn   ecb_attribute ((__noreturn__))
+#define ecb_unused     ecb_attribute ((__unused__))
+#define ecb_const      ecb_attribute ((__const__))
+#define ecb_pure       ecb_attribute ((__pure__))
+
+#if ECB_GCC_VERSION(4,3)
+  #define ecb_artificial ecb_attribute ((__artificial__))
+  #define ecb_hot        ecb_attribute ((__hot__))
+  #define ecb_cold       ecb_attribute ((__cold__))
+#else
+  #define ecb_artificial
+  #define ecb_hot
+  #define ecb_cold
+#endif
+
+/* put around conditional expressions if you are very sure that the  */
+/* expression is mostly true or mostly false. note that these return */
+/* booleans, not the expression.                                     */
+#define ecb_expect_false(expr) ecb_expect (!!(expr), 0)
+#define ecb_expect_true(expr)  ecb_expect (!!(expr), 1)
+/* for compatibility to the rest of the world */
+#define ecb_likely(expr)   ecb_expect_true  (expr)
+#define ecb_unlikely(expr) ecb_expect_false (expr)
+
+/* count trailing zero bits and count # of one bits */
+#if ECB_GCC_VERSION(3,4)
+  /* we assume int == 32 bit, long == 32 or 64 bit and long long == 64 bit */
+  #define ecb_ld32(x)      (__builtin_clz      (x) ^ 31)
+  #define ecb_ld64(x)      (__builtin_clzll    (x) ^ 63)
+  #define ecb_ctz32(x)      __builtin_ctz      (x)
+  #define ecb_ctz64(x)      __builtin_ctzll    (x)
+  #define ecb_popcount32(x) __builtin_popcount (x)
+  /* no popcountll */
+#else
+  ecb_function_ int ecb_ctz32 (uint32_t x) ecb_const;
+  ecb_function_ int
+  ecb_ctz32 (uint32_t x)
+  {
+    int r = 0;
+
+    x &= ~x + 1; /* this isolates the lowest bit */
+
+#if ECB_branchless_on_i386
+    r += !!(x & 0xaaaaaaaa) << 0;
+    r += !!(x & 0xcccccccc) << 1;
+    r += !!(x & 0xf0f0f0f0) << 2;
+    r += !!(x & 0xff00ff00) << 3;
+    r += !!(x & 0xffff0000) << 4;
+#else
+    if (x & 0xaaaaaaaa) r +=  1;
+    if (x & 0xcccccccc) r +=  2;
+    if (x & 0xf0f0f0f0) r +=  4;
+    if (x & 0xff00ff00) r +=  8;
+    if (x & 0xffff0000) r += 16;
 #endif
 
-#define expect_false(expr) expect ((expr) != 0, 0)
-#define expect_true(expr)  expect ((expr) != 0, 1)
-#define inline_size        static inline
+    return r;
+  }
+
+  ecb_function_ int ecb_ctz64 (uint64_t x) ecb_const;
+  ecb_function_ int
+  ecb_ctz64 (uint64_t x)
+  {
+    int shift = x & 0xffffffffU ? 0 : 32;
+    return ecb_ctz32 (x >> shift) + shift;
+  }
+
+  ecb_function_ int ecb_popcount32 (uint32_t x) ecb_const;
+  ecb_function_ int
+  ecb_popcount32 (uint32_t x)
+  {
+    x -=  (x >> 1) & 0x55555555;
+    x  = ((x >> 2) & 0x33333333) + (x & 0x33333333);
+    x  = ((x >> 4) + x) & 0x0f0f0f0f;
+    x *= 0x01010101;
+
+    return x >> 24;
+  }
+
+  ecb_function_ int ecb_ld32 (uint32_t x) ecb_const;
+  ecb_function_ int ecb_ld32 (uint32_t x)
+  {
+    int r = 0;
+
+    if (x >> 16) { x >>= 16; r += 16; }
+    if (x >>  8) { x >>=  8; r +=  8; }
+    if (x >>  4) { x >>=  4; r +=  4; }
+    if (x >>  2) { x >>=  2; r +=  2; }
+    if (x >>  1) {           r +=  1; }
+
+    return r;
+  }
+
+  ecb_function_ int ecb_ld64 (uint64_t x) ecb_const;
+  ecb_function_ int ecb_ld64 (uint64_t x)
+  {
+    int r = 0;
+
+    if (x >> 32) { x >>= 32; r += 32; }
+
+    return r + ecb_ld32 (x);
+  }
+#endif
+
+ecb_function_ uint8_t  ecb_bitrev8  (uint8_t  x) ecb_const;
+ecb_function_ uint8_t  ecb_bitrev8  (uint8_t  x)
+{
+  return (  (x * 0x0802U & 0x22110U)
+          | (x * 0x8020U & 0x88440U)) * 0x10101U >> 16; 
+}
+
+ecb_function_ uint16_t ecb_bitrev16 (uint16_t x) ecb_const;
+ecb_function_ uint16_t ecb_bitrev16 (uint16_t x)
+{
+  x = ((x >>  1) &     0x5555) | ((x &     0x5555) <<  1);
+  x = ((x >>  2) &     0x3333) | ((x &     0x3333) <<  2);
+  x = ((x >>  4) &     0x0f0f) | ((x &     0x0f0f) <<  4);
+  x = ( x >>  8              ) | ( x               <<  8);
+
+  return x;
+}
+
+ecb_function_ uint32_t ecb_bitrev32 (uint32_t x) ecb_const;
+ecb_function_ uint32_t ecb_bitrev32 (uint32_t x)
+{
+  x = ((x >>  1) & 0x55555555) | ((x & 0x55555555) <<  1);
+  x = ((x >>  2) & 0x33333333) | ((x & 0x33333333) <<  2);
+  x = ((x >>  4) & 0x0f0f0f0f) | ((x & 0x0f0f0f0f) <<  4);
+  x = ((x >>  8) & 0x00ff00ff) | ((x & 0x00ff00ff) <<  8);
+  x = ( x >> 16              ) | ( x               << 16);
+
+  return x;
+}
+
+/* popcount64 is only available on 64 bit cpus as gcc builtin */
+/* so for this version we are lazy */
+ecb_function_ int ecb_popcount64 (uint64_t x) ecb_const;
+ecb_function_ int
+ecb_popcount64 (uint64_t x)
+{
+  return ecb_popcount32 (x) + ecb_popcount32 (x >> 32);
+}
+
+ecb_inline uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) ecb_const;
+ecb_inline uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) ecb_const;
+ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) ecb_const;
+ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) ecb_const;
+ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) ecb_const;
+ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) ecb_const;
+ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) ecb_const;
+ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) ecb_const;
+
+ecb_inline uint8_t  ecb_rotl8  (uint8_t  x, unsigned int count) { return (x >> ( 8 - count)) | (x << count); }
+ecb_inline uint8_t  ecb_rotr8  (uint8_t  x, unsigned int count) { return (x << ( 8 - count)) | (x >> count); }
+ecb_inline uint16_t ecb_rotl16 (uint16_t x, unsigned int count) { return (x >> (16 - count)) | (x << count); }
+ecb_inline uint16_t ecb_rotr16 (uint16_t x, unsigned int count) { return (x << (16 - count)) | (x >> count); }
+ecb_inline uint32_t ecb_rotl32 (uint32_t x, unsigned int count) { return (x >> (32 - count)) | (x << count); }
+ecb_inline uint32_t ecb_rotr32 (uint32_t x, unsigned int count) { return (x << (32 - count)) | (x >> count); }
+ecb_inline uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { return (x >> (64 - count)) | (x << count); }
+ecb_inline uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
+
+#if ECB_GCC_VERSION(4,3)
+  #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
+  #define ecb_bswap32(x)  __builtin_bswap32 (x)
+  #define ecb_bswap64(x)  __builtin_bswap64 (x)
+#else
+  ecb_function_ uint16_t ecb_bswap16 (uint16_t x) ecb_const;
+  ecb_function_ uint16_t
+  ecb_bswap16 (uint16_t x)
+  {
+    return ecb_rotl16 (x, 8);
+  }
+
+  ecb_function_ uint32_t ecb_bswap32 (uint32_t x) ecb_const;
+  ecb_function_ uint32_t
+  ecb_bswap32 (uint32_t x)
+  {
+    return (((uint32_t)ecb_bswap16 (x)) << 16) | ecb_bswap16 (x >> 16);
+  }
+
+  ecb_function_ uint64_t ecb_bswap64 (uint64_t x) ecb_const;
+  ecb_function_ uint64_t
+  ecb_bswap64 (uint64_t x)
+  {
+    return (((uint64_t)ecb_bswap32 (x)) << 32) | ecb_bswap32 (x >> 32);
+  }
+#endif
+
+#if ECB_GCC_VERSION(4,5)
+  #define ecb_unreachable() __builtin_unreachable ()
+#else
+  /* this seems to work fine, but gcc always emits a warning for it :/ */
+  ecb_inline void ecb_unreachable (void) ecb_noreturn;
+  ecb_inline void ecb_unreachable (void) { }
+#endif
+
+/* try to tell the compiler that some condition is definitely true */
+#define ecb_assume(cond) do { if (!(cond)) ecb_unreachable (); } while (0)
+
+ecb_inline unsigned char ecb_byteorder_helper (void) ecb_const;
+ecb_inline unsigned char
+ecb_byteorder_helper (void)
+{
+  const uint32_t u = 0x11223344;
+  return *(unsigned char *)&u;
+}
+
+ecb_inline ecb_bool ecb_big_endian    (void) ecb_const;
+ecb_inline ecb_bool ecb_big_endian    (void) { return ecb_byteorder_helper () == 0x11; }
+ecb_inline ecb_bool ecb_little_endian (void) ecb_const;
+ecb_inline ecb_bool ecb_little_endian (void) { return ecb_byteorder_helper () == 0x44; }
+
+#if ECB_GCC_VERSION(3,0) || ECB_C99
+  #define ecb_mod(m,n) ((m) % (n) + ((m) % (n) < 0 ? (n) : 0))
+#else
+  #define ecb_mod(m,n) ((m) < 0 ? ((n) - 1 - ((-1 - (m)) % (n))) : ((m) % (n)))
+#endif
+
+#if __cplusplus
+  template<typename T>
+  static inline T ecb_div_rd (T val, T div)
+  {
+    return val < 0 ? - ((-val + div - 1) / div) : (val          ) / div;
+  }
+  template<typename T>
+  static inline T ecb_div_ru (T val, T div)
+  {
+    return val < 0 ? - ((-val          ) / div) : (val + div - 1) / div;
+  }
+#else
+  #define ecb_div_rd(val,div) ((val) < 0 ? - ((-(val) + (div) - 1) / (div)) : ((val)            ) / (div))
+  #define ecb_div_ru(val,div) ((val) < 0 ? - ((-(val)            ) / (div)) : ((val) + (div) - 1) / (div))
+#endif
+
+#if ecb_cplusplus_does_not_suck
+  /* does not work for local types (http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2657.htm) */
+  template<typename T, int N>
+  static inline int ecb_array_length (const T (&arr)[N])
+  {
+    return N;
+  }
+#else
+  #define ecb_array_length(name) (sizeof (name) / sizeof (name [0]))
+#endif
+
+#endif
+
+/* ECB.H END */
+
+#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
+/* if your architecture doesn't need memory fences, e.g. because it is
+ * single-cpu/core, or if you use libev in a project that doesn't use libev
+ * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
+ * libev, in which cases the memory fences become nops.
+ * alternatively, you can remove this #error and link against libpthread,
+ * which will then provide the memory fences.
+ */
+# error "memory fences not defined for your architecture, please report"
+#endif
+
+#ifndef ECB_MEMORY_FENCE
+# define ECB_MEMORY_FENCE do { } while (0)
+# define ECB_MEMORY_FENCE_ACQUIRE ECB_MEMORY_FENCE
+# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
+#endif
+
+#define expect_false(cond) ecb_expect_false (cond)
+#define expect_true(cond)  ecb_expect_true  (cond)
+#define noinline           ecb_noinline
+
+#define inline_size        ecb_inline
 
 #if EV_FEATURE_CODE
-# define inline_speed      static inline
+# define inline_speed      ecb_inline
 #else
 # define inline_speed      static noinline
 #endif
@@ -525,11 +1009,59 @@ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work?
 
 /*****************************************************************************/
 
+/* define a suitable floor function (only used by periodics atm) */
+
+#if EV_USE_FLOOR
+# include <math.h>
+# define ev_floor(v) floor (v)
+#else
+
+#include <float.h>
+
+/* a floor() replacement function, should be independent of ev_tstamp type */
+static ev_tstamp noinline
+ev_floor (ev_tstamp v)
+{
+  /* the choice of shift factor is not terribly important */
+#if FLT_RADIX != 2 /* assume FLT_RADIX == 10 */
+  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 10000000000000000000. : 1000000000.;
+#else
+  const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
+#endif
+
+  /* argument too large for an unsigned long? */
+  if (expect_false (v >= shift))
+    {
+      ev_tstamp f;
+
+      if (v == v - 1.)
+        return v; /* very large number */
+
+      f = shift * ev_floor (v * (1. / shift));
+      return f + ev_floor (v - f);
+    }
+
+  /* special treatment for negative args? */
+  if (expect_false (v < 0.))
+    {
+      ev_tstamp f = -ev_floor (-v);
+
+      return f - (f == v ? 0 : 1);
+    }
+
+  /* fits into an unsigned long */
+  return (unsigned long)v;
+}
+
+#endif
+
+/*****************************************************************************/
+
 #ifdef __linux
 # include <sys/utsname.h>
 #endif
 
-static unsigned int noinline
+static unsigned int noinline ecb_cold
 ev_linux_version (void)
 {
 #ifdef __linux
@@ -568,22 +1100,22 @@ ev_linux_version (void)
 /*****************************************************************************/
 
 #if EV_AVOID_STDIO
-static void noinline
+static void noinline ecb_cold
 ev_printerr (const char *msg)
 {
   write (STDERR_FILENO, msg, strlen (msg));
 }
 #endif
 
-static void (*syserr_cb)(const char *msg);
+static void (*syserr_cb)(const char *msg) EV_THROW;
 
-void
-ev_set_syserr_cb (void (*cb)(const char *msg))
+void ecb_cold
+ev_set_syserr_cb (void (*cb)(const char *msg)) EV_THROW
 {
   syserr_cb = cb;
 }
 
-static void noinline
+static void noinline ecb_cold
 ev_syserr (const char *msg)
 {
   if (!msg)
@@ -624,10 +1156,10 @@ ev_realloc_emul (void *ptr, long size)
 #endif
 }
 
-static void *(*alloc)(void *ptr, long size) = ev_realloc_emul;
+static void *(*alloc)(void *ptr, long size) EV_THROW = ev_realloc_emul;
 
-void
-ev_set_allocator (void *(*cb)(void *ptr, long size))
+void ecb_cold
+ev_set_allocator (void *(*cb)(void *ptr, long size)) EV_THROW
 {
   alloc = cb;
 }
@@ -725,11 +1257,11 @@ typedef struct
   #include "ev_wrap.h"
 
   static struct ev_loop default_loop_struct;
-  struct ev_loop *ev_default_loop_ptr;
+  EV_API_DECL struct ev_loop *ev_default_loop_ptr = 0; /* needs to be initialised to make it a definition despite extern */
 
 #else
 
-  ev_tstamp ev_rt_now;
+  EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
   #define VAR(name,decl) static decl;
     #include "ev_vars.h"
   #undef VAR
@@ -754,7 +1286,7 @@ typedef struct
 
 #ifndef EV_HAVE_EV_TIME
 ev_tstamp
-ev_time (void)
+ev_time (void) EV_THROW
 {
 #if EV_USE_REALTIME
   if (expect_true (have_realtime))
@@ -788,14 +1320,14 @@ get_clock (void)
 
 #if EV_MULTIPLICITY
 ev_tstamp
-ev_now (EV_P)
+ev_now (EV_P) EV_THROW
 {
   return ev_rt_now;
 }
 #endif
 
 void
-ev_sleep (ev_tstamp delay)
+ev_sleep (ev_tstamp delay) EV_THROW
 {
   if (delay > 0.)
     {
@@ -804,7 +1336,7 @@ ev_sleep (ev_tstamp delay)
 
       EV_TS_SET (ts, delay);
       nanosleep (&ts, 0);
-#elif defined(_WIN32)
+#elif defined _WIN32
       Sleep ((unsigned long)(delay * 1e3));
 #else
       struct timeval tv;
@@ -818,14 +1350,6 @@ ev_sleep (ev_tstamp delay)
     }
 }
 
-inline_speed int
-ev_timeout_to_ms (ev_tstamp timeout)
-{
-  int ms = timeout * 1000. + .999999;
-
-  return expect_true (ms) ? ms : timeout < 1e-6 ? 0 : 1;
-}
-
 /*****************************************************************************/
 
 #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
@@ -841,7 +1365,7 @@ array_nextsize (int elem, int cur, int cnt)
     ncur <<= 1;
   while (cnt > ncur);
 
-  /* if size is large, round to MALLOC_ROUND - 4 * longs to accomodate malloc overhead */
+  /* if size is large, round to MALLOC_ROUND - 4 * longs to accommodate malloc overhead */
   if (elem * ncur > MALLOC_ROUND - sizeof (void *) * 4)
     {
       ncur *= elem;
@@ -853,7 +1377,7 @@ array_nextsize (int elem, int cur, int cnt)
   return ncur;
 }
 
-static noinline void *
+static void * noinline ecb_cold
 array_realloc (int elem, void *base, int *cur, int cnt)
 {
   *cur = array_nextsize (elem, *cur, cnt);
@@ -866,7 +1390,7 @@ array_realloc (int elem, void *base, int *cur, int cnt)
 #define array_needsize(type,base,cur,cnt,init)			\
   if (expect_false ((cnt) > (cur)))				\
     {								\
-      int ocur_ = (cur);					\
+      int ecb_unused ocur_ = (cur);					\
       (base) = (type *)array_realloc				\
          (sizeof (type), (base), &(cur), (cnt));		\
       init ((base) + (ocur_), (cur) - ocur_);			\
@@ -894,7 +1418,7 @@ pendingcb (EV_P_ ev_prepare *w, int revents)
 }
 
 void noinline
-ev_feed_event (EV_P_ void *w, int revents)
+ev_feed_event (EV_P_ void *w, int revents) EV_THROW
 {
   W w_ = (W)w;
   int pri = ABSPRI (w_);
@@ -963,7 +1487,7 @@ fd_event (EV_P_ int fd, int revents)
 }
 
 void
-ev_feed_fd_event (EV_P_ int fd, int revents)
+ev_feed_fd_event (EV_P_ int fd, int revents) EV_THROW
 {
   if (fd >= 0 && fd < anfdmax)
     fd_event_nocheck (EV_A_ fd, revents);
@@ -982,7 +1506,7 @@ fd_reify (EV_P)
       int fd = fdchanges [i];
       ANFD *anfd = anfds + fd;
 
-      if (anfd->reify & EV__IOFDSET)
+      if (anfd->reify & EV__IOFDSET && anfd->head)
         {
           SOCKET handle = EV_FD_TO_WIN32_HANDLE (fd);
 
@@ -1046,7 +1570,7 @@ fd_change (EV_P_ int fd, int flags)
 }
 
 /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
-inline_speed void
+inline_speed void ecb_cold
 fd_kill (EV_P_ int fd)
 {
   ev_io *w;
@@ -1059,7 +1583,7 @@ fd_kill (EV_P_ int fd)
 }
 
 /* check whether the given fd is actually valid, for error recovery */
-inline_size int
+inline_size int ecb_cold
 fd_valid (int fd)
 {
 #ifdef _WIN32
@@ -1070,7 +1594,7 @@ fd_valid (int fd)
 }
 
 /* called on EBADF to verify fds */
-static void noinline
+static void noinline ecb_cold
 fd_ebadf (EV_P)
 {
   int fd;
@@ -1082,7 +1606,7 @@ fd_ebadf (EV_P)
 }
 
 /* called on ENOMEM in select/poll to kill some fds and retry */
-static void noinline
+static void noinline ecb_cold
 fd_enomem (EV_P)
 {
   int fd;
@@ -1287,7 +1811,7 @@ static ANSIG signals [EV_NSIG - 1];
 
 #if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
 
-static void noinline
+static void noinline ecb_cold
 evpipe_init (EV_P)
 {
   if (!ev_is_active (&pipe_w))
@@ -1319,15 +1843,27 @@ evpipe_init (EV_P)
     }
 }
 
-inline_size void
+inline_speed void
 evpipe_write (EV_P_ EV_ATOMIC_T *flag)
 {
-  if (!*flag)
+  if (expect_true (*flag))
+    return;
+
+  *flag = 1;
+
+  ECB_MEMORY_FENCE_RELEASE; /* make sure flag is visible before the wakeup */
+
+  pipe_write_skipped = 1;
+
+  ECB_MEMORY_FENCE; /* make sure pipe_write_skipped is visible before we check pipe_write_wanted */
+
+  if (pipe_write_wanted)
     {
-      int old_errno = errno; /* save errno because write might clobber it */
-      char dummy;
+      int old_errno;
+
+      pipe_write_skipped = 0; /* just an optimisation, no fence needed */
 
-      *flag = 1;
+      old_errno = errno; /* save errno because write will clobber it */
 
 #if EV_USE_EVENTFD
       if (evfd >= 0)
@@ -1337,12 +1873,16 @@ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
         }
       else
 #endif
-        /* win32 people keep sending patches that change this write() to send() */
-        /* and then run away. but send() is wrong, it wants a socket handle on win32 */
-        /* so when you think this write should be a send instead, please find out */
-        /* where your send() is from - it's definitely not the microsoft send, and */
-        /* tell me. thank you. */
-        write (evpipe [1], &dummy, 1);
+        {
+          /* win32 people keep sending patches that change this write() to send() */
+          /* and then run away. but send() is wrong, it wants a socket handle on win32 */
+          /* so when you think this write should be a send instead, please find out */
+          /* where your send() is from - it's definitely not the microsoft send, and */
+          /* tell me. thank you. */
+          /* it might be that your problem is that your environment needs EV_USE_WSASOCKET */
+          /* check the ev documentation on how to use this flag */
+          write (evpipe [1], &(evpipe [1]), 1);
+        }
 
       errno = old_errno;
     }
@@ -1355,20 +1895,25 @@ pipecb (EV_P_ ev_io *iow, int revents)
 {
   int i;
 
-#if EV_USE_EVENTFD
-  if (evfd >= 0)
+  if (revents & EV_READ)
     {
-      uint64_t counter;
-      read (evfd, &counter, sizeof (uint64_t));
-    }
-  else
+#if EV_USE_EVENTFD
+      if (evfd >= 0)
+        {
+          uint64_t counter;
+          read (evfd, &counter, sizeof (uint64_t));
+        }
+      else
 #endif
-    {
-      char dummy;
-      /* see discussion in evpipe_write when you think this read should be recv in win32 */
-      read (evpipe [0], &dummy, 1);
+        {
+          char dummy;
+          /* see discussion in evpipe_write when you think this read should be recv in win32 */
+          read (evpipe [0], &dummy, 1);
+        }
     }
 
+  pipe_write_skipped = 0;
+
 #if EV_SIGNAL_ENABLE
   if (sig_pending)
     {
@@ -1398,7 +1943,7 @@ pipecb (EV_P_ ev_io *iow, int revents)
 /*****************************************************************************/
 
 void
-ev_feed_signal (int signum)
+ev_feed_signal (int signum) EV_THROW
 {
 #if EV_MULTIPLICITY
   EV_P = signals [signum - 1].loop;
@@ -1407,6 +1952,9 @@ ev_feed_signal (int signum)
     return;
 #endif
 
+  if (!ev_active (&pipe_w))
+    return;
+
   signals [signum - 1].pending = 1;
   evpipe_write (EV_A_ &sig_pending);
 }
@@ -1422,7 +1970,7 @@ ev_sighandler (int signum)
 }
 
 void noinline
-ev_feed_signal_event (EV_P_ int signum)
+ev_feed_signal_event (EV_P_ int signum) EV_THROW
 {
   WL w;
 
@@ -1547,20 +2095,20 @@ childcb (EV_P_ ev_signal *sw, int revents)
 # include "ev_select.c"
 #endif
 
-int
-ev_version_major (void)
+int ecb_cold
+ev_version_major (void) EV_THROW
 {
   return EV_VERSION_MAJOR;
 }
 
-int
-ev_version_minor (void)
+int ecb_cold
+ev_version_minor (void) EV_THROW
 {
   return EV_VERSION_MINOR;
 }
 
 /* return true if we are running with elevated privileges and should ignore env variables */
-int inline_size
+int inline_size ecb_cold
 enable_secure (void)
 {
 #ifdef _WIN32
@@ -1571,8 +2119,8 @@ enable_secure (void)
 #endif
 }
 
-unsigned int
-ev_supported_backends (void)
+unsigned int ecb_cold
+ev_supported_backends (void) EV_THROW
 {
   unsigned int flags = 0;
 
@@ -1585,8 +2133,8 @@ ev_supported_backends (void)
   return flags;
 }
 
-unsigned int
-ev_recommended_backends (void)
+unsigned int ecb_cold
+ev_recommended_backends (void) EV_THROW
 {
   unsigned int flags = ev_supported_backends ();
 
@@ -1607,8 +2155,8 @@ ev_recommended_backends (void)
   return flags;
 }
 
-unsigned int
-ev_embeddable_backends (void)
+unsigned int ecb_cold
+ev_embeddable_backends (void) EV_THROW
 {
   int flags = EVBACKEND_EPOLL | EVBACKEND_KQUEUE | EVBACKEND_PORT;
 
@@ -1620,54 +2168,56 @@ ev_embeddable_backends (void)
 }
 
 unsigned int
-ev_backend (EV_P)
+ev_backend (EV_P) EV_THROW
 {
   return backend;
 }
 
 #if EV_FEATURE_API
 unsigned int
-ev_iteration (EV_P)
+ev_iteration (EV_P) EV_THROW
 {
   return loop_count;
 }
 
 unsigned int
-ev_depth (EV_P)
+ev_depth (EV_P) EV_THROW
 {
   return loop_depth;
 }
 
 void
-ev_set_io_collect_interval (EV_P_ ev_tstamp interval)
+ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
 {
   io_blocktime = interval;
 }
 
 void
-ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval)
+ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_THROW
 {
   timeout_blocktime = interval;
 }
 
 void
-ev_set_userdata (EV_P_ void *data)
+ev_set_userdata (EV_P_ void *data) EV_THROW
 {
   userdata = data;
 }
 
 void *
-ev_userdata (EV_P)
+ev_userdata (EV_P) EV_THROW
 {
   return userdata;
 }
 
-void ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P))
+void
+ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P)) EV_THROW
 {
   invoke_cb = invoke_pending_cb;
 }
 
-void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P))
+void
+ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_THROW, void (*acquire)(EV_P) EV_THROW) EV_THROW
 {
   release_cb = release;
   acquire_cb = acquire;
@@ -1675,8 +2225,8 @@ void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P))
 #endif
 
 /* initialise a loop structure, must be zero-initialised */
-static void noinline
-loop_init (EV_P_ unsigned int flags)
+static void noinline ecb_cold
+loop_init (EV_P_ unsigned int flags) EV_THROW
 {
   if (!backend)
     {
@@ -1713,27 +2263,29 @@ loop_init (EV_P_ unsigned int flags)
           && getenv ("LIBEV_FLAGS"))
         flags = atoi (getenv ("LIBEV_FLAGS"));
 
-      ev_rt_now         = ev_time ();
-      mn_now            = get_clock ();
-      now_floor         = mn_now;
-      rtmn_diff         = ev_rt_now - mn_now;
+      ev_rt_now          = ev_time ();
+      mn_now             = get_clock ();
+      now_floor          = mn_now;
+      rtmn_diff          = ev_rt_now - mn_now;
 #if EV_FEATURE_API
-      invoke_cb         = ev_invoke_pending;
+      invoke_cb          = ev_invoke_pending;
 #endif
 
-      io_blocktime      = 0.;
-      timeout_blocktime = 0.;
-      backend           = 0;
-      backend_fd        = -1;
-      sig_pending       = 0;
+      io_blocktime       = 0.;
+      timeout_blocktime  = 0.;
+      backend            = 0;
+      backend_fd         = -1;
+      sig_pending        = 0;
 #if EV_ASYNC_ENABLE
-      async_pending     = 0;
+      async_pending      = 0;
 #endif
+      pipe_write_skipped = 0;
+      pipe_write_wanted  = 0;
 #if EV_USE_INOTIFY
-      fs_fd             = flags & EVFLAG_NOINOTIFY ? -1 : -2;
+      fs_fd              = flags & EVFLAG_NOINOTIFY ? -1 : -2;
 #endif
 #if EV_USE_SIGNALFD
-      sigfd             = flags & EVFLAG_SIGNALFD  ? -2 : -1;
+      sigfd              = flags & EVFLAG_SIGNALFD  ? -2 : -1;
 #endif
 
       if (!(flags & EVBACKEND_MASK))
@@ -1768,7 +2320,7 @@ loop_init (EV_P_ unsigned int flags)
 }
 
 /* free up a loop structure */
-void
+void ecb_cold
 ev_loop_destroy (EV_P)
 {
   int i;
@@ -1908,12 +2460,7 @@ loop_fork (EV_P)
 
   if (ev_is_active (&pipe_w))
     {
-      /* this "locks" the handlers against writing to the pipe */
-      /* while we modify the fd vars */
-      sig_pending   = 1;
-#if EV_ASYNC_ENABLE
-      async_pending = 1;
-#endif
+      /* pipe_write_wanted must be false now, so modifying fd vars should be safe */
 
       ev_ref (EV_A);
       ev_io_stop (EV_A_ &pipe_w);
@@ -1941,8 +2488,8 @@ loop_fork (EV_P)
 
 #if EV_MULTIPLICITY
 
-struct ev_loop *
-ev_loop_new (unsigned int flags)
+struct ev_loop * ecb_cold
+ev_loop_new (unsigned int flags) EV_THROW
 {
   EV_P = (struct ev_loop *)ev_malloc (sizeof (struct ev_loop));
 
@@ -1959,7 +2506,7 @@ ev_loop_new (unsigned int flags)
 #endif /* multiplicity */
 
 #if EV_VERIFY
-static void noinline
+static void noinline ecb_cold
 verify_watcher (EV_P_ W w)
 {
   assert (("libev: watcher has invalid priority", ABSPRI (w) >= 0 && ABSPRI (w) < NUMPRI));
@@ -1968,7 +2515,7 @@ verify_watcher (EV_P_ W w)
     assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
 }
 
-static void noinline
+static void noinline ecb_cold
 verify_heap (EV_P_ ANHE *heap, int N)
 {
   int i;
@@ -1983,7 +2530,7 @@ verify_heap (EV_P_ ANHE *heap, int N)
     }
 }
 
-static void noinline
+static void noinline ecb_cold
 array_verify (EV_P_ W *ws, int cnt)
 {
   while (cnt--)
@@ -1995,8 +2542,8 @@ array_verify (EV_P_ W *ws, int cnt)
 #endif
 
 #if EV_FEATURE_API
-void
-ev_verify (EV_P)
+void ecb_cold
+ev_verify (EV_P) EV_THROW
 {
 #if EV_VERIFY
   int i;
@@ -2071,11 +2618,11 @@ ev_verify (EV_P)
 #endif
 
 #if EV_MULTIPLICITY
-struct ev_loop *
+struct ev_loop * ecb_cold
 #else
 int
 #endif
-ev_default_loop (unsigned int flags)
+ev_default_loop (unsigned int flags) EV_THROW
 {
   if (!ev_default_loop_ptr)
     {
@@ -2104,7 +2651,7 @@ ev_default_loop (unsigned int flags)
 }
 
 void
-ev_loop_fork (EV_P)
+ev_loop_fork (EV_P) EV_THROW
 {
   postfork = 1; /* must be in line with ev_default_fork */
 }
@@ -2118,7 +2665,7 @@ ev_invoke (EV_P_ void *w, int revents)
 }
 
 unsigned int
-ev_pending_count (EV_P)
+ev_pending_count (EV_P) EV_THROW
 {
   int pri;
   unsigned int count = 0;
@@ -2210,12 +2757,28 @@ timers_reify (EV_P)
 
 #if EV_PERIODIC_ENABLE
 
-inline_speed void
+static void noinline
 periodic_recalc (EV_P_ ev_periodic *w)
 {
-  /* TODO: use slow but potentially more correct incremental algo, */
-  /* also do not rely on ceil */
-  ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
+  ev_tstamp interval = w->interval > MIN_INTERVAL ? w->interval : MIN_INTERVAL;
+  ev_tstamp at = w->offset + interval * ev_floor ((ev_rt_now - w->offset) / interval);
+
+  /* the above almost always errs on the low side */
+  while (at <= ev_rt_now)
+    {
+      ev_tstamp nat = at + w->interval;
+
+      /* when resolution fails us, we use ev_rt_now */
+      if (expect_false (nat == at))
+        {
+          at = ev_rt_now;
+          break;
+        }
+
+      at = nat;
+    }
+
+  ev_at (w) = at;
 }
 
 /* make periodics pending */
@@ -2247,20 +2810,6 @@ periodics_reify (EV_P)
           else if (w->interval)
             {
               periodic_recalc (EV_A_ w);
-
-              /* if next trigger time is not sufficiently in the future, put it there */
-              /* this might happen because of floating point inexactness */
-              if (ev_at (w) - ev_rt_now < TIME_EPSILON)
-                {
-                  ev_at (w) += w->interval;
-
-                  /* if interval is unreasonably low we might still have a time in the past */
-                  /* so correct this. this will make the periodic very inexact, but the user */
-                  /* has effectively asked to get triggered more often than possible */
-                  if (ev_at (w) < ev_rt_now)
-                    ev_at (w) = ev_rt_now;
-                }
-
               ANHE_at_cache (periodics [HEAP0]);
               downheap (periodics, periodiccnt, HEAP0);
             }
@@ -2278,7 +2827,7 @@ periodics_reify (EV_P)
 
 /* simply recalculate all periodics */
 /* TODO: maybe ensure that at least one event happens when jumping forward? */
-static void noinline
+static void noinline ecb_cold
 periodics_reschedule (EV_P)
 {
   int i;
@@ -2301,7 +2850,7 @@ periodics_reschedule (EV_P)
 #endif
 
 /* adjust all timers by a given offset */
-static void noinline
+static void noinline ecb_cold
 timers_reschedule (EV_P_ ev_tstamp adjust)
 {
   int i;
@@ -2348,9 +2897,12 @@ time_update (EV_P_ ev_tstamp max_block)
        */
       for (i = 4; --i; )
         {
+          ev_tstamp diff;
           rtmn_diff = ev_rt_now - mn_now;
 
-          if (expect_true (fabs (odiff - rtmn_diff) < MIN_TIMEJUMP))
+          diff = odiff - rtmn_diff;
+
+          if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
             return; /* all is well */
 
           ev_rt_now = ev_time ();
@@ -2382,7 +2934,7 @@ time_update (EV_P_ ev_tstamp max_block)
     }
 }
 
-void
+int
 ev_run (EV_P_ int flags)
 {
 #if EV_FEATURE_API
@@ -2450,20 +3002,25 @@ ev_run (EV_P_ int flags)
         /* update time to cancel out callback processing overhead */
         time_update (EV_A_ 1e100);
 
-        if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt)))
+        /* from now on, we want a pipe-wake-up */
+        pipe_write_wanted = 1;
+
+        ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
+
+        if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
           {
             waittime = MAX_BLOCKTIME;
 
             if (timercnt)
               {
-                ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now + backend_fudge;
+                ev_tstamp to = ANHE_at (timers [HEAP0]) - mn_now;
                 if (waittime > to) waittime = to;
               }
 
 #if EV_PERIODIC_ENABLE
             if (periodiccnt)
               {
-                ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now + backend_fudge;
+                ev_tstamp to = ANHE_at (periodics [HEAP0]) - ev_rt_now;
                 if (waittime > to) waittime = to;
               }
 #endif
@@ -2472,13 +3029,18 @@ ev_run (EV_P_ int flags)
             if (expect_false (waittime < timeout_blocktime))
               waittime = timeout_blocktime;
 
+            /* at this point, we NEED to wait, so we have to ensure */
+            /* to pass a minimum nonzero value to the backend */
+            if (expect_false (waittime < backend_mintime))
+              waittime = backend_mintime;
+
             /* extra check because io_blocktime is commonly 0 */
             if (expect_false (io_blocktime))
               {
                 sleeptime = io_blocktime - (mn_now - prev_mn_now);
 
-                if (sleeptime > waittime - backend_fudge)
-                  sleeptime = waittime - backend_fudge;
+                if (sleeptime > waittime - backend_mintime)
+                  sleeptime = waittime - backend_mintime;
 
                 if (expect_true (sleeptime > 0.))
                   {
@@ -2495,6 +3057,15 @@ ev_run (EV_P_ int flags)
         backend_poll (EV_A_ waittime);
         assert ((loop_done = EVBREAK_CANCEL, 1)); /* assert for side effect */
 
+        pipe_write_wanted = 0; /* just an optimisation, no fence needed */
+
+        if (pipe_write_skipped)
+          {
+            assert (("libev: pipe_w not active, but pipe not written", ev_is_active (&pipe_w)));
+            ev_feed_event (EV_A_ &pipe_w, EV_CUSTOM);
+          }
+
+
         /* update ev_rt_now, do magic */
         time_update (EV_A_ waittime + sleeptime);
       }
@@ -2530,40 +3101,42 @@ ev_run (EV_P_ int flags)
 #if EV_FEATURE_API
   --loop_depth;
 #endif
+
+  return activecnt;
 }
 
 void
-ev_break (EV_P_ int how)
+ev_break (EV_P_ int how) EV_THROW
 {
   loop_done = how;
 }
 
 void
-ev_ref (EV_P)
+ev_ref (EV_P) EV_THROW
 {
   ++activecnt;
 }
 
 void
-ev_unref (EV_P)
+ev_unref (EV_P) EV_THROW
 {
   --activecnt;
 }
 
 void
-ev_now_update (EV_P)
+ev_now_update (EV_P) EV_THROW
 {
   time_update (EV_A_ 1e100);
 }
 
 void
-ev_suspend (EV_P)
+ev_suspend (EV_P) EV_THROW
 {
   ev_now_update (EV_A);
 }
 
 void
-ev_resume (EV_P)
+ev_resume (EV_P) EV_THROW
 {
   ev_tstamp mn_prev = mn_now;
 
@@ -2612,7 +3185,7 @@ clear_pending (EV_P_ W w)
 }
 
 int
-ev_clear_pending (EV_P_ void *w)
+ev_clear_pending (EV_P_ void *w) EV_THROW
 {
   W w_ = (W)w;
   int pending = w_->pending;
@@ -2655,7 +3228,7 @@ ev_stop (EV_P_ W w)
 /*****************************************************************************/
 
 void noinline
-ev_io_start (EV_P_ ev_io *w)
+ev_io_start (EV_P_ ev_io *w) EV_THROW
 {
   int fd = w->fd;
 
@@ -2678,7 +3251,7 @@ ev_io_start (EV_P_ ev_io *w)
 }
 
 void noinline
-ev_io_stop (EV_P_ ev_io *w)
+ev_io_stop (EV_P_ ev_io *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -2697,7 +3270,7 @@ ev_io_stop (EV_P_ ev_io *w)
 }
 
 void noinline
-ev_timer_start (EV_P_ ev_timer *w)
+ev_timer_start (EV_P_ ev_timer *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -2721,7 +3294,7 @@ ev_timer_start (EV_P_ ev_timer *w)
 }
 
 void noinline
-ev_timer_stop (EV_P_ ev_timer *w)
+ev_timer_stop (EV_P_ ev_timer *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -2751,10 +3324,12 @@ ev_timer_stop (EV_P_ ev_timer *w)
 }
 
 void noinline
-ev_timer_again (EV_P_ ev_timer *w)
+ev_timer_again (EV_P_ ev_timer *w) EV_THROW
 {
   EV_FREQUENT_CHECK;
 
+  clear_pending (EV_A_ (W)w);
+
   if (ev_is_active (w))
     {
       if (w->repeat)
@@ -2776,14 +3351,14 @@ ev_timer_again (EV_P_ ev_timer *w)
 }
 
 ev_tstamp
-ev_timer_remaining (EV_P_ ev_timer *w)
+ev_timer_remaining (EV_P_ ev_timer *w) EV_THROW
 {
   return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
 }
 
 #if EV_PERIODIC_ENABLE
 void noinline
-ev_periodic_start (EV_P_ ev_periodic *w)
+ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -2813,7 +3388,7 @@ ev_periodic_start (EV_P_ ev_periodic *w)
 }
 
 void noinline
-ev_periodic_stop (EV_P_ ev_periodic *w)
+ev_periodic_stop (EV_P_ ev_periodic *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -2841,7 +3416,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w)
 }
 
 void noinline
-ev_periodic_again (EV_P_ ev_periodic *w)
+ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW
 {
   /* TODO: use adjustheap and recalculation */
   ev_periodic_stop (EV_A_ w);
@@ -2856,7 +3431,7 @@ ev_periodic_again (EV_P_ ev_periodic *w)
 #if EV_SIGNAL_ENABLE
 
 void noinline
-ev_signal_start (EV_P_ ev_signal *w)
+ev_signal_start (EV_P_ ev_signal *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -2937,7 +3512,7 @@ ev_signal_start (EV_P_ ev_signal *w)
 }
 
 void noinline
-ev_signal_stop (EV_P_ ev_signal *w)
+ev_signal_stop (EV_P_ ev_signal *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -2978,7 +3553,7 @@ ev_signal_stop (EV_P_ ev_signal *w)
 #if EV_CHILD_ENABLE
 
 void
-ev_child_start (EV_P_ ev_child *w)
+ev_child_start (EV_P_ ev_child *w) EV_THROW
 {
 #if EV_MULTIPLICITY
   assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
@@ -2995,7 +3570,7 @@ ev_child_start (EV_P_ ev_child *w)
 }
 
 void
-ev_child_stop (EV_P_ ev_child *w)
+ev_child_stop (EV_P_ ev_child *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -3157,7 +3732,7 @@ infy_cb (EV_P_ ev_io *w, int revents)
     }
 }
 
-inline_size void
+inline_size void ecb_cold
 ev_check_2625 (EV_P)
 {
   /* kernels < 2.6.25 are borked
@@ -3172,7 +3747,7 @@ ev_check_2625 (EV_P)
 inline_size int
 infy_newfd (void)
 {
-#if defined (IN_CLOEXEC) && defined (IN_NONBLOCK)
+#if defined IN_CLOEXEC && defined IN_NONBLOCK
   int fd = inotify_init1 (IN_CLOEXEC | IN_NONBLOCK);
   if (fd >= 0)
     return fd;
@@ -3257,7 +3832,7 @@ infy_fork (EV_P)
 #endif
 
 void
-ev_stat_stat (EV_P_ ev_stat *w)
+ev_stat_stat (EV_P_ ev_stat *w) EV_THROW
 {
   if (lstat (w->path, &w->attr) < 0)
     w->attr.st_nlink = 0;
@@ -3306,7 +3881,7 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
 }
 
 void
-ev_stat_start (EV_P_ ev_stat *w)
+ev_stat_start (EV_P_ ev_stat *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -3337,7 +3912,7 @@ ev_stat_start (EV_P_ ev_stat *w)
 }
 
 void
-ev_stat_stop (EV_P_ ev_stat *w)
+ev_stat_stop (EV_P_ ev_stat *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -3363,7 +3938,7 @@ ev_stat_stop (EV_P_ ev_stat *w)
 
 #if EV_IDLE_ENABLE
 void
-ev_idle_start (EV_P_ ev_idle *w)
+ev_idle_start (EV_P_ ev_idle *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -3386,7 +3961,7 @@ ev_idle_start (EV_P_ ev_idle *w)
 }
 
 void
-ev_idle_stop (EV_P_ ev_idle *w)
+ev_idle_stop (EV_P_ ev_idle *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -3410,7 +3985,7 @@ ev_idle_stop (EV_P_ ev_idle *w)
 
 #if EV_PREPARE_ENABLE
 void
-ev_prepare_start (EV_P_ ev_prepare *w)
+ev_prepare_start (EV_P_ ev_prepare *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -3425,7 +4000,7 @@ ev_prepare_start (EV_P_ ev_prepare *w)
 }
 
 void
-ev_prepare_stop (EV_P_ ev_prepare *w)
+ev_prepare_stop (EV_P_ ev_prepare *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -3448,7 +4023,7 @@ ev_prepare_stop (EV_P_ ev_prepare *w)
 
 #if EV_CHECK_ENABLE
 void
-ev_check_start (EV_P_ ev_check *w)
+ev_check_start (EV_P_ ev_check *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -3463,7 +4038,7 @@ ev_check_start (EV_P_ ev_check *w)
 }
 
 void
-ev_check_stop (EV_P_ ev_check *w)
+ev_check_stop (EV_P_ ev_check *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -3486,7 +4061,7 @@ ev_check_stop (EV_P_ ev_check *w)
 
 #if EV_EMBED_ENABLE
 void noinline
-ev_embed_sweep (EV_P_ ev_embed *w)
+ev_embed_sweep (EV_P_ ev_embed *w) EV_THROW
 {
   ev_run (w->other, EVRUN_NOWAIT);
 }
@@ -3544,7 +4119,7 @@ embed_idle_cb (EV_P_ ev_idle *idle, int revents)
 #endif
 
 void
-ev_embed_start (EV_P_ ev_embed *w)
+ev_embed_start (EV_P_ ev_embed *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -3575,7 +4150,7 @@ ev_embed_start (EV_P_ ev_embed *w)
 }
 
 void
-ev_embed_stop (EV_P_ ev_embed *w)
+ev_embed_stop (EV_P_ ev_embed *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -3595,7 +4170,7 @@ ev_embed_stop (EV_P_ ev_embed *w)
 
 #if EV_FORK_ENABLE
 void
-ev_fork_start (EV_P_ ev_fork *w)
+ev_fork_start (EV_P_ ev_fork *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -3610,7 +4185,7 @@ ev_fork_start (EV_P_ ev_fork *w)
 }
 
 void
-ev_fork_stop (EV_P_ ev_fork *w)
+ev_fork_stop (EV_P_ ev_fork *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -3633,7 +4208,7 @@ ev_fork_stop (EV_P_ ev_fork *w)
 
 #if EV_CLEANUP_ENABLE
 void
-ev_cleanup_start (EV_P_ ev_cleanup *w)
+ev_cleanup_start (EV_P_ ev_cleanup *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -3650,7 +4225,7 @@ ev_cleanup_start (EV_P_ ev_cleanup *w)
 }
 
 void
-ev_cleanup_stop (EV_P_ ev_cleanup *w)
+ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -3674,7 +4249,7 @@ ev_cleanup_stop (EV_P_ ev_cleanup *w)
 
 #if EV_ASYNC_ENABLE
 void
-ev_async_start (EV_P_ ev_async *w)
+ev_async_start (EV_P_ ev_async *w) EV_THROW
 {
   if (expect_false (ev_is_active (w)))
     return;
@@ -3693,7 +4268,7 @@ ev_async_start (EV_P_ ev_async *w)
 }
 
 void
-ev_async_stop (EV_P_ ev_async *w)
+ev_async_stop (EV_P_ ev_async *w) EV_THROW
 {
   clear_pending (EV_A_ (W)w);
   if (expect_false (!ev_is_active (w)))
@@ -3714,7 +4289,7 @@ ev_async_stop (EV_P_ ev_async *w)
 }
 
 void
-ev_async_send (EV_P_ ev_async *w)
+ev_async_send (EV_P_ ev_async *w) EV_THROW
 {
   w->sent = 1;
   evpipe_write (EV_A_ &async_pending);
@@ -3761,7 +4336,7 @@ once_cb_to (EV_P_ ev_timer *w, int revents)
 }
 
 void
-ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg)
+ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_THROW
 {
   struct ev_once *once = (struct ev_once *)ev_malloc (sizeof (struct ev_once));
 
@@ -3792,8 +4367,8 @@ ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, vo
 /*****************************************************************************/
 
 #if EV_WALK_ENABLE
-void
-ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w))
+void ecb_cold
+ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_THROW
 {
   int i, j;
   ev_watcher_list *wl, *wn;
@@ -3846,7 +4421,7 @@ ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w))
 
 #if EV_IDLE_ENABLE
   if (types & EV_IDLE)
-    for (j = NUMPRI; i--; )
+    for (j = NUMPRI; j--; )
       for (i = idlecnt [j]; i--; )
         cb (EV_A_ EV_IDLE, idles [j][i]);
 #endif
@@ -3909,5 +4484,3 @@ ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w))
   #include "ev_wrap.h"
 #endif
 
-EV_CPP(})
-
diff --git a/third_party/libev/ev.h b/third_party/libev/ev.h
index 27c1778f1fbb27356d1a02ef513438798f7d8fbe..a8973abf7d1ff5777d91f217293055f11b827463 100644
--- a/third_party/libev/ev.h
+++ b/third_party/libev/ev.h
@@ -1,7 +1,7 @@
 /*
  * libev native API header
  *
- * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2012 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -46,6 +46,8 @@
 # define EV_CPP(x)
 #endif
 
+#define EV_THROW EV_CPP(throw())
+
 EV_CPP(extern "C" {)
 
 /*****************************************************************************/
@@ -56,7 +58,11 @@ EV_CPP(extern "C" {)
 #endif
 
 #ifndef EV_FEATURES
-# define EV_FEATURES 0x7f
+# if defined __OPTIMIZE_SIZE__
+#  define EV_FEATURES 0x7c
+# else
+#  define EV_FEATURES 0x7f
+# endif
 #endif
 
 #define EV_FEATURE_CODE     ((EV_FEATURES) &  1)
@@ -185,6 +191,12 @@ struct ev_loop;
 # define EV_INLINE static
 #endif
 
+#ifdef EV_API_STATIC
+# define EV_API_DECL static
+#else
+# define EV_API_DECL extern
+#endif
+
 /* EV_PROTOTYPES can be used to switch of prototype declarations */
 #ifndef EV_PROTOTYPES
 # define EV_PROTOTYPES 1
@@ -193,7 +205,7 @@ struct ev_loop;
 /*****************************************************************************/
 
 #define EV_VERSION_MAJOR 4
-#define EV_VERSION_MINOR 4
+#define EV_VERSION_MINOR 11
 
 /* eventmask, revents, events... */
 enum {
@@ -321,7 +333,7 @@ typedef struct ev_periodic
 
   ev_tstamp offset; /* rw */
   ev_tstamp interval; /* rw */
-  ev_tstamp (*reschedule_cb)(struct ev_periodic *w, ev_tstamp now); /* rw */
+  ev_tstamp (*reschedule_cb)(struct ev_periodic *w, ev_tstamp now) EV_THROW; /* rw */
 } ev_periodic;
 
 /* invoked when the given signal has been received */
@@ -508,15 +520,15 @@ enum {
 };
 
 #if EV_PROTOTYPES
-int ev_version_major (void);
-int ev_version_minor (void);
+EV_API_DECL int ev_version_major (void) EV_THROW;
+EV_API_DECL int ev_version_minor (void) EV_THROW;
 
-unsigned int ev_supported_backends (void);
-unsigned int ev_recommended_backends (void);
-unsigned int ev_embeddable_backends (void);
+EV_API_DECL unsigned int ev_supported_backends (void) EV_THROW;
+EV_API_DECL unsigned int ev_recommended_backends (void) EV_THROW;
+EV_API_DECL unsigned int ev_embeddable_backends (void) EV_THROW;
 
-ev_tstamp ev_time (void);
-void ev_sleep (ev_tstamp delay); /* sleep for a while */
+EV_API_DECL ev_tstamp ev_time (void) EV_THROW;
+EV_API_DECL void ev_sleep (ev_tstamp delay) EV_THROW; /* sleep for a while */
 
 /* Sets the allocation function to use, works like realloc.
  * It is used to allocate and free memory.
@@ -524,22 +536,26 @@ void ev_sleep (ev_tstamp delay); /* sleep for a while */
  * or take some potentially destructive action.
  * The default is your system realloc function.
  */
-void ev_set_allocator (void *(*cb)(void *ptr, long size));
+EV_API_DECL void ev_set_allocator (void *(*cb)(void *ptr, long size) EV_THROW) EV_THROW;
 
 /* set the callback function to call on a
  * retryable syscall error
  * (such as failed select, poll, epoll_wait)
  */
-void ev_set_syserr_cb (void (*cb)(const char *msg));
+EV_API_DECL void ev_set_syserr_cb (void (*cb)(const char *msg) EV_THROW) EV_THROW;
 
 #if EV_MULTIPLICITY
 
 /* the default loop is the only one that handles signals and child watchers */
 /* you can call this as often as you like */
-struct ev_loop *ev_default_loop (unsigned int flags EV_CPP (= 0));
+EV_API_DECL struct ev_loop *ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_THROW;
+
+#ifdef EV_API_STATIC
+EV_API_DECL struct ev_loop *ev_default_loop_ptr;
+#endif
 
 EV_INLINE struct ev_loop *
-ev_default_loop_uc_ (void)
+ev_default_loop_uc_ (void) EV_THROW
 {
   extern struct ev_loop *ev_default_loop_ptr;
 
@@ -547,31 +563,31 @@ ev_default_loop_uc_ (void)
 }
 
 EV_INLINE int
-ev_is_default_loop (EV_P)
+ev_is_default_loop (EV_P) EV_THROW
 {
   return EV_A == EV_DEFAULT_UC;
 }
 
 /* create and destroy alternative loops that don't handle signals */
-struct ev_loop *ev_loop_new (unsigned int flags EV_CPP (= 0));
+EV_API_DECL struct ev_loop *ev_loop_new (unsigned int flags EV_CPP (= 0)) EV_THROW;
 
-ev_tstamp ev_now (EV_P); /* time w.r.t. timers and the eventloop, updated after each poll */
+EV_API_DECL ev_tstamp ev_now (EV_P) EV_THROW; /* time w.r.t. timers and the eventloop, updated after each poll */
 
 #else
 
-int ev_default_loop (unsigned int flags EV_CPP (= 0)); /* returns true when successful */
+EV_API_DECL int ev_default_loop (unsigned int flags EV_CPP (= 0)) EV_THROW; /* returns true when successful */
+
+EV_API_DECL ev_tstamp ev_rt_now;
 
 EV_INLINE ev_tstamp
-ev_now (void)
+ev_now (void) EV_THROW
 {
-  extern ev_tstamp ev_rt_now;
-
   return ev_rt_now;
 }
 
 /* looks weird, but ev_is_default_loop (EV_A) still works if this exists */
 EV_INLINE int
-ev_is_default_loop (void)
+ev_is_default_loop (void) EV_THROW
 {
   return 1;
 }
@@ -579,23 +595,23 @@ ev_is_default_loop (void)
 #endif /* multiplicity */
 
 /* destroy event loops, also works for the default loop */
-void ev_loop_destroy (EV_P);
+EV_API_DECL void ev_loop_destroy (EV_P);
 
 /* this needs to be called after fork, to duplicate the loop */
 /* when you want to re-use it in the child */
 /* you can call it in either the parent or the child */
 /* you can actually call it at any time, anywhere :) */
-void ev_loop_fork (EV_P);
+EV_API_DECL void ev_loop_fork (EV_P) EV_THROW;
 
-unsigned int ev_backend (EV_P); /* backend in use by loop */
+EV_API_DECL unsigned int ev_backend (EV_P) EV_THROW; /* backend in use by loop */
 
-void ev_now_update (EV_P); /* update event loop time */
+EV_API_DECL void ev_now_update (EV_P) EV_THROW; /* update event loop time */
 
 #if EV_WALK_ENABLE
 /* walk (almost) all watchers in the loop of a given type, invoking the */
 /* callback on every such watcher. The callback might stop the watcher, */
 /* but do nothing else with the loop */
-void ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w));
+EV_API_DECL void ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w)) EV_THROW;
 #endif
 
 #endif /* prototypes */
@@ -614,45 +630,45 @@ enum {
 };
 
 #if EV_PROTOTYPES
-void ev_run (EV_P_ int flags EV_CPP (= 0));
-void ev_break (EV_P_ int how EV_CPP (= EVBREAK_ONE)); /* break out of the loop */
+EV_API_DECL int  ev_run (EV_P_ int flags EV_CPP (= 0));
+EV_API_DECL void ev_break (EV_P_ int how EV_CPP (= EVBREAK_ONE)) EV_THROW; /* break out of the loop */
 
 /*
  * ref/unref can be used to add or remove a refcount on the mainloop. every watcher
  * keeps one reference. if you have a long-running watcher you never unregister that
  * should not keep ev_loop from running, unref() after starting, and ref() before stopping.
  */
-void ev_ref   (EV_P);
-void ev_unref (EV_P);
+EV_API_DECL void ev_ref   (EV_P) EV_THROW;
+EV_API_DECL void ev_unref (EV_P) EV_THROW;
 
 /*
  * convenience function, wait for a single event, without registering an event watcher
  * if timeout is < 0, do wait indefinitely
  */
-void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg);
+EV_API_DECL void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg) EV_THROW;
 
 # if EV_FEATURE_API
-unsigned int ev_iteration (EV_P); /* number of loop iterations */
-unsigned int ev_depth     (EV_P); /* #ev_loop enters - #ev_loop leaves */
-void         ev_verify    (EV_P); /* abort if loop data corrupted */
+EV_API_DECL unsigned int ev_iteration (EV_P) EV_THROW; /* number of loop iterations */
+EV_API_DECL unsigned int ev_depth     (EV_P) EV_THROW; /* #ev_loop enters - #ev_loop leaves */
+EV_API_DECL void         ev_verify    (EV_P) EV_THROW; /* abort if loop data corrupted */
 
-void ev_set_io_collect_interval (EV_P_ ev_tstamp interval); /* sleep at least this time, default 0 */
-void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval); /* sleep at least this time, default 0 */
+EV_API_DECL void ev_set_io_collect_interval (EV_P_ ev_tstamp interval) EV_THROW; /* sleep at least this time, default 0 */
+EV_API_DECL void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval) EV_THROW; /* sleep at least this time, default 0 */
 
 /* advanced stuff for threading etc. support, see docs */
-void ev_set_userdata (EV_P_ void *data);
-void *ev_userdata (EV_P);
-void ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P));
-void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P));
+EV_API_DECL void ev_set_userdata (EV_P_ void *data) EV_THROW;
+EV_API_DECL void *ev_userdata (EV_P) EV_THROW;
+EV_API_DECL void ev_set_invoke_pending_cb (EV_P_ void (*invoke_pending_cb)(EV_P)) EV_THROW;
+EV_API_DECL void ev_set_loop_release_cb (EV_P_ void (*release)(EV_P), void (*acquire)(EV_P) EV_THROW) EV_THROW;
 
-unsigned int ev_pending_count (EV_P); /* number of pending events, if any */
-void ev_invoke_pending (EV_P); /* invoke all pending watchers */
+EV_API_DECL unsigned int ev_pending_count (EV_P) EV_THROW; /* number of pending events, if any */
+EV_API_DECL void ev_invoke_pending (EV_P); /* invoke all pending watchers */
 
 /*
  * stop/start the timer handling.
  */
-void ev_suspend (EV_P);
-void ev_resume  (EV_P);
+EV_API_DECL void ev_suspend (EV_P) EV_THROW;
+EV_API_DECL void ev_resume  (EV_P) EV_THROW;
 #endif
 
 #endif
@@ -717,87 +733,87 @@ void ev_resume  (EV_P);
 /* stopping (disabling, deleting) a watcher does nothing unless its already running */
 #if EV_PROTOTYPES
 
-/* feeds an event into a watcher as if the event actually occured */
+/* feeds an event into a watcher as if the event actually occurred */
 /* accepts any ev_watcher type */
-void ev_feed_event     (EV_P_ void *w, int revents);
-void ev_feed_fd_event  (EV_P_ int fd, int revents);
+EV_API_DECL void ev_feed_event     (EV_P_ void *w, int revents) EV_THROW;
+EV_API_DECL void ev_feed_fd_event  (EV_P_ int fd, int revents) EV_THROW;
 #if EV_SIGNAL_ENABLE
-void ev_feed_signal    (int signum);
-void ev_feed_signal_event (EV_P_ int signum);
+EV_API_DECL void ev_feed_signal    (int signum) EV_THROW;
+EV_API_DECL void ev_feed_signal_event (EV_P_ int signum) EV_THROW;
 #endif
-void ev_invoke         (EV_P_ void *w, int revents);
-int  ev_clear_pending  (EV_P_ void *w);
+EV_API_DECL void ev_invoke         (EV_P_ void *w, int revents);
+EV_API_DECL int  ev_clear_pending  (EV_P_ void *w) EV_THROW;
 
-void ev_io_start       (EV_P_ ev_io *w);
-void ev_io_stop        (EV_P_ ev_io *w);
+EV_API_DECL void ev_io_start       (EV_P_ ev_io *w) EV_THROW;
+EV_API_DECL void ev_io_stop        (EV_P_ ev_io *w) EV_THROW;
 
-void ev_timer_start    (EV_P_ ev_timer *w);
-void ev_timer_stop     (EV_P_ ev_timer *w);
+EV_API_DECL void ev_timer_start    (EV_P_ ev_timer *w) EV_THROW;
+EV_API_DECL void ev_timer_stop     (EV_P_ ev_timer *w) EV_THROW;
 /* stops if active and no repeat, restarts if active and repeating, starts if inactive and repeating */
-void ev_timer_again    (EV_P_ ev_timer *w);
+EV_API_DECL void ev_timer_again    (EV_P_ ev_timer *w) EV_THROW;
 /* return remaining time */
-ev_tstamp ev_timer_remaining (EV_P_ ev_timer *w);
+EV_API_DECL ev_tstamp ev_timer_remaining (EV_P_ ev_timer *w) EV_THROW;
 
 #if EV_PERIODIC_ENABLE
-void ev_periodic_start (EV_P_ ev_periodic *w);
-void ev_periodic_stop  (EV_P_ ev_periodic *w);
-void ev_periodic_again (EV_P_ ev_periodic *w);
+EV_API_DECL void ev_periodic_start (EV_P_ ev_periodic *w) EV_THROW;
+EV_API_DECL void ev_periodic_stop  (EV_P_ ev_periodic *w) EV_THROW;
+EV_API_DECL void ev_periodic_again (EV_P_ ev_periodic *w) EV_THROW;
 #endif
 
 /* only supported in the default loop */
 #if EV_SIGNAL_ENABLE
-void ev_signal_start   (EV_P_ ev_signal *w);
-void ev_signal_stop    (EV_P_ ev_signal *w);
+EV_API_DECL void ev_signal_start   (EV_P_ ev_signal *w) EV_THROW;
+EV_API_DECL void ev_signal_stop    (EV_P_ ev_signal *w) EV_THROW;
 #endif
 
 /* only supported in the default loop */
 # if EV_CHILD_ENABLE
-void ev_child_start    (EV_P_ ev_child *w);
-void ev_child_stop     (EV_P_ ev_child *w);
+EV_API_DECL void ev_child_start    (EV_P_ ev_child *w) EV_THROW;
+EV_API_DECL void ev_child_stop     (EV_P_ ev_child *w) EV_THROW;
 # endif
 
 # if EV_STAT_ENABLE
-void ev_stat_start     (EV_P_ ev_stat *w);
-void ev_stat_stop      (EV_P_ ev_stat *w);
-void ev_stat_stat      (EV_P_ ev_stat *w);
+EV_API_DECL void ev_stat_start     (EV_P_ ev_stat *w) EV_THROW;
+EV_API_DECL void ev_stat_stop      (EV_P_ ev_stat *w) EV_THROW;
+EV_API_DECL void ev_stat_stat      (EV_P_ ev_stat *w) EV_THROW;
 # endif
 
 # if EV_IDLE_ENABLE
-void ev_idle_start     (EV_P_ ev_idle *w);
-void ev_idle_stop      (EV_P_ ev_idle *w);
+EV_API_DECL void ev_idle_start     (EV_P_ ev_idle *w) EV_THROW;
+EV_API_DECL void ev_idle_stop      (EV_P_ ev_idle *w) EV_THROW;
 # endif
 
 #if EV_PREPARE_ENABLE
-void ev_prepare_start  (EV_P_ ev_prepare *w);
-void ev_prepare_stop   (EV_P_ ev_prepare *w);
+EV_API_DECL void ev_prepare_start  (EV_P_ ev_prepare *w) EV_THROW;
+EV_API_DECL void ev_prepare_stop   (EV_P_ ev_prepare *w) EV_THROW;
 #endif
 
 #if EV_CHECK_ENABLE
-void ev_check_start    (EV_P_ ev_check *w);
-void ev_check_stop     (EV_P_ ev_check *w);
+EV_API_DECL void ev_check_start    (EV_P_ ev_check *w) EV_THROW;
+EV_API_DECL void ev_check_stop     (EV_P_ ev_check *w) EV_THROW;
 #endif
 
 # if EV_FORK_ENABLE
-void ev_fork_start     (EV_P_ ev_fork *w);
-void ev_fork_stop      (EV_P_ ev_fork *w);
+EV_API_DECL void ev_fork_start     (EV_P_ ev_fork *w) EV_THROW;
+EV_API_DECL void ev_fork_stop      (EV_P_ ev_fork *w) EV_THROW;
 # endif
 
 # if EV_CLEANUP_ENABLE
-void ev_cleanup_start  (EV_P_ ev_cleanup *w);
-void ev_cleanup_stop   (EV_P_ ev_cleanup *w);
+EV_API_DECL void ev_cleanup_start  (EV_P_ ev_cleanup *w) EV_THROW;
+EV_API_DECL void ev_cleanup_stop   (EV_P_ ev_cleanup *w) EV_THROW;
 # endif
 
 # if EV_EMBED_ENABLE
 /* only supported when loop to be embedded is in fact embeddable */
-void ev_embed_start    (EV_P_ ev_embed *w);
-void ev_embed_stop     (EV_P_ ev_embed *w);
-void ev_embed_sweep    (EV_P_ ev_embed *w);
+EV_API_DECL void ev_embed_start    (EV_P_ ev_embed *w) EV_THROW;
+EV_API_DECL void ev_embed_stop     (EV_P_ ev_embed *w) EV_THROW;
+EV_API_DECL void ev_embed_sweep    (EV_P_ ev_embed *w) EV_THROW;
 # endif
 
 # if EV_ASYNC_ENABLE
-void ev_async_start    (EV_P_ ev_async *w);
-void ev_async_stop     (EV_P_ ev_async *w);
-void ev_async_send     (EV_P_ ev_async *w);
+EV_API_DECL void ev_async_start    (EV_P_ ev_async *w) EV_THROW;
+EV_API_DECL void ev_async_stop     (EV_P_ ev_async *w) EV_THROW;
+EV_API_DECL void ev_async_send     (EV_P_ ev_async *w) EV_THROW;
 # endif
 
 #if EV_COMPAT3
diff --git a/third_party/libev/ev.pod b/third_party/libev/ev.pod
index 4bbef1fcf25b3deb62c8a31bb4fddd2b844c598b..2f90836c387ad3c61bb34a567d4a21694d633e79 100644
--- a/third_party/libev/ev.pod
+++ b/third_party/libev/ev.pod
@@ -176,13 +176,19 @@ library in any way.
 Returns the current time as libev would use it. Please note that the
 C<ev_now> function is usually faster and also often returns the timestamp
 you actually want to know. Also interesting is the combination of
-C<ev_update_now> and C<ev_now>.
+C<ev_now_update> and C<ev_now>.
 
 =item ev_sleep (ev_tstamp interval)
 
-Sleep for the given interval: The current thread will be blocked until
-either it is interrupted or the given time interval has passed. Basically
-this is a sub-second-resolution C<sleep ()>.
+Sleep for the given interval: The current thread will be blocked
+until either it is interrupted or the given time interval has
+passed (approximately - it might return a bit earlier even if not
+interrupted). Returns immediately if C<< interval <= 0 >>.
+
+Basically this is a sub-second-resolution C<sleep ()>.
+
+The range of the C<interval> is limited - libev only guarantees to work
+with sleep times of up to one day (C<< interval <= 86400 >>).
 
 =item int ev_version_major ()
 
@@ -243,7 +249,7 @@ the current system, you would need to look at C<ev_embeddable_backends ()
 
 See the description of C<ev_embed> watchers for more info.
 
-=item ev_set_allocator (void *(*cb)(void *ptr, long size))
+=item ev_set_allocator (void *(*cb)(void *ptr, long size) throw ())
 
 Sets the allocation function to use (the prototype is similar - the
 semantics are identical to the C<realloc> C89/SuS/POSIX function). It is
@@ -279,7 +285,7 @@ retries (example requires a standards-compliant C<realloc>).
    ...
    ev_set_allocator (persistent_realloc);
 
-=item ev_set_syserr_cb (void (*cb)(const char *msg))
+=item ev_set_syserr_cb (void (*cb)(const char *msg) throw ())
 
 Set the callback function to call on a retryable system call error (such
 as failed select, poll, epoll_wait). The message is a printable string
@@ -437,7 +443,7 @@ example) that can't properly initialise their signal masks.
 =item C<EVFLAG_NOSIGMASK>
 
 When this flag is specified, then libev will avoid to modify the signal
-mask. Specifically, this means you ahve to make sure signals are unblocked
+mask. Specifically, this means you have to make sure signals are unblocked
 when you want to receive them.
 
 This behaviour is useful when you want to do your own signal handling, or
@@ -485,10 +491,10 @@ C<EV_WRITE> to C<POLLOUT | POLLERR | POLLHUP>.
 Use the linux-specific epoll(7) interface (for both pre- and post-2.6.9
 kernels).
 
-For few fds, this backend is a bit little slower than poll and select,
-but it scales phenomenally better. While poll and select usually scale
-like O(total_fds) where n is the total number of fds (or the highest fd),
-epoll scales either O(1) or O(active_fds).
+For few fds, this backend is a bit little slower than poll and select, but
+it scales phenomenally better. While poll and select usually scale like
+O(total_fds) where total_fds is the total number of fds (or the highest
+fd), epoll scales either O(1) or O(active_fds).
 
 The epoll mechanism deserves honorable mention as the most misdesigned
 of the more advanced event mechanisms: mere annoyances include silently
@@ -501,19 +507,22 @@ forks then I<both> parent and child process have to recreate the epoll
 set, which can take considerable time (one syscall per file descriptor)
 and is of course hard to detect.
 
-Epoll is also notoriously buggy - embedding epoll fds I<should> work, but
-of course I<doesn't>, and epoll just loves to report events for totally
-I<different> file descriptors (even already closed ones, so one cannot
-even remove them from the set) than registered in the set (especially
-on SMP systems). Libev tries to counter these spurious notifications by
-employing an additional generation counter and comparing that against the
-events to filter out spurious ones, recreating the set when required. Last
+Epoll is also notoriously buggy - embedding epoll fds I<should> work,
+but of course I<doesn't>, and epoll just loves to report events for
+totally I<different> file descriptors (even already closed ones, so
+one cannot even remove them from the set) than registered in the set
+(especially on SMP systems). Libev tries to counter these spurious
+notifications by employing an additional generation counter and comparing
+that against the events to filter out spurious ones, recreating the set
+when required. Epoll also erroneously rounds down timeouts, but gives you
+no way to know when and by how much, so sometimes you have to busy-wait
+because epoll returns immediately despite a nonzero timeout. And last
 not least, it also refuses to work with some file descriptors which work
 perfectly fine with C<select> (files, many character devices...).
 
-Epoll is truly the train wreck analog among event poll mechanisms,
-a frankenpoll, cobbled together in a hurry, no thought to design or
-interaction with others.
+Epoll is truly the train wreck among event poll mechanisms, a frankenpoll,
+cobbled together in a hurry, no thought to design or interaction with
+others. Oh, the pain, will it ever stop...
 
 While stopping, setting and starting an I/O watcher in the same iteration
 will result in some caching, there is still a system call per such
@@ -560,9 +569,9 @@ It scales in the same way as the epoll backend, but the interface to the
 kernel is more efficient (which says nothing about its actual speed, of
 course). While stopping, setting and starting an I/O watcher does never
 cause an extra system call as with C<EVBACKEND_EPOLL>, it still adds up to
-two event changes per incident. Support for C<fork ()> is very bad (but
-sane, unlike epoll) and it drops fds silently in similarly hard-to-detect
-cases
+two event changes per incident. Support for C<fork ()> is very bad (you
+might have to leak fd's on fork, but it's more sane than epoll) and it
+drops fds silently in similarly hard-to-detect cases
 
 This backend usually performs well under most conditions.
 
@@ -601,11 +610,11 @@ hacks).
 
 On the negative side, the interface is I<bizarre> - so bizarre that
 even sun itself gets it wrong in their code examples: The event polling
-function sometimes returning events to the caller even though an error
+function sometimes returns events to the caller even though an error
 occurred, but with no indication whether it has done so or not (yes, it's
-even documented that way) - deadly for edge-triggered interfaces where
-you absolutely have to know whether an event occurred or not because you
-have to re-arm the watcher.
+even documented that way) - deadly for edge-triggered interfaces where you
+absolutely have to know whether an event occurred or not because you have
+to re-arm the watcher.
 
 Fortunately libev seems to be able to work around these idiocies.
 
@@ -785,18 +794,22 @@ without a previous call to C<ev_suspend>.
 Calling C<ev_suspend>/C<ev_resume> has the side effect of updating the
 event loop time (see C<ev_now_update>).
 
-=item ev_run (loop, int flags)
+=item bool ev_run (loop, int flags)
 
 Finally, this is it, the event handler. This function usually is called
 after you have initialised all your watchers and you want to start
 handling events. It will ask the operating system for any new events, call
-the watcher callbacks, an then repeat the whole process indefinitely: This
+the watcher callbacks, and then repeat the whole process indefinitely: This
 is why event loops are called I<loops>.
 
 If the flags argument is specified as C<0>, it will keep handling events
 until either no event watchers are active anymore or C<ev_break> was
 called.
 
+The return value is false if there are no more active watchers (which
+usually means "all jobs done" or "deadlock"), and true in all other cases
+(which usually means "you should call C<ev_run> again").
+
 Please note that an explicit C<ev_break> is usually better than
 relying on all watchers to be stopped when deciding when a program has
 finished (especially in interactive programs), but having a program
@@ -804,8 +817,8 @@ that automatically loops as long as it has to and no longer by virtue
 of relying on its watchers stopping correctly, that is truly a thing of
 beauty.
 
-This function is also I<mostly> exception-safe - you can break out of
-a C<ev_run> call by calling C<longjmp> in a callback, throwing a C++
+This function is I<mostly> exception-safe - you can break out of a
+C<ev_run> call by calling C<longjmp> in a callback, throwing a C++
 exception and so on. This does not decrement the C<ev_depth> value, nor
 will it clear any outstanding C<EVBREAK_ONE> breaks.
 
@@ -827,7 +840,9 @@ with something not expressible using other libev watchers (i.e. "roll your
 own C<ev_run>"). However, a pair of C<ev_prepare>/C<ev_check> watchers is
 usually a better approach for this kind of thing.
 
-Here are the gory details of what C<ev_run> does:
+Here are the gory details of what C<ev_run> does (this is for your
+understanding, not a guarantee that things will work exactly like this in
+future versions):
 
    - Increment loop depth.
    - Reset the ev_break status.
@@ -943,10 +958,11 @@ overhead for the actual polling but can deliver many events at once.
 By setting a higher I<io collect interval> you allow libev to spend more
 time collecting I/O events, so you can handle more events per iteration,
 at the cost of increasing latency. Timeouts (both C<ev_periodic> and
-C<ev_timer>) will be not affected. Setting this to a non-null value will
+C<ev_timer>) will not be affected. Setting this to a non-null value will
 introduce an additional C<ev_sleep ()> call into most loop iterations. The
 sleep time ensures that libev will not poll for I/O events more often then
-once per this interval, on average.
+once per this interval, on average (as long as the host time resolution is
+good enough).
 
 Likewise, by setting a higher I<timeout collect interval> you allow libev
 to spend more time collecting timeouts, at the expense of increased
@@ -1002,7 +1018,7 @@ invoke the actual watchers inside another context (another thread etc.).
 If you want to reset the callback, use C<ev_invoke_pending> as new
 callback.
 
-=item ev_set_loop_release_cb (loop, void (*release)(EV_P), void (*acquire)(EV_P))
+=item ev_set_loop_release_cb (loop, void (*release)(EV_P) throw (), void (*acquire)(EV_P) throw ())
 
 Sometimes you want to share the same loop between multiple threads. This
 can be done relatively simply by putting mutex_lock/unlock calls around
@@ -1010,7 +1026,7 @@ each call to a libev function.
 
 However, C<ev_run> can run an indefinite time, so it is not feasible
 to wait for it to return. One way around this is to wake up the event
-loop via C<ev_break> and C<av_async_send>, another way is to set these
+loop via C<ev_break> and C<ev_async_send>, another way is to set these
 I<release> and I<acquire> callbacks on the loop.
 
 When set, then C<release> will be called just before the thread is
@@ -1376,7 +1392,7 @@ rules might look complicated, they usually do "the right thing".
 
 =item initialiased
 
-Before a watcher can be registered with the event looop it has to be
+Before a watcher can be registered with the event loop it has to be
 initialised. This can be done with a call to C<ev_TYPE_init>, or calls to
 C<ev_init> followed by the watcher-specific C<ev_TYPE_set> function.
 
@@ -1761,10 +1777,11 @@ monotonic clock option helps a lot here).
 
 The callback is guaranteed to be invoked only I<after> its timeout has
 passed (not I<at>, so on systems with very low-resolution clocks this
-might introduce a small delay). If multiple timers become ready during the
-same loop iteration then the ones with earlier time-out values are invoked
-before ones of the same priority with later time-out values (but this is
-no longer true when a callback calls C<ev_run> recursively).
+might introduce a small delay, see "the special problem of being too
+early", below). If multiple timers become ready during the same loop
+iteration then the ones with earlier time-out values are invoked before
+ones of the same priority with later time-out values (but this is no
+longer true when a callback calls C<ev_run> recursively).
 
 =head3 Be smart about timeouts
 
@@ -1849,63 +1866,77 @@ In this case, it would be more efficient to leave the C<ev_timer> alone,
 but remember the time of last activity, and check for a real timeout only
 within the callback:
 
+   ev_tstamp timeout = 60.;
    ev_tstamp last_activity; // time of last activity
+   ev_timer timer;
 
    static void
    callback (EV_P_ ev_timer *w, int revents)
    {
-     ev_tstamp now     = ev_now (EV_A);
-     ev_tstamp timeout = last_activity + 60.;
+     // calculate when the timeout would happen
+     ev_tstamp after = last_activity - ev_now (EV_A) + timeout;
 
-     // if last_activity + 60. is older than now, we did time out
-     if (timeout < now)
+     // if negative, it means the timeout already occurred
+     if (after < 0.)
        {
          // timeout occurred, take action
        }
      else
        {
-         // callback was invoked, but there was some activity, re-arm
-         // the watcher to fire in last_activity + 60, which is
-         // guaranteed to be in the future, so "again" is positive:
-         w->repeat = timeout - now;
-         ev_timer_again (EV_A_ w);
+         // callback was invoked, but there was some recent 
+         // activity. simply restart the timer to time out
+         // after "after" seconds, which is the earliest time
+         // the timeout can occur.
+         ev_timer_set (w, after, 0.);
+         ev_timer_start (EV_A_ w);
        }
    }
 
-To summarise the callback: first calculate the real timeout (defined
-as "60 seconds after the last activity"), then check if that time has
-been reached, which means something I<did>, in fact, time out. Otherwise
-the callback was invoked too early (C<timeout> is in the future), so
-re-schedule the timer to fire at that future time, to see if maybe we have
-a timeout then.
+To summarise the callback: first calculate in how many seconds the
+timeout will occur (by calculating the absolute time when it would occur,
+C<last_activity + timeout>, and subtracting the current time, C<ev_now
+(EV_A)> from that).
+
+If this value is negative, then we are already past the timeout, i.e. we
+timed out, and need to do whatever is needed in this case.
+
+Otherwise, we now know the earliest time at which the timeout would trigger,
+and simply start the timer with this timeout value.
 
-Note how C<ev_timer_again> is used, taking advantage of the
-C<ev_timer_again> optimisation when the timer is already running.
+In other words, each time the callback is invoked it will check whether
+the timeout occurred. If not, it will simply reschedule itself to check
+again at the earliest time it could time out. Rinse. Repeat.
 
 This scheme causes more callback invocations (about one every 60 seconds
 minus half the average time between activity), but virtually no calls to
 libev to change the timeout.
 
-To start the timer, simply initialise the watcher and set C<last_activity>
-to the current time (meaning we just have some activity :), then call the
-callback, which will "do the right thing" and start the timer:
+To start the machinery, simply initialise the watcher and set
+C<last_activity> to the current time (meaning there was some activity just
+now), then call the callback, which will "do the right thing" and start
+the timer:
 
-   ev_init (timer, callback);
-   last_activity = ev_now (loop);
-   callback (loop, timer, EV_TIMER);
+   last_activity = ev_now (EV_A);
+   ev_init (&timer, callback);
+   callback (EV_A_ &timer, 0);
 
-And when there is some activity, simply store the current time in
+When there is some activity, simply store the current time in
 C<last_activity>, no libev calls at all:
 
-   last_activity = ev_now (loop);
+   if (activity detected)
+     last_activity = ev_now (EV_A);
+
+When your timeout value changes, then the timeout can be changed by simply
+providing a new value, stopping the timer and calling the callback, which
+will again do the right thing (for example, time out immediately :).
+
+   timeout = new_value;
+   ev_timer_stop (EV_A_ &timer);
+   callback (EV_A_ &timer, 0);
 
 This technique is slightly more complex, but in most cases where the
 time-out is unlikely to be triggered, much more efficient.
 
-Changing the timeout is trivial as well (if it isn't hard-coded in the
-callback :) - just change the timeout and invoke the callback, which will
-fix things for you.
-
 =item 4. Wee, just use a double-linked list for your timeouts.
 
 If there is not one request, but many thousands (millions...), all
@@ -1941,10 +1972,47 @@ rather complicated, but extremely efficient, something that really pays
 off after the first million or so of active timers, i.e. it's usually
 overkill :)
 
+=head3 The special problem of being too early
+
+If you ask a timer to call your callback after three seconds, then
+you expect it to be invoked after three seconds - but of course, this
+cannot be guaranteed to infinite precision. Less obviously, it cannot be
+guaranteed to any precision by libev - imagine somebody suspending the
+process with a STOP signal for a few hours for example.
+
+So, libev tries to invoke your callback as soon as possible I<after> the
+delay has occurred, but cannot guarantee this.
+
+A less obvious failure mode is calling your callback too early: many event
+loops compare timestamps with a "elapsed delay >= requested delay", but
+this can cause your callback to be invoked much earlier than you would
+expect.
+
+To see why, imagine a system with a clock that only offers full second
+resolution (think windows if you can't come up with a broken enough OS
+yourself). If you schedule a one-second timer at the time 500.9, then the
+event loop will schedule your timeout to elapse at a system time of 500
+(500.9 truncated to the resolution) + 1, or 501.
+
+If an event library looks at the timeout 0.1s later, it will see "501 >=
+501" and invoke the callback 0.1s after it was started, even though a
+one-second delay was requested - this is being "too early", despite best
+intentions.
+
+This is the reason why libev will never invoke the callback if the elapsed
+delay equals the requested delay, but only when the elapsed delay is
+larger than the requested delay. In the example above, libev would only invoke
+the callback at system time 502, or 1.1s after the timer was started.
+
+So, while libev cannot guarantee that your callback will be invoked
+exactly when requested, it I<can> and I<does> guarantee that the requested
+delay has actually elapsed, or in other words, it always errs on the "too
+late" side of things.
+
 =head3 The special problem of time updates
 
-Establishing the current time is a costly operation (it usually takes at
-least two system calls): EV therefore updates its idea of the current
+Establishing the current time is a costly operation (it usually takes
+at least one system call): EV therefore updates its idea of the current
 time only before and after C<ev_run> collects new events, which causes a
 growing difference between C<ev_now ()> and C<ev_time ()> when handling
 lots of events in one iteration.
@@ -1961,6 +2029,39 @@ If the event loop is suspended for a long time, you can also force an
 update of the time returned by C<ev_now ()> by calling C<ev_now_update
 ()>.
 
+=head3 The special problem of unsynchronised clocks
+
+Modern systems have a variety of clocks - libev itself uses the normal
+"wall clock" clock and, if available, the monotonic clock (to avoid time
+jumps).
+
+Neither of these clocks is synchronised with each other or any other clock
+on the system, so C<ev_time ()> might return a considerably different time
+than C<gettimeofday ()> or C<time ()>. On a GNU/Linux system, for example,
+a call to C<gettimeofday> might return a second count that is one higher
+than a directly following call to C<time>.
+
+The moral of this is to only compare libev-related timestamps with
+C<ev_time ()> and C<ev_now ()>, at least if you want better precision than
+a second or so.
+
+One more problem arises due to this lack of synchronisation: if libev uses
+the system monotonic clock and you compare timestamps from C<ev_time>
+or C<ev_now> from when you started your timer and when your callback is
+invoked, you will find that sometimes the callback is a bit "early".
+
+This is because C<ev_timer>s work in real time, not wall clock time, so
+libev makes sure your callback is not invoked before the delay happened,
+I<measured according to the real time>, not the system clock.
+
+If your timeouts are based on a physical timescale (e.g. "time out this
+connection after 100 seconds") then this shouldn't bother you as it is
+exactly the right behaviour.
+
+If you want to compare wall clock/system timestamps to your timers, then
+you need to use C<ev_periodic>s, as these are based on the wall clock
+time, where your comparisons will always generate correct results.
+
 =head3 The special problems of suspended animation
 
 When you leave the server world it is quite customary to hit machines that
@@ -2013,15 +2114,24 @@ do stuff) the timer will not fire more than once per event loop iteration.
 
 =item ev_timer_again (loop, ev_timer *)
 
-This will act as if the timer timed out and restart it again if it is
-repeating. The exact semantics are:
+This will act as if the timer timed out, and restarts it again if it is
+repeating. It basically works like calling C<ev_timer_stop>, updating the
+timeout to the C<repeat> value and calling C<ev_timer_start>.
+
+The exact semantics are as in the following rules, all of which will be
+applied to the watcher:
+
+=over 4
+
+=item If the timer is pending, the pending status is always cleared.
 
-If the timer is pending, its pending status is cleared.
+=item If the timer is started but non-repeating, stop it (as if it timed
+out, without invoking it).
 
-If the timer is started but non-repeating, stop it (as if it timed out).
+=item If the timer is repeating, make the C<repeat> value the new timeout
+and start the timer, if necessary.
 
-If the timer is repeating, either start it if necessary (with the
-C<repeat> value), or reset the running timer to the C<repeat> value.
+=back
 
 This sounds a bit complicated, see L<Be smart about timeouts>, above, for a
 usage example.
@@ -2153,9 +2263,12 @@ Another way to think about it (for the mathematically inclined) is that
 C<ev_periodic> will try to run the callback in this mode at the next possible
 time where C<time = offset (mod interval)>, regardless of any time jumps.
 
-For numerical stability it is preferable that the C<offset> value is near
-C<ev_now ()> (the current time), but there is no range requirement for
-this value, and in fact is often specified as zero.
+The C<interval> I<MUST> be positive, and for numerical stability, the
+interval value should be higher than C<1/8192> (which is around 100
+microseconds) and C<offset> should be higher than C<0> and should have
+at most a similar magnitude as the current time (say, within a factor of
+ten). Typical values for offset are, in fact, C<0> or something between
+C<0> and C<interval>, which is also the recommended range.
 
 Note also that there is an upper limit to how often a timer can fire (CPU
 speed for example), so if C<interval> is very small then timing stability
@@ -3202,14 +3315,11 @@ it by calling C<ev_async_send>, which is thread- and signal safe.
 This functionality is very similar to C<ev_signal> watchers, as signals,
 too, are asynchronous in nature, and signals, too, will be compressed
 (i.e. the number of callback invocations may be less than the number of
-C<ev_async_sent> calls). In fact, you could use signal watchers as a kind
+C<ev_async_send> calls). In fact, you could use signal watchers as a kind
 of "global async watchers" by using a watcher on an otherwise unused
 signal, and C<ev_feed_signal> to signal this watcher from another thread,
 even without knowing which loop owns the signal.
 
-Unlike C<ev_signal> watchers, C<ev_async> works with any event loop, not
-just the default loop.
-
 =head3 Queueing
 
 C<ev_async> does not support queueing of data in any way. The reason
@@ -3318,13 +3428,16 @@ signal or similar contexts (see the discussion of C<EV_ATOMIC_T> in the
 embedding section below on what exactly this means).
 
 Note that, as with other watchers in libev, multiple events might get
-compressed into a single callback invocation (another way to look at this
-is that C<ev_async> watchers are level-triggered, set on C<ev_async_send>,
-reset when the event loop detects that).
+compressed into a single callback invocation (another way to look at
+this is that C<ev_async> watchers are level-triggered: they are set on
+C<ev_async_send>, reset when the event loop detects that).
 
-This call incurs the overhead of a system call only once per event loop
-iteration, so while the overhead might be noticeable, it doesn't apply to
-repeated calls to C<ev_async_send> for the same event loop.
+This call incurs the overhead of at most one extra system call per event
+loop iteration, if the event loop is blocked, and no syscall at all if
+the event loop (or your program) is processing events. That means that
+repeated calls are basically free (there is no need to avoid calls for
+performance reasons) and that the overhead becomes smaller (typically
+zero) under load.
 
 =item bool = ev_async_pending (ev_async *)
 
@@ -3389,7 +3502,7 @@ Example: wait up to ten seconds for data to appear on STDIN_FILENO.
 =item ev_feed_fd_event (loop, int fd, int revents)
 
 Feed an event on the given fd, as if a file descriptor backend detected
-the given events it.
+the given events.
 
 =item ev_feed_signal_event (loop, int signum)
 
@@ -3473,6 +3586,46 @@ real programmers):
        (((char *)w) - offsetof (struct my_biggy, t2));
    }
 
+=head2 AVOIDING FINISHING BEFORE RETURNING
+
+Often you have structures like this in event-based programs:
+
+  callback ()
+  {
+    free (request);
+  }
+
+  request = start_new_request (..., callback);
+
+The intent is to start some "lengthy" operation. The C<request> could be
+used to cancel the operation, or do other things with it.
+
+It's not uncommon to have code paths in C<start_new_request> that
+immediately invoke the callback, for example, to report errors. Or you add
+some caching layer that finds that it can skip the lengthy aspects of the
+operation and simply invoke the callback with the result.
+
+The problem here is that this will happen I<before> C<start_new_request>
+has returned, so C<request> is not set.
+
+Even if you pass the request by some safer means to the callback, you
+might want to do something to the request after starting it, such as
+canceling it, which probably isn't working so well when the callback has
+already been invoked.
+
+A common way around all these issues is to make sure that
+C<start_new_request> I<always> returns before the callback is invoked. If
+C<start_new_request> immediately knows the result, it can artificially
+delay invoking the callback by e.g. using a C<prepare> or C<idle> watcher
+for example, or more sneakily, by reusing an existing (stopped) watcher
+and pushing it into the pending queue:
+
+   ev_set_cb (watcher, callback);
+   ev_feed_event (EV_A_ watcher, 0);
+
+This way, C<start_new_request> can safely return before the callback is
+invoked, while not delaying callback invocation too much.
+
 =head2 MODEL/NESTED EVENT LOOP INVOCATIONS AND EXIT CONDITIONS
 
 Often (especially in GUI toolkits) there are places where you have
@@ -3495,7 +3648,7 @@ triggered, using C<EVRUN_ONCE>:
    while (!exit_main_loop)
      ev_run (EV_DEFAULT_ EVRUN_ONCE);
 
-   // in a model watcher
+   // in a modal watcher
    int exit_nested_loop = 0;
 
    while (!exit_nested_loop)
@@ -3685,7 +3838,7 @@ called):
 
 That basically suspends the coroutine inside C<wait_for_event> and
 continues the libev coroutine, which, when appropriate, switches back to
-this or any other coroutine. I am sure if you sue this your own :)
+this or any other coroutine.
 
 You can do similar tricks if you have, say, threads with an event queue -
 instead of storing a coroutine, you store the queue object and instead of
@@ -3746,6 +3899,39 @@ to use the libev header file and library.
 
 =head1 C++ SUPPORT
 
+=head2 C API
+
+The normal C API should work fine when used from C++: both ev.h and the
+libev sources can be compiled as C++. Therefore, code that uses the C API
+will work fine.
+
+Proper exception specifications might have to be added to callbacks passed
+to libev: exceptions may be thrown only from watcher callbacks, all
+other callbacks (allocator, syserr, loop acquire/release and periodioc
+reschedule callbacks) must not throw exceptions, and might need a C<throw
+()> specification. If you have code that needs to be compiled as both C
+and C++ you can use the C<EV_THROW> macro for this:
+
+   static void
+   fatal_error (const char *msg) EV_THROW
+   {
+     perror (msg);
+     abort ();
+   }
+
+   ...
+   ev_set_syserr_cb (fatal_error);
+
+The only API functions that can currently throw exceptions are C<ev_run>,
+C<ev_invoke>, C<ev_invoke_pending> and C<ev_loop_destroy> (the latter
+because it runs cleanup watchers).
+
+Throwing exceptions in watcher callbacks is only supported if libev itself
+is compiled with a C++ compiler or your C and C++ environments allow
+throwing exceptions through C libraries (most do).
+
+=head2 C++ API
+
 Libev comes with some simplistic wrapper classes for C++ that mainly allow
 you to use some convenience methods to start/stop watchers and also change
 the callback model to a model using method callbacks on objects.
@@ -3770,6 +3956,10 @@ to add as long as they only need one additional pointer for context. If
 you need support for other types of functors please contact the author
 (preferably after implementing it).
 
+For all this to work, your C++ compiler either has to use the same calling
+conventions as your C compiler (for static member functions), or you have
+to embed libev and compile libev itself as C++.
+
 Here is a list of things available in the C<ev> namespace:
 
 =over 4
@@ -3788,7 +3978,7 @@ Aliases to the same types/functions as with the C<ev_> prefix.
 For each C<ev_TYPE> watcher in F<ev.h> there is a corresponding class of
 the same name in the C<ev> namespace, with the exception of C<ev_signal>
 which is called C<ev::sig> to avoid clashes with the C<signal> macro
-defines by many implementations.
+defined by many implementations.
 
 All of those classes have these methods:
 
@@ -3931,7 +4121,7 @@ watchers in the constructor.
    class myclass
    {
      ev::io   io  ; void io_cb   (ev::io   &w, int revents);
-     ev::io2  io2 ; void io2_cb  (ev::io   &w, int revents);
+     ev::io   io2 ; void io2_cb  (ev::io   &w, int revents);
      ev::idle idle; void idle_cb (ev::idle &w, int revents);
 
      myclass (int fd)
@@ -3992,7 +4182,7 @@ L<http://hackage.haskell.org/cgi-bin/hackage-scripts/package/hlibev>.
 =item D
 
 Leandro Lucarella has written a D language binding (F<ev.d>) for libev, to
-be found at L<http://proj.llucax.com.ar/wiki/evd>.
+be found at L<http://www.llucax.com.ar/proj/ev.d/index.html>.
 
 =item Ocaml
 
@@ -4050,7 +4240,11 @@ suitable for use with C<EV_A>.
 =item C<EV_DEFAULT>, C<EV_DEFAULT_>
 
 Similar to the other two macros, this gives you the value of the default
-loop, if multiple loops are supported ("ev loop default").
+loop, if multiple loops are supported ("ev loop default"). The default loop
+will be initialised if it isn't already initialised.
+
+For non-multiplicity builds, these macros do nothing, so you always have
+to initialise the loop somewhere.
 
 =item C<EV_DEFAULT_UC>, C<EV_DEFAULT_UC_>
 
@@ -4206,6 +4400,15 @@ F<event.h> that are not directly supported by the libev core alone.
 In standalone mode, libev will still try to automatically deduce the
 configuration, but has to be more conservative.
 
+=item EV_USE_FLOOR
+
+If defined to be C<1>, libev will use the C<floor ()> function for its
+periodic reschedule calculations, otherwise libev will fall back on a
+portable (slower) implementation. If you enable this, you usually have to
+link against libm or something equivalent. Enabling this when the C<floor>
+function is not available will fail, so the safe default is to not enable
+this.
+
 =item EV_USE_MONOTONIC
 
 If defined to be C<1>, libev will try to detect the availability of the
@@ -4344,16 +4547,32 @@ interface to speed up C<ev_stat> watchers. Its actual availability will
 be detected at runtime. If undefined, it will be enabled if the headers
 indicate GNU/Linux + Glibc 2.4 or newer, otherwise disabled.
 
+=item EV_NO_SMP
+
+If defined to be C<1>, libev will assume that memory is always coherent
+between threads, that is, threads can be used, but threads never run on
+different cpus (or different cpu cores). This reduces dependencies
+and makes libev faster.
+
+=item EV_NO_THREADS
+
+If defined to be C<1>, libev will assume that it will never be called
+from different threads, which is a stronger assumption than C<EV_NO_SMP>,
+above. This reduces dependencies and makes libev faster.
+
 =item EV_ATOMIC_T
 
 Libev requires an integer type (suitable for storing C<0> or C<1>) whose
-access is atomic with respect to other threads or signal contexts. No such
-type is easily found in the C language, so you can provide your own type
-that you know is safe for your purposes. It is used both for signal handler "locking"
-as well as for signal and thread safety in C<ev_async> watchers.
+access is atomic and serialised with respect to other threads or signal
+contexts. No such type is easily found in the C language, so you can
+provide your own type that you know is safe for your purposes. It is used
+both for signal handler "locking" as well as for signal and thread safety
+in C<ev_async> watchers.
 
 In the absence of this define, libev will use C<sig_atomic_t volatile>
-(from F<signal.h>), which is usually good enough on most platforms.
+(from F<signal.h>), which is usually good enough on most platforms,
+although strictly speaking using a type that also implies a memory fence
+is required.
 
 =item EV_H (h)
 
@@ -4387,6 +4606,10 @@ additional independent event loops. Otherwise there will be no support
 for multiple event loops and there is no first event loop pointer
 argument. Instead, all functions act on the single default loop.
 
+Note that C<EV_DEFAULT> and C<EV_DEFAULT_> will no longer provide a
+default loop when multiplicity is switched off - you always have to
+initialise the loop manually in this case.
+
 =item EV_MINPRI
 
 =item EV_MAXPRI
@@ -4432,7 +4655,7 @@ backend, use this:
    #define EV_ASYNC_ENABLE 1
 
 The actual value is a bitset, it can be a combination of the following
-values:
+values (by default, all of these are enabled):
 
 =over 4
 
@@ -4447,6 +4670,9 @@ When optimising for size, use of compiler flags such as C<-Os> with
 gcc is recommended, as well as C<-DNDEBUG>, as libev contains a number of
 assertions.
 
+The default is off when C<__OPTIMIZE_SIZE__> is defined by your compiler
+(e.g. gcc with C<-Os>).
+
 =item C<2> - faster/larger data structures
 
 Replaces the small 2-heap for timer management by a faster 4-heap, larger
@@ -4454,6 +4680,9 @@ hash table sizes and so on. This will usually further increase code size
 and can additionally have an effect on the size of data structures at
 runtime.
 
+The default is off when C<__OPTIMIZE_SIZE__> is defined by your compiler
+(e.g. gcc with C<-Os>).
+
 =item C<4> - full API configuration
 
 This enables priorities (sets C<EV_MAXPRI>=2 and C<EV_MINPRI>=-2), and
@@ -4494,6 +4723,20 @@ when you use C<-Wl,--gc-sections -ffunction-sections>) functions unused by
 your program might be left out as well - a binary starting a timer and an
 I/O watcher then might come out at only 5Kb.
 
+=item EV_API_STATIC
+
+If this symbol is defined (by default it is not), then all identifiers
+will have static linkage. This means that libev will not export any
+identifiers, and you cannot link against libev anymore. This can be useful
+when you embed libev, only want to use libev functions in a single file,
+and do not want its identifiers to be visible.
+
+To use this, define C<EV_API_STATIC> and include F<ev.c> in the file that
+wants to use libev.
+
+This option only works when libev is compiled with a C compiler, as C++
+doesn't support the required declaration syntax.
+
 =item EV_AVOID_STDIO
 
 If this is set to C<1> at compiletime, then libev will avoid using stdio
@@ -4882,7 +5125,7 @@ model. Libev still offers limited functionality on this platform in
 the form of the C<EVBACKEND_SELECT> backend, and only supports socket
 descriptors. This only applies when using Win32 natively, not when using
 e.g. cygwin. Actually, it only applies to the microsofts own compilers,
-as every compielr comes with a slightly differently broken/incompatible
+as every compiler comes with a slightly differently broken/incompatible
 environment.
 
 Lifting these limitations would basically require the full
@@ -5025,8 +5268,12 @@ The type C<double> is used to represent timestamps. It is required to
 have at least 51 bits of mantissa (and 9 bits of exponent), which is
 good enough for at least into the year 4000 with millisecond accuracy
 (the design goal for libev). This requirement is overfulfilled by
-implementations using IEEE 754, which is basically all existing ones. With
-IEEE 754 doubles, you get microsecond accuracy until at least 2200.
+implementations using IEEE 754, which is basically all existing ones.
+
+With IEEE 754 doubles, you get microsecond accuracy until at least the
+year 2255 (and millisecond accuracy till the year 287396 - by then, libev
+is either obsolete or somebody patched it to use C<long double> or
+something like that, just kidding).
 
 =back
 
@@ -5098,8 +5345,9 @@ watchers becomes O(1) with respect to priority handling.
 =item Processing signals: O(max_signal_number)
 
 Sending involves a system call I<iff> there were no other C<ev_async_send>
-calls in the current loop iteration. Checking for async and signal events
-involves iterating over all running async watchers or all signal numbers.
+calls in the current loop iteration and the loop is currently
+blocked. Checking for async and signal events involves iterating over all
+running async watchers or all signal numbers.
 
 =back
 
diff --git a/third_party/libev/ev_epoll.c b/third_party/libev/ev_epoll.c
index 5deb65211181541bbac3f7082856ccdeef3beeba..b4e02c2c1764f3b4acc962355eda38b7e6741b5a 100644
--- a/third_party/libev/ev_epoll.c
+++ b/third_party/libev/ev_epoll.c
@@ -144,11 +144,13 @@ epoll_poll (EV_P_ ev_tstamp timeout)
   int i;
   int eventcnt;
 
+  if (expect_false (epoll_epermcnt))
+    timeout = 0.;
+
   /* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */
   /* the default libev max wait time, however. */
   EV_RELEASE_CB;
-  eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax,
-                         epoll_epermcnt ? 0 : ev_timeout_to_ms (timeout));
+  eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, timeout * 1e3);
   EV_ACQUIRE_CB;
 
   if (expect_false (eventcnt < 0))
@@ -168,8 +170,12 @@ epoll_poll (EV_P_ ev_tstamp timeout)
       int got  = (ev->events & (EPOLLOUT | EPOLLERR | EPOLLHUP) ? EV_WRITE : 0)
                | (ev->events & (EPOLLIN  | EPOLLERR | EPOLLHUP) ? EV_READ  : 0);
 
-      /* check for spurious notification */
-      /* we assume that fd is always in range, as we never shrink the anfds array */
+      /*
+       * check for spurious notification.
+       * this only finds spurious notifications on egen updates
+       * other spurious notifications will be found by epoll_ctl, below
+       * we assume that fd is always in range, as we never shrink the anfds array
+       */
       if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
         {
           /* recreate kernel state */
@@ -181,8 +187,15 @@ epoll_poll (EV_P_ ev_tstamp timeout)
         {
           anfds [fd].emask = want;
 
-          /* we received an event but are not interested in it, try mod or del */
-          /* I don't think we ever need MOD, but let's handle it anyways */
+          /*
+           * we received an event but are not interested in it, try mod or del
+           * this often happens because we optimistically do not unregister fds
+           * when we are no longer interested in them, but also when we get spurious
+           * notifications for fds from another process. this is partially handled
+           * above with the gencounter check (== our fd is not the event fd), and
+           * partially here, when epoll_ctl returns an error (== a child has the fd
+           * but we closed it).
+           */
           ev->events = (want & EV_READ  ? EPOLLIN  : 0)
                      | (want & EV_WRITE ? EPOLLOUT : 0);
 
@@ -225,7 +238,7 @@ epoll_init (EV_P_ int flags)
 #ifdef EPOLL_CLOEXEC
   backend_fd = epoll_create1 (EPOLL_CLOEXEC);
 
-  if (backend_fd <= 0)
+  if (backend_fd < 0)
 #endif
     backend_fd = epoll_create (256);
 
@@ -234,9 +247,9 @@ epoll_init (EV_P_ int flags)
 
   fcntl (backend_fd, F_SETFD, FD_CLOEXEC);
 
-  backend_fudge  = 0.; /* kernel sources seem to indicate this to be zero */
-  backend_modify = epoll_modify;
-  backend_poll   = epoll_poll;
+  backend_mintime = 1e-3; /* epoll does sometimes return early, this is just to avoid the worst */
+  backend_modify  = epoll_modify;
+  backend_poll    = epoll_poll;
 
   epoll_eventmax = 64; /* initial number of events receivable per poll */
   epoll_events = (struct epoll_event *)ev_malloc (sizeof (struct epoll_event) * epoll_eventmax);
diff --git a/third_party/libev/ev_kqueue.c b/third_party/libev/ev_kqueue.c
index 1b526d1c7bc0ded3fb3cc8e0124ec9f85b50ebc5..9faf65a660676867f4cb81eded3420193f9d923f 100644
--- a/third_party/libev/ev_kqueue.c
+++ b/third_party/libev/ev_kqueue.c
@@ -1,7 +1,7 @@
 /*
  * libev kqueue backend
  *
- * Copyright (c) 2007,2008,2009,2010 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2012 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -155,15 +155,16 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
 int inline_size
 kqueue_init (EV_P_ int flags)
 {
-  /* Initialize the kernel queue */
+  /* initialize the kernel queue */
+  kqueue_fd_pid = getpid ();
   if ((backend_fd = kqueue ()) < 0)
     return 0;
 
   fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
 
-  backend_fudge  = 0.;
-  backend_modify = kqueue_modify;
-  backend_poll   = kqueue_poll;
+  backend_mintime = 1e-9; /* apparently, they did the right thing in freebsd */
+  backend_modify  = kqueue_modify;
+  backend_poll    = kqueue_poll;
 
   kqueue_eventmax = 64; /* initial number of events receivable per poll */
   kqueue_events = (struct kevent *)ev_malloc (sizeof (struct kevent) * kqueue_eventmax);
@@ -185,8 +186,20 @@ kqueue_destroy (EV_P)
 void inline_size
 kqueue_fork (EV_P)
 {
-  close (backend_fd);
-
+  /* some BSD kernels don't just destroy the kqueue itself,
+   * but also close the fd, which isn't documented, and
+   * impossible to support properly.
+   * we remember the pid of the kqueue call and only close
+   * the fd if the pid is still the same.
+   * this leaks fds on sane kernels, but BSD interfaces are
+   * notoriously buggy and rarely get fixed.
+   */
+  pid_t newpid = getpid ();
+
+  if (newpid == kqueue_fd_pid)
+    close (backend_fd);
+
+  kqueue_fd_pid = newpid;
   while ((backend_fd = kqueue ()) < 0)
     ev_syserr ("(libev) kqueue");
 
@@ -196,3 +209,6 @@ kqueue_fork (EV_P)
   fd_rearm_all (EV_A);
 }
 
+/* sys/event.h defines EV_ERROR */
+#undef EV_ERROR
+
diff --git a/third_party/libev/ev_poll.c b/third_party/libev/ev_poll.c
index e53ae0de93c6055e2346e1a5d02afab295ea3e20..48323516dd0c5b7b5bae67c6b7279b611939aedf 100644
--- a/third_party/libev/ev_poll.c
+++ b/third_party/libev/ev_poll.c
@@ -92,7 +92,7 @@ poll_poll (EV_P_ ev_tstamp timeout)
   int res;
   
   EV_RELEASE_CB;
-  res = poll (polls, pollcnt, ev_timeout_to_ms (timeout));
+  res = poll (polls, pollcnt, timeout * 1e3);
   EV_ACQUIRE_CB;
 
   if (expect_false (res < 0))
@@ -129,9 +129,9 @@ poll_poll (EV_P_ ev_tstamp timeout)
 int inline_size
 poll_init (EV_P_ int flags)
 {
-  backend_fudge  = 0.; /* posix says this is zero */
-  backend_modify = poll_modify;
-  backend_poll   = poll_poll;
+  backend_mintime = 1e-3;
+  backend_modify  = poll_modify;
+  backend_poll    = poll_poll;
 
   pollidxs = 0; pollidxmax = 0;
   polls    = 0; pollmax    = 0; pollcnt = 0;
diff --git a/third_party/libev/ev_port.c b/third_party/libev/ev_port.c
index 0ffebc372672866e3bfde9abe18cbf717b32a24f..9368501cd5d9214e47abf1d2b5e8b89bc1414771 100644
--- a/third_party/libev/ev_port.c
+++ b/third_party/libev/ev_port.c
@@ -147,9 +147,15 @@ port_init (EV_P_ int flags)
 
   fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
 
-  backend_fudge  = 1e-3; /* needed to compensate for port_getn returning early */
-  backend_modify = port_modify;
-  backend_poll   = port_poll;
+  /* if my reading of the opensolaris kernel sources is correct, then
+   * opensolaris does something very stupid: it checks if the time has already
+   * elapsed and doesn't round up if that is the case, otherwise it DOES round
+   * up. Since we can't know what the case is, we need to guess by using a
+   * "large enough" timeout. Normally, 1e-9 would be correct.
+   */
+  backend_mintime = 1e-3; /* needed to compensate for port_getn returning early */
+  backend_modify  = port_modify;
+  backend_poll    = port_poll;
 
   port_eventmax = 64; /* initial number of events receivable per poll */
   port_events = (port_event_t *)ev_malloc (sizeof (port_event_t) * port_eventmax);
diff --git a/third_party/libev/ev_select.c b/third_party/libev/ev_select.c
index 0ea9467e4015a5df7fcf1de9536f8f8d803cdaf6..f38d6ca3e01a00d48dd0b858eca0531b981c7912 100644
--- a/third_party/libev/ev_select.c
+++ b/third_party/libev/ev_select.c
@@ -1,7 +1,7 @@
 /*
  * libev select fd activity backend
  *
- * Copyright (c) 2007,2008,2009,2010 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -195,7 +195,12 @@ select_poll (EV_P_ ev_tstamp timeout)
        */
       if (errno == EINVAL)
         {
-          ev_sleep (timeout);
+          if (timeout)
+            {
+              unsigned long ms = timeout * 1e3;
+              Sleep (ms ? ms : 1);
+            }
+
           return;
         }
       #endif
@@ -269,9 +274,9 @@ select_poll (EV_P_ ev_tstamp timeout)
 int inline_size
 select_init (EV_P_ int flags)
 {
-  backend_fudge  = 0.; /* posix says this is zero */
-  backend_modify = select_modify;
-  backend_poll   = select_poll;
+  backend_mintime = 1e-6;
+  backend_modify  = select_modify;
+  backend_poll    = select_poll;
 
 #if EV_SELECT_USE_FD_SET
   vec_ri  = ev_malloc (sizeof (fd_set)); FD_ZERO ((fd_set *)vec_ri);
@@ -307,4 +312,3 @@ select_destroy (EV_P)
   #endif
 }
 
-
diff --git a/third_party/libev/ev_vars.h b/third_party/libev/ev_vars.h
index 5ee3ed1b2a0eb2368cce927ba8af84bf37d8898f..5cd26d0e13b0ed2d60b139be043afcc649eba4fd 100644
--- a/third_party/libev/ev_vars.h
+++ b/third_party/libev/ev_vars.h
@@ -1,7 +1,7 @@
 /*
  * loop member variable declarations
  *
- * Copyright (c) 2007,2008,2009,2010,2011 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2012 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -51,7 +51,7 @@ VARx(int, activecnt) /* total number of active events ("refcount") */
 VARx(EV_ATOMIC_T, loop_done)  /* signal by ev_break */
 
 VARx(int, backend_fd)
-VARx(ev_tstamp, backend_fudge) /* assumed typical timer resolution */
+VARx(ev_tstamp, backend_mintime) /* assumed typical timer resolution */
 VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev))
 VAR (backend_poll  , void (*backend_poll)(EV_P_ ev_tstamp timeout))
 
@@ -73,6 +73,8 @@ VARx(int, evfd)
 #endif
 VAR (evpipe, int evpipe [2])
 VARx(ev_io, pipe_w)
+VARx(EV_ATOMIC_T, pipe_write_wanted)
+VARx(EV_ATOMIC_T, pipe_write_skipped)
 
 #if !defined(_WIN32) || EV_GENWRAP
 VARx(pid_t, curpid)
@@ -108,6 +110,7 @@ VARx(int, epoll_epermmax)
 #endif
 
 #if EV_USE_KQUEUE || EV_GENWRAP
+VARx(pid_t, kqueue_fd_pid)
 VARx(struct kevent *, kqueue_changes)
 VARx(int, kqueue_changemax)
 VARx(int, kqueue_changecnt)
@@ -180,7 +183,6 @@ VAR (fs_hash, ANFS fs_hash [EV_INOTIFY_HASHSIZE])
 #endif
 
 VARx(EV_ATOMIC_T, sig_pending)
-VARx(int, nosigmask)
 #if EV_USE_SIGNALFD || EV_GENWRAP
 VARx(int, sigfd)
 VARx(ev_io, sigfd_w)
@@ -194,8 +196,8 @@ VARx(unsigned int, loop_count) /* total number of loop iterations/blocks */
 VARx(unsigned int, loop_depth) /* #ev_run enters - #ev_run leaves */
 
 VARx(void *, userdata)
-VAR (release_cb, void (*release_cb)(EV_P))
-VAR (acquire_cb, void (*acquire_cb)(EV_P))
+VAR (release_cb, void (*release_cb)(EV_P) EV_THROW)
+VAR (acquire_cb, void (*acquire_cb)(EV_P) EV_THROW)
 VAR (invoke_cb , void (*invoke_cb) (EV_P))
 #endif
 
diff --git a/third_party/libev/ev_win32.c b/third_party/libev/ev_win32.c
index 338886efe407cfba00e33a1e425cc90d4d760a0c..9217af585c64602595081c660cedd4d3b160e6c0 100644
--- a/third_party/libev/ev_win32.c
+++ b/third_party/libev/ev_win32.c
@@ -48,6 +48,16 @@
 /* for the crt to do something about it */
 volatile double SIGFPE_REQ = 0.0f;
 
+static SOCKET
+ev_tcp_socket (void)
+{
+#if EV_USE_WSASOCKET
+  return WSASocket (AF_INET, SOCK_STREAM, 0, 0, 0, 0);
+#else
+  return socket (AF_INET, SOCK_STREAM, 0);
+#endif
+}
+
 /* oh, the humanity! */
 static int
 ev_pipe (int filedes [2])
@@ -59,7 +69,7 @@ ev_pipe (int filedes [2])
   SOCKET listener;
   SOCKET sock [2] = { -1, -1 };
 
-  if ((listener = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
+  if ((listener = ev_tcp_socket ()) == INVALID_SOCKET)
     return -1;
 
   addr.sin_family = AF_INET;
@@ -75,7 +85,7 @@ ev_pipe (int filedes [2])
   if (listen (listener, 1))
     goto fail;
 
-  if ((sock [0] = socket (AF_INET, SOCK_STREAM, 0)) == INVALID_SOCKET)
+  if ((sock [0] = ev_tcp_socket ()) == INVALID_SOCKET)
     goto fail;
 
   if (connect (sock [0], (struct sockaddr *)&addr, addr_size))
diff --git a/third_party/libev/ev_wrap.h b/third_party/libev/ev_wrap.h
index 2c195c5db4b2f8e65de6e55603b9752f3729b24b..3c47c6e092f71fc580afb55ff88daff8335b3b7d 100644
--- a/third_party/libev/ev_wrap.h
+++ b/third_party/libev/ev_wrap.h
@@ -10,7 +10,7 @@
 #define activecnt ((loop)->activecnt)
 #define loop_done ((loop)->loop_done)
 #define backend_fd ((loop)->backend_fd)
-#define backend_fudge ((loop)->backend_fudge)
+#define backend_mintime ((loop)->backend_mintime)
 #define backend_modify ((loop)->backend_modify)
 #define backend_poll ((loop)->backend_poll)
 #define anfds ((loop)->anfds)
@@ -25,6 +25,8 @@
 #define evfd ((loop)->evfd)
 #define evpipe ((loop)->evpipe)
 #define pipe_w ((loop)->pipe_w)
+#define pipe_write_wanted ((loop)->pipe_write_wanted)
+#define pipe_write_skipped ((loop)->pipe_write_skipped)
 #define curpid ((loop)->curpid)
 #define postfork ((loop)->postfork)
 #define vec_ri ((loop)->vec_ri)
@@ -43,6 +45,7 @@
 #define epoll_eperms ((loop)->epoll_eperms)
 #define epoll_epermcnt ((loop)->epoll_epermcnt)
 #define epoll_epermmax ((loop)->epoll_epermmax)
+#define kqueue_fd_pid ((loop)->kqueue_fd_pid)
 #define kqueue_changes ((loop)->kqueue_changes)
 #define kqueue_changemax ((loop)->kqueue_changemax)
 #define kqueue_changecnt ((loop)->kqueue_changecnt)
@@ -85,7 +88,6 @@
 #define fs_2625 ((loop)->fs_2625)
 #define fs_hash ((loop)->fs_hash)
 #define sig_pending ((loop)->sig_pending)
-#define nosigmask ((loop)->nosigmask)
 #define sigfd ((loop)->sigfd)
 #define sigfd_w ((loop)->sigfd_w)
 #define sigfd_set ((loop)->sigfd_set)
@@ -107,7 +109,7 @@
 #undef activecnt
 #undef loop_done
 #undef backend_fd
-#undef backend_fudge
+#undef backend_mintime
 #undef backend_modify
 #undef backend_poll
 #undef anfds
@@ -122,6 +124,8 @@
 #undef evfd
 #undef evpipe
 #undef pipe_w
+#undef pipe_write_wanted
+#undef pipe_write_skipped
 #undef curpid
 #undef postfork
 #undef vec_ri
@@ -140,6 +144,7 @@
 #undef epoll_eperms
 #undef epoll_epermcnt
 #undef epoll_epermmax
+#undef kqueue_fd_pid
 #undef kqueue_changes
 #undef kqueue_changemax
 #undef kqueue_changecnt
@@ -182,7 +187,6 @@
 #undef fs_2625
 #undef fs_hash
 #undef sig_pending
-#undef nosigmask
 #undef sigfd
 #undef sigfd_w
 #undef sigfd_set
diff --git a/third_party/libev/event.c b/third_party/libev/event.c
index aaf6d534ca45f8c2c7041e3329bbda88a1d08ad6..5586cd353835dc0033dc6f74649aed4a2959932c 100644
--- a/third_party/libev/event.c
+++ b/third_party/libev/event.c
@@ -1,7 +1,7 @@
 /*
  * libevent compatibility layer
  *
- * Copyright (c) 2007,2008,2009,2010 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2012 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -78,13 +78,15 @@ ev_tv_get (struct timeval *tv)
 #define EVENT_STRINGIFY(s) # s
 #define EVENT_VERSION(a,b) EVENT_STRINGIFY (a) "." EVENT_STRINGIFY (b)
 
-const char *event_get_version (void)
+const char *
+event_get_version (void)
 {
   /* returns ABI, not API or library, version */
   return EVENT_VERSION (EV_VERSION_MAJOR, EV_VERSION_MINOR);
 }
 
-const char *event_get_method (void)
+const char *
+event_get_method (void)
 {
   return "libev";
 }
@@ -105,6 +107,23 @@ void *event_init (void)
   return ev_x_cur;
 }
 
+const char *
+event_base_get_method (const struct event_base *base)
+{
+  return "libev";
+}
+
+struct event_base *
+event_base_new (void)
+{
+#if EV_MULTIPLICITY
+  return (struct event_base *)ev_loop_new (EVFLAG_AUTO);
+#else
+  assert (("libev: multiple event bases not supported when not compiled with EV_MULTIPLICITY"));
+  return NULL;
+#endif
+}
+
 void event_base_free (struct event_base *base)
 {
   dLOOPbase;
@@ -137,6 +156,12 @@ int event_loopexit (struct timeval *tv)
   return event_base_loopexit (ev_x_cur, tv);
 }
 
+event_callback_fn event_get_callback
+(const struct event *ev)
+{
+  return ev->ev_callback;
+}
+
 static void
 ev_x_cb (struct event *ev, int revents)
 {
@@ -332,9 +357,7 @@ int event_base_loop (struct event_base *base, int flags)
 {
   dLOOPbase;
 
-  ev_run (EV_A_ flags);
-
-  return 0;
+  return !ev_run (EV_A_ flags);
 }
 
 int event_base_dispatch (struct event_base *base)
diff --git a/third_party/libev/event.h b/third_party/libev/event.h
index 10ff05a3cb4d64a397bb3aada943f2836858161a..aa81928f35649a38f26b7674ded4e5f1ee5e774a 100644
--- a/third_party/libev/event.h
+++ b/third_party/libev/event.h
@@ -1,7 +1,7 @@
 /*
  * libevent compatibility header, only core events supported
  *
- * Copyright (c) 2007,2008,2010 Marc Alexander Lehmann <libev@schmorp.de>
+ * Copyright (c) 2007,2008,2010,2012 Marc Alexander Lehmann <libev@schmorp.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modifica-
@@ -75,6 +75,8 @@ struct event_base;
 #define EVLIST_INTERNAL 0x10
 #define EVLIST_INIT     0x80
 
+typedef void (*event_callback_fn)(int, short, void *);
+
 struct event
 {
   /* libev watchers we map onto */
@@ -86,7 +88,7 @@ struct event
 
   /* compatibility slots */
   struct event_base *ev_base;
-  void (*ev_callback)(int, short, void *arg);
+  event_callback_fn ev_callback;
   void *ev_arg;
   int ev_fd;
   int ev_pri;
@@ -95,9 +97,12 @@ struct event
   short ev_events;
 };
 
+event_callback_fn event_get_callback (const struct event *ev);
+
 #define EV_READ                    EV_READ
 #define EV_WRITE                   EV_WRITE
 #define EV_PERSIST                 0x10
+#define EV_ET                      0x20 /* nop */
 
 #define EVENT_SIGNAL(ev)           ((int) (ev)->ev_fd)
 #define EVENT_FD(ev)               ((int) (ev)->ev_fd)
@@ -152,6 +157,8 @@ int event_pending (struct event *ev, short, struct timeval *tv);
 int event_priority_init (int npri);
 int event_priority_set (struct event *ev, int pri);
 
+struct event_base *event_base_new (void);
+const char *event_base_get_method (const struct event_base *);
 int event_base_set (struct event_base *base, struct event *ev);
 int event_base_loop (struct event_base *base, int);
 int event_base_loopexit (struct event_base *base, struct timeval *tv);
diff --git a/third_party/libev/libev.m4 b/third_party/libev/libev.m4
index e3f4c81b236d53a9f0ea2aa965a4264b2958bfcd..6fdb13f62271b55d105e19d0cc6964d9c5cd2c32 100644
--- a/third_party/libev/libev.m4
+++ b/third_party/libev/libev.m4
@@ -16,7 +16,7 @@ AC_CHECK_FUNCS(clock_gettime, [], [
                        #include <time.h>],
                       [struct timespec ts; int status = syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts)])],
                      [ac_have_clock_syscall=1
-                      AC_DEFINE(HAVE_CLOCK_SYSCALL, 1, "use syscall interface for clock_gettime")
+                      AC_DEFINE(HAVE_CLOCK_SYSCALL, 1, Define to 1 to use the syscall interface for clock_gettime)
                       AC_MSG_RESULT(yes)],
                      [AC_MSG_RESULT(no)])
    fi
@@ -35,5 +35,8 @@ AC_CHECK_FUNCS(nanosleep, [], [
    fi
 ])
 
-AC_CHECK_LIB(m, ceil)
+if test -z "$LIBEV_M4_AVOID_LIBM"; then
+   LIBM=m
+fi
+AC_SEARCH_LIBS(floor, $LIBM, [AC_DEFINE(HAVE_FLOOR, 1, Define to 1 if the floor function is available)])
 
diff --git a/third_party/libev/update_ev_c b/third_party/libev/update_ev_c
new file mode 100755
index 0000000000000000000000000000000000000000..b55fd7fb79948077d6a49122971723e9cdeb419d
--- /dev/null
+++ b/third_party/libev/update_ev_c
@@ -0,0 +1,8 @@
+#!/bin/sh -e
+
+(
+   sed -ne '1,\%/\* ECB.H BEGIN \*/%p' ev.c
+   cat ~/src/libecb/ecb.h
+   sed -ne '\%/\* ECB.H END \*/%,$p' ev.c
+) >ev.c~ && mv ev.c~ ev.c
+