summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNick Morrott <nickm@debian.org>2019-12-02 02:49:10 +0100
committerNick Morrott <nickm@debian.org>2019-12-02 02:49:10 +0100
commit8141ce42e129eb43c9cd64b85c49de8061989d88 (patch)
treef62c312e8601ea01152452ad401c2f171f457ec2
parent7f7298c8560306cbf1cf58751d48d7adb6292381 (diff)
parent2b10520f73a0c3c80bb9b79040f29b30b19c9105 (diff)
Record libev-perl (4.30-1) in archive suite sid
-rw-r--r--Changes23
-rw-r--r--EV.pm4
-rw-r--r--EV.xs16
-rw-r--r--MANIFEST2
-rw-r--r--META.json2
-rw-r--r--META.yml2
-rw-r--r--Makefile.PL67
-rw-r--r--README2
-rw-r--r--debian/changelog19
-rw-r--r--debian/control8
-rw-r--r--debian/copyright1
-rw-r--r--debian/upstream/metadata4
-rw-r--r--debian/watch4
-rw-r--r--libev/Changes43
-rw-r--r--libev/ev.c436
-rw-r--r--libev/ev.h10
-rw-r--r--libev/ev.pod17
-rw-r--r--libev/ev_epoll.c30
-rw-r--r--libev/ev_iouring.c645
-rw-r--r--libev/ev_kqueue.c8
-rw-r--r--libev/ev_linuxaio.c179
-rw-r--r--libev/ev_poll.c12
-rw-r--r--libev/ev_port.c8
-rw-r--r--libev/ev_select.c12
-rw-r--r--libev/ev_vars.h31
-rw-r--r--libev/ev_win32.c4
-rw-r--r--libev/ev_wrap.h54
-rw-r--r--schmorp.h2
28 files changed, 1320 insertions, 325 deletions
diff --git a/Changes b/Changes
index 1e5ebd1..8cdb749 100644
--- a/Changes
+++ b/Changes
@@ -3,6 +3,29 @@ Revision history for Perl extension EV
Changes marked with (libev) are changes in libev, and have more
documentation in the libev Changes file.
+4.30 Fri Nov 22 21:00:00 CET 2019
+ - (libev) use a different and hopefully even more portable
+ test to disable io_uring when header files are too old,
+ by directly testing LINUX_VERSION_CODE.
+ - (libev) fix a bug in the io_uring backend that polled the wrong
+ backend fd, causing it to not work in many cases.
+
+4.29 Fri Nov 22 15:34:29 CET 2019
+ - (libev) add io uring autoconf and non-autoconf detection,
+ the latter of which should disable io_uring compilation
+ on old systems.
+
+4.28 Tue Nov 19 13:55:39 CET 2019
+	- (libev) fix ev_port backend, thanks to David H. Gutteridge for
+	  reminding me to actually release the fix.
+	- (libev) many bugfixes in linuxaio backend.
+	- (libev) experimental io uring interface.
+ - try to name ev.h more explicitly, to hopefully improve portability.
+ - opportunistically round up wait times for poll and epoll backend,
+ to avoid unnecessary loop iterations.
+ - add build dependency on ev_linuxaio.c.
+ - quickly (re)-ported to minix 3.3 before minix crashed again.
+
4.27 Thu Jun 27 09:39:58 CEST 2019
- (libev) completely rewritten linuxaio backend, maybe
usable as a general-use backend.
diff --git a/EV.pm b/EV.pm
index f24db60..21c48f6 100644
--- a/EV.pm
+++ b/EV.pm
@@ -121,7 +121,7 @@ package EV;
use common::sense;
BEGIN {
- our $VERSION = 4.27;
+ our $VERSION = '4.30';
use XSLoader;
local $^W = 0; # avoid spurious warning
XSLoader::load "EV", $VERSION;
@@ -372,7 +372,7 @@ if the signal specified by C<$signal> had occurred.
Feed a signal event into EV - unlike C<EV::feed_signal_event>, this works
regardless of which loop has registered the signal, and is mainly useful
-fro custom signal implementations.
+for custom signal implementations.
=item EV::set_io_collect_interval $time
diff --git a/EV.xs b/EV.xs
index 9bb6b66..7134d0e 100644
--- a/EV.xs
+++ b/EV.xs
@@ -31,7 +31,7 @@ sv_fileno (SV *fh)
#define EV_USE_NANOSLEEP EV_USE_MONOTONIC
#define EV_USE_FLOOR 1
#define EV_API_STATIC
-#define EV_H <ev.h>
+#define EV_H "../libev/ev.h"
#define EV_CONFIG_H error
#include "EV/EVAPI.h"
@@ -44,7 +44,7 @@ sv_fileno (SV *fh)
/* due to bugs in OS X we have to use libev/ explicitly here */
#include "libev/ev.c"
-#if !defined _WIN32 && !defined _MINIX && !EV_NO_ATFORK
+#if !defined _WIN32 && !defined __minix && !EV_NO_ATFORK
# include <pthread.h>
#endif
@@ -204,11 +204,11 @@ e_cb (EV_P_ ev_watcher *w, int revents)
SV *sv_self, *sv_events;
/* libev might have stopped the watcher */
- if (expect_false (w->e_flags & WFLAG_UNREFED)
+ if (ecb_expect_false (w->e_flags & WFLAG_UNREFED)
&& !ev_is_active (w))
REF (w);
- if (expect_true (sv_self_cache))
+ if (ecb_expect_true (sv_self_cache))
{
sv_self = sv_self_cache; sv_self_cache = 0;
SvRV_set (sv_self, SvREFCNT_inc_NN (w->self));
@@ -219,7 +219,7 @@ e_cb (EV_P_ ev_watcher *w, int revents)
SvREADONLY_on (sv_self);
}
- if (expect_true (sv_events_cache))
+ if (ecb_expect_true (sv_events_cache))
{
sv_events = sv_events_cache; sv_events_cache = 0;
SvIV_set (sv_events, revents);
@@ -239,7 +239,7 @@ e_cb (EV_P_ ev_watcher *w, int revents)
PUTBACK;
call_sv (w->cb_sv, G_DISCARD | G_VOID | G_EVAL);
- if (expect_false (SvREFCNT (sv_self) != 1 || sv_self_cache))
+ if (ecb_expect_false (SvREFCNT (sv_self) != 1 || sv_self_cache))
SvREFCNT_dec (sv_self);
else
{
@@ -248,12 +248,12 @@ e_cb (EV_P_ ev_watcher *w, int revents)
sv_self_cache = sv_self;
}
- if (expect_false (SvREFCNT (sv_events) != 1 || sv_events_cache))
+ if (ecb_expect_false (SvREFCNT (sv_events) != 1 || sv_events_cache))
SvREFCNT_dec (sv_events);
else
sv_events_cache = sv_events;
- if (expect_false (SvTRUE (ERRSV)))
+ if (ecb_expect_false (SvTRUE (ERRSV)))
{
SPAGAIN;
PUSHMARK (SP);
diff --git a/MANIFEST b/MANIFEST
index d7e38f7..6e8e0e4 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -21,6 +21,7 @@ t/06_loop_once.t
t/07_loop_timer.t
t/08_async.t
t/09_brandon.t
+#t/10_nheap.t # too timing-sensitive
t/11_signal.t
libev/LICENSE
@@ -35,6 +36,7 @@ libev/ev_select.c
libev/ev_poll.c
libev/ev_epoll.c
libev/ev_linuxaio.c
+libev/ev_iouring.c
libev/ev_kqueue.c
libev/ev_port.c
libev/ev.pod
diff --git a/META.json b/META.json
index 38fe751..9a515d4 100644
--- a/META.json
+++ b/META.json
@@ -38,5 +38,5 @@
}
},
"release_status" : "stable",
- "version" : 4.27
+ "version" : "4.30"
}
diff --git a/META.yml b/META.yml
index 5fc74b4..f6c48c1 100644
--- a/META.yml
+++ b/META.yml
@@ -20,4 +20,4 @@ no_index:
- inc
requires:
common::sense: '0'
-version: 4.27
+version: '4.30'
diff --git a/Makefile.PL b/Makefile.PL
index 23848b2..e85219e 100644
--- a/Makefile.PL
+++ b/Makefile.PL
@@ -9,7 +9,7 @@ sub have_inc($) {
my $DEFINE;
-unless (-e "libev/ev_linuxaio.c") {
+unless (-e "libev/ev_iouring.c") {
print <<EOF;
***
@@ -115,7 +115,7 @@ print <<EOF;
The second very portable backend is poll(2). It does not exist on windows
and various versions of Mac OS X (and on the other versions it simply
doesn't work), but works basically everywhere else. It is recommended to use
-the default here unless you run into compile problems in ev_poll.c.
+the default here unless you run into compilation problems in ev_poll.c.
EOF
@@ -187,13 +187,54 @@ print <<EOF;
*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***
-Similarly to the epoll backend above, EV can take advantage of kqueue on
-many BSD systems. Support for kqueue will be detected at runtime, with a
-safe fallback to other methods when it cannot be used.
+Linux 4.19 introduced another event polling interface, "io_uring". While
+this API is far superior to epoll and almost rivals linuxaio, it also
+suffers from the same issues as kqueue typically does: only a subset of
+file types are supported (as of 5.2). It is also very buggy still, and
+most importantly, very very slow for most workloads. Therefore, this
+backend is not used by default, even when it is compiled in, and you have
+to request it explicitly, e.g. with LIBEV_FLAGS=128. If unsure, accept the
+default.
+
+EOF
+
+my $can_iouring = have_inc "linux/fs.h";
+$can_iouring = $ENV{EV_IOURING} if exists $ENV{EV_IOURING};
+$can_iouring = 0 + (prompt ("Enable linux io_uring backend (y/n)?", $can_iouring ? "y" : "n") =~ /[yY]/);
+$DEFINE .= " -DEV_USE_IOURING=$can_iouring";
+
+if ($can_iouring) {
+print <<EOF;
-Note that kqueue is broken on most operating systems, so by default it
-won't be used on many platforms, but you can still create your own event
-loop with kqueue backend if you ask specifically for it.
+*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***
+
+
+The previously mentioned Linux io_uring is experimental and will not be
+used unless requested explicitly. You can, however, choose to make it a
+recommended backend, which means it will be chosen if available even when
+not explicitly asked for, in preference to epoll on GNU/Linux. This option
+is likely temporary. When unsure, accept the default.
+
+EOF
+
+my $recommend_iouring = 0;
+$recommend_iouring = $ENV{EV_RECOMMEND_IOURING} if exists $ENV{EV_RECOMMEND_IOURING};
+$recommend_iouring = 0 + (prompt ("Treat io_uring as a recommended backend (y/n)?", $recommend_iouring ? "y" : "n") =~ /[yY]/);
+$DEFINE .= " -DEV_RECOMMEND_IOURING=$recommend_iouring";
+}
+
+print <<EOF;
+
+*** *** *** *** *** *** *** *** *** *** *** *** *** *** *** *** *** ***
+
+
+EV can take advantage of kqueue on many BSD systems. Support for kqueue
+will be detected at runtime, with a safe fallback to other methods when it
+cannot be used.
+
+Note that kqueue is subtly broken on most operating systems, so by default
+it won't be used on many platforms, but you can still create your own
+event loop with kqueue backend if you ask specifically for it.
Here is what we know:
@@ -206,7 +247,8 @@ OS X: completely, utterly broken on at least <= 10.6.
EOF
-my $can_kqueue = have_inc "sys/event.h";
+# minix has all the header files, but no implementation. won-der-ful.
+my $can_kqueue = have_inc "sys/event.h" && $^O ne "minix";
$can_kqueue = $ENV{EV_KQUEUE} if exists $ENV{EV_KQUEUE};
$DEFINE .= " -DEV_USE_KQUEUE=" . (0 + (prompt ("Enable kqueue backend (y/n)?", $can_kqueue ? "y" : "n") =~ /[yY]/));
@@ -325,11 +367,11 @@ Very rarely, people want to tweak EV even more, e.g. to exclude
or include certain watcher types or backends. This can be done by adding
extra -D options here, or via the EV_EXTRA_DEFS environment variable.
-For example, if you run into compile problems because of missing memory
+For example, if you run into compilation problems because of missing memory
fences (or you just want extra performance), you can tell EV to not support
smp and threads via -DEV_NO_THREADS.
-Normal persons just press enter.
+Most people would just press enter.
EOF
@@ -352,7 +394,7 @@ WriteMakefile(
},
depend => {
"EV.c" => "EV/EVAPI.h "
- . "libev/ev.c libev/ev.h libev/ev_epoll.c libev/ev_select.c libev/ev_kqueue.c libev/ev_poll.c "
+ . "libev/ev.c libev/ev.h libev/ev_epoll.c libev/ev_select.c libev/ev_kqueue.c libev/ev_poll.c libev/ev_linuxaio.c "
. "libev/ev_vars.h libev/ev_wrap.h",
},
INC => "-Ilibev",
@@ -379,4 +421,3 @@ WriteMakefile(
},
);
-
diff --git a/README b/README
index e7d53af..d618946 100644
--- a/README
+++ b/README
@@ -303,7 +303,7 @@ BASIC INTERFACE
EV::feed_signal $signal
Feed a signal event into EV - unlike "EV::feed_signal_event", this
works regardless of which loop has registered the signal, and is
- mainly useful fro custom signal implementations.
+ mainly useful for custom signal implementations.
EV::set_io_collect_interval $time
$loop->set_io_collect_interval ($time)
diff --git a/debian/changelog b/debian/changelog
index 55db1ba..d0b5bd9 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,22 @@
+libev-perl (4.30-1) unstable; urgency=medium
+
+ * Team upload.
+
+ * New upstream version 4.30
+ * d/control:
+ - Declare compliance with Debian Policy 4.4.1
+ - Refresh build dependencies for cross builds
+ - Annotate test-only build dependencies with <!nocheck>
+ - Add Rules-Requires-Root field
+ * d/copyright:
+ - Refresh Debian Files stanza
+ * d/u/metadata:
+ - Add upstream metadata
+ * d/watch:
+ - Migrate to version 4 watch file format
+
+ -- Nick Morrott <nickm@debian.org> Mon, 02 Dec 2019 01:49:10 +0000
+
libev-perl (4.27-1) unstable; urgency=medium
* Team upload.
diff --git a/debian/control b/debian/control
index 1146a33..09e75af 100644
--- a/debian/control
+++ b/debian/control
@@ -6,12 +6,14 @@ Testsuite: autopkgtest-pkg-perl
Priority: optional
Build-Depends: debhelper-compat (= 12),
libcanary-stability-perl,
- libcommon-sense-perl,
- perl
-Standards-Version: 4.4.0
+ libcommon-sense-perl <!nocheck>,
+ perl-xs-dev,
+ perl:native
+Standards-Version: 4.4.1
Vcs-Browser: https://salsa.debian.org/perl-team/modules/packages/libev-perl
Vcs-Git: https://salsa.debian.org/perl-team/modules/packages/libev-perl.git
Homepage: https://metacpan.org/release/EV
+Rules-Requires-Root: no
Package: libev-perl
Architecture: any
diff --git a/debian/copyright b/debian/copyright
index 9a5c5ff..f8edab3 100644
--- a/debian/copyright
+++ b/debian/copyright
@@ -25,6 +25,7 @@ Copyright: 2011, 2011, Dmitry E. Oboukhov <unera@debian.org>,
2011, Nicholas Bamber <nicholas@periapt.co.uk>,
2011, Alessandro Ghedini <ghedo@debian.org>
2013-2018, gregor herrmann <gregoa@debian.org>
+ 2019, Nick Morrott <nickm@debian.org>
License: Artistic or GPL-2+
License: Artistic
diff --git a/debian/upstream/metadata b/debian/upstream/metadata
new file mode 100644
index 0000000..34badd3
--- /dev/null
+++ b/debian/upstream/metadata
@@ -0,0 +1,4 @@
+---
+Archive: CPAN
+Bug-Database: https://rt.cpan.org/Public/Dist/Display.html?Name=EV
+Bug-Submit: bug-EV@rt.cpan.org
diff --git a/debian/watch b/debian/watch
index 9300704..d15cd52 100644
--- a/debian/watch
+++ b/debian/watch
@@ -1,2 +1,2 @@
-version=3
-https://metacpan.org/release/EV .*/EV-v?(\d[\d.-]+)\.(?:tar(?:\.gz|\.bz2)?|tgz|zip)$
+version=4
+https://metacpan.org/release/EV .*/EV-v?@ANY_VERSION@@ARCHIVE_EXT@$
diff --git a/libev/Changes b/libev/Changes
index 9167637..ff97245 100644
--- a/libev/Changes
+++ b/libev/Changes
@@ -1,9 +1,44 @@
Revision history for libev, a high-performance and full-featured event loop.
-TODO: revisit 59.x timer in the light of mdoenr powersaving
-4.27 (EV only)
- - linux aio backend almost complete rewritten to work around its
+TODO: revisit 59.x timer in the light of modern powersaving
+TODO: maybe use timerfd to detect time jumps on linux
+TODO: document EV_TSTAMP_T
+
+4.30 (EV only)
+ - change non-autoconf test for __kernel_rwf_t by testing
+ LINUX_VERSION_CODE, the most direct test I could find.
+ - fix a bug in the io_uring backend that polled the wrong
+ backend fd, causing it to not work in many cases.
+
+4.29 (EV only)
+ - add io uring autoconf and non-autoconf detection.
+ - disable io_uring when some header files are too old.
+
+4.28 (EV only)
+ - linuxaio backend resulted in random memory corruption
+ when loop is forked.
+ - linuxaio backend might have tried to cancel an iocb
+ multiple times (was unable to trigger this).
+ - linuxaio backend now employs a generation counter to
+ avoid handling spurious events from cancelled requests.
+ - io_cancel can return EINTR, deal with it. also, assume
+ io_submit also returns EINTR.
+ - fix some other minor bugs in linuxaio backend.
+	- ev_tstamp type can now be overridden by defining EV_TSTAMP_T.
+ - cleanup: replace expect_true/false and noinline by their
+ libecb counterparts.
+ - move syscall infrastructure from ev_linuxaio.c to ev.c.
+ - prepare io_uring integration.
+ - tweak ev_floor.
+ - epoll, poll, win32 Sleep and other places that use millisecond
+	  resolution now all try to round up times.
+ - solaris port backend didn't compile.
+ - abstract time constants into their macros, for more flexibility.
+
+4.27 Thu Jun 27 22:43:44 CEST 2019
+ - linux aio backend almost completely rewritten to work around its
limitations.
+ - linux aio backend now requires linux 4.19+.
- epoll backend now mandatory for linux aio backend.
- fail assertions more aggressively on invalid fd's detected
in the event loop, do not just silently fd_kill in case of
@@ -23,7 +58,7 @@ TODO: revisit 59.x timer in the light of mdoenr powersaving
4.25 Fri Dec 21 07:49:20 CET 2018
- INCOMPATIBLE CHANGE: EV_THROW was renamed to EV_NOEXCEPT
(EV_THROW still provided) and now uses noexcept on C++11 or newer.
- - move the darwin select workaround highe rin ev.c, as newer versions of
+ - move the darwin select workaround higher in ev.c, as newer versions of
darwin managed to break their broken select even more.
- ANDROID => __ANDROID__ (reported by enh@google.com).
- disable epoll_create1 on android because it has broken header files
diff --git a/libev/ev.c b/libev/ev.c
index ffa4091..9dc2a24 100644
--- a/libev/ev.c
+++ b/libev/ev.c
@@ -126,6 +126,15 @@
# define EV_USE_LINUXAIO 0
# endif
+# if HAVE_LINUX_FS_H && HAVE_SYS_TIMERFD_H && HAVE_KERNEL_RWF_T
+# ifndef EV_USE_IOURING
+# define EV_USE_IOURING EV_FEATURE_BACKENDS
+# endif
+# else
+# undef EV_USE_IOURING
+# define EV_USE_IOURING 0
+# endif
+
# if HAVE_KQUEUE && HAVE_SYS_EVENT_H
# ifndef EV_USE_KQUEUE
# define EV_USE_KQUEUE EV_FEATURE_BACKENDS
@@ -170,7 +179,7 @@
# undef EV_USE_EVENTFD
# define EV_USE_EVENTFD 0
# endif
-
+
#endif
/* OS X, in its infinite idiocy, actually HARDCODES
@@ -334,6 +343,14 @@
# endif
#endif
+#ifndef EV_USE_IOURING
+# if __linux /* later checks might disable again */
+# define EV_USE_IOURING 1
+# else
+# define EV_USE_IOURING 0
+# endif
+#endif
+
#ifndef EV_USE_INOTIFY
# if __linux && (__GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ >= 4))
# define EV_USE_INOTIFY EV_FEATURE_OS
@@ -408,6 +425,7 @@
# define clock_gettime(id, ts) syscall (SYS_clock_gettime, (id), (ts))
# undef EV_USE_MONOTONIC
# define EV_USE_MONOTONIC 1
+# define EV_NEED_SYSCALL 1
# else
# undef EV_USE_CLOCK_SYSCALL
# define EV_USE_CLOCK_SYSCALL 0
@@ -431,6 +449,14 @@
# define EV_USE_INOTIFY 0
#endif
+#if __linux && EV_USE_IOURING
+# include <linux/version.h>
+# if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
+# undef EV_USE_IOURING
+# define EV_USE_IOURING 0
+# endif
+#endif
+
#if !EV_USE_NANOSLEEP
/* hp-ux has it in sys/time.h, which we unconditionally include above */
# if !defined _WIN32 && !defined __hpux
@@ -440,12 +466,29 @@
#if EV_USE_LINUXAIO
# include <sys/syscall.h>
-# if !SYS_io_getevents || !EV_USE_EPOLL /* ev_linxaio uses ev_poll.c:ev_epoll_create */
+# if SYS_io_getevents && EV_USE_EPOLL /* linuxaio backend requires epoll backend */
+# define EV_NEED_SYSCALL 1
+# else
# undef EV_USE_LINUXAIO
# define EV_USE_LINUXAIO 0
# endif
#endif
+#if EV_USE_IOURING
+# include <sys/syscall.h>
+# if !SYS_io_uring_setup && __linux && !__alpha
+# define SYS_io_uring_setup 425
+# define SYS_io_uring_enter 426
+# define SYS_io_uring_wregister 427
+# endif
+# if SYS_io_uring_setup && EV_USE_EPOLL /* iouring backend requires epoll backend */
+# define EV_NEED_SYSCALL 1
+# else
+# undef EV_USE_IOURING
+# define EV_USE_IOURING 0
+# endif
+#endif
+
#if EV_USE_INOTIFY
# include <sys/statfs.h>
# include <sys/inotify.h>
@@ -494,7 +537,7 @@ struct signalfd_siginfo
};
#endif
-/**/
+/*****************************************************************************/
#if EV_VERIFY >= 3
# define EV_FREQUENT_CHECK ev_verify (EV_A)
@@ -509,11 +552,26 @@ struct signalfd_siginfo
#define MIN_INTERVAL 0.0001220703125 /* 1/2**13, good till 4000 */
/*#define MIN_INTERVAL 0.00000095367431640625 /* 1/2**20, good till 2200 */
-#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
-#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
+#define MIN_TIMEJUMP 1. /* minimum timejump that gets detected (if monotonic clock available) */
+#define MAX_BLOCKTIME 59.743 /* never wait longer than this time (to detect time jumps) */
-#define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
-#define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
+/* find a portable timestamp that is "always" in the future but fits into time_t.
+ * this is quite hard, and we are mostly guessing - we handle 32 bit signed/unsigned time_t,
+ * and sizes larger than 32 bit, and maybe the unlikely floating point time_t */
+#define EV_TSTAMP_HUGE \
+ (sizeof (time_t) >= 8 ? 10000000000000. \
+ : 0 < (time_t)4294967295 ? 4294967295. \
+ : 2147483647.) \
+
+#ifndef EV_TS_CONST
+# define EV_TS_CONST(nv) nv
+# define EV_TS_TO_MSEC(a) a * 1e3 + 0.9999
+# define EV_TS_FROM_USEC(us) us * 1e-6
+# define EV_TV_SET(tv,t) do { tv.tv_sec = (long)t; tv.tv_usec = (long)((t - tv.tv_sec) * 1e6); } while (0)
+# define EV_TS_SET(ts,t) do { ts.tv_sec = (long)t; ts.tv_nsec = (long)((t - ts.tv_sec) * 1e9); } while (0)
+# define EV_TV_GET(tv) ((tv).tv_sec + (tv).tv_usec * 1e-6)
+# define EV_TS_GET(ts) ((ts).tv_sec + (ts).tv_nsec * 1e-9)
+#endif
/* the following is ecb.h embedded into libev - use update_ev_c to update from an external copy */
/* ECB.H BEGIN */
@@ -1538,7 +1596,7 @@ ecb_binary32_to_binary16 (uint32_t x)
#if ECB_MEMORY_FENCE_NEEDS_PTHREADS
/* if your architecture doesn't need memory fences, e.g. because it is
* single-cpu/core, or if you use libev in a project that doesn't use libev
- * from multiple threads, then you can define ECB_AVOID_PTHREADS when compiling
+ * from multiple threads, then you can define ECB_NO_THREADS when compiling
* libev, in which cases the memory fences become nops.
* alternatively, you can remove this #error and link against libpthread,
* which will then provide the memory fences.
@@ -1552,18 +1610,80 @@ ecb_binary32_to_binary16 (uint32_t x)
# define ECB_MEMORY_FENCE_RELEASE ECB_MEMORY_FENCE
#endif
-#define expect_false(cond) ecb_expect_false (cond)
-#define expect_true(cond) ecb_expect_true (cond)
-#define noinline ecb_noinline
-
#define inline_size ecb_inline
#if EV_FEATURE_CODE
# define inline_speed ecb_inline
#else
-# define inline_speed noinline static
+# define inline_speed ecb_noinline static
#endif
+/*****************************************************************************/
+/* raw syscall wrappers */
+
+#if EV_NEED_SYSCALL
+
+#include <sys/syscall.h>
+
+/*
+ * define some syscall wrappers for common architectures
+ * this is mostly for nice looks during debugging, not performance.
+ * our syscalls return < 0, not == -1, on error. which is good
+ * enough for linux aio.
+ * TODO: arm is also common nowadays, maybe even mips and x86
+ * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
+ */
+#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
+ /* the costly errno access probably kills this for size optimisation */
+
+ #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5,arg6) \
+ ({ \
+ long res; \
+ register unsigned long r6 __asm__ ("r9" ); \
+ register unsigned long r5 __asm__ ("r8" ); \
+ register unsigned long r4 __asm__ ("r10"); \
+ register unsigned long r3 __asm__ ("rdx"); \
+ register unsigned long r2 __asm__ ("rsi"); \
+ register unsigned long r1 __asm__ ("rdi"); \
+ if (narg >= 6) r6 = (unsigned long)(arg6); \
+ if (narg >= 5) r5 = (unsigned long)(arg5); \
+ if (narg >= 4) r4 = (unsigned long)(arg4); \
+ if (narg >= 3) r3 = (unsigned long)(arg3); \
+ if (narg >= 2) r2 = (unsigned long)(arg2); \
+ if (narg >= 1) r1 = (unsigned long)(arg1); \
+ __asm__ __volatile__ ( \
+ "syscall\n\t" \
+ : "=a" (res) \
+ : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
+ : "cc", "r11", "cx", "memory"); \
+ errno = -res; \
+ res; \
+ })
+
+#endif
+
+#ifdef ev_syscall
+ #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0, 0)
+ #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0, 0)
+ #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0, 0)
+ #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0, 0)
+ #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0, 0)
+ #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5, 0)
+ #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) ev_syscall (nr, 6, arg1, arg2, arg3, arg4, arg5,arg6)
+#else
+ #define ev_syscall0(nr) syscall (nr)
+ #define ev_syscall1(nr,arg1) syscall (nr, arg1)
+ #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
+ #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
+ #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
+ #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
+ #define ev_syscall6(nr,arg1,arg2,arg3,arg4,arg5,arg6) syscall (nr, arg1, arg2, arg3, arg4, arg5,arg6)
+#endif
+
+#endif
+
+/*****************************************************************************/
+
#define NUMPRI (EV_MAXPRI - EV_MINPRI + 1)
#if EV_MINPRI == EV_MAXPRI
@@ -1621,7 +1741,7 @@ static EV_ATOMIC_T have_monotonic; /* did clock_gettime (CLOCK_MONOTONIC) work?
#include <float.h>
/* a floor() replacement function, should be independent of ev_tstamp type */
-noinline
+ecb_noinline
static ev_tstamp
ev_floor (ev_tstamp v)
{
@@ -1632,26 +1752,26 @@ ev_floor (ev_tstamp v)
const ev_tstamp shift = sizeof (unsigned long) >= 8 ? 18446744073709551616. : 4294967296.;
#endif
- /* argument too large for an unsigned long? */
- if (expect_false (v >= shift))
+ /* special treatment for negative arguments */
+ if (ecb_expect_false (v < 0.))
+ {
+ ev_tstamp f = -ev_floor (-v);
+
+ return f - (f == v ? 0 : 1);
+ }
+
+ /* argument too large for an unsigned long? then reduce it */
+ if (ecb_expect_false (v >= shift))
{
ev_tstamp f;
if (v == v - 1.)
- return v; /* very large number */
+ return v; /* very large numbers are assumed to be integer */
f = shift * ev_floor (v * (1. / shift));
return f + ev_floor (v - f);
}
- /* special treatment for negative args? */
- if (expect_false (v < 0.))
- {
- ev_tstamp f = -ev_floor (-v);
-
- return f - (f == v ? 0 : 1);
- }
-
/* fits into an unsigned long */
return (unsigned long)v;
}
@@ -1664,7 +1784,7 @@ ev_floor (ev_tstamp v)
# include <sys/utsname.h>
#endif
-noinline ecb_cold
+ecb_noinline ecb_cold
static unsigned int
ev_linux_version (void)
{
@@ -1704,7 +1824,7 @@ ev_linux_version (void)
/*****************************************************************************/
#if EV_AVOID_STDIO
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
ev_printerr (const char *msg)
{
@@ -1721,7 +1841,7 @@ ev_set_syserr_cb (void (*cb)(const char *msg) EV_NOEXCEPT) EV_NOEXCEPT
syserr_cb = cb;
}
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
ev_syserr (const char *msg)
{
@@ -1803,7 +1923,7 @@ typedef struct
unsigned char events; /* the events watched for */
unsigned char reify; /* flag set when this ANFD needs reification (EV_ANFD_REIFY, EV__IOFDSET) */
unsigned char emask; /* some backends store the actual kernel mask in here */
- unsigned char unused;
+ unsigned char eflags; /* flags field for use by backends */
#if EV_USE_EPOLL
unsigned int egen; /* generation counter to counter epoll bugs */
#endif
@@ -1867,7 +1987,7 @@ typedef struct
#else
- EV_API_DECL ev_tstamp ev_rt_now = 0; /* needs to be initialised to make it a definition despite extern */
+ EV_API_DECL ev_tstamp ev_rt_now = EV_TS_CONST (0.); /* needs to be initialised to make it a definition despite extern */
#define VAR(name,decl) static decl;
#include "ev_vars.h"
#undef VAR
@@ -1877,8 +1997,8 @@ typedef struct
#endif
#if EV_FEATURE_API
-# define EV_RELEASE_CB if (expect_false (release_cb)) release_cb (EV_A)
-# define EV_ACQUIRE_CB if (expect_false (acquire_cb)) acquire_cb (EV_A)
+# define EV_RELEASE_CB if (ecb_expect_false (release_cb)) release_cb (EV_A)
+# define EV_ACQUIRE_CB if (ecb_expect_false (acquire_cb)) acquire_cb (EV_A)
# define EV_INVOKE_PENDING invoke_cb (EV_A)
#else
# define EV_RELEASE_CB (void)0
@@ -1895,17 +2015,19 @@ ev_tstamp
ev_time (void) EV_NOEXCEPT
{
#if EV_USE_REALTIME
- if (expect_true (have_realtime))
+ if (ecb_expect_true (have_realtime))
{
struct timespec ts;
clock_gettime (CLOCK_REALTIME, &ts);
- return ts.tv_sec + ts.tv_nsec * 1e-9;
+ return EV_TS_GET (ts);
}
#endif
- struct timeval tv;
- gettimeofday (&tv, 0);
- return tv.tv_sec + tv.tv_usec * 1e-6;
+ {
+ struct timeval tv;
+ gettimeofday (&tv, 0);
+ return EV_TV_GET (tv);
+ }
}
#endif
@@ -1913,11 +2035,11 @@ inline_size ev_tstamp
get_clock (void)
{
#if EV_USE_MONOTONIC
- if (expect_true (have_monotonic))
+ if (ecb_expect_true (have_monotonic))
{
struct timespec ts;
clock_gettime (CLOCK_MONOTONIC, &ts);
- return ts.tv_sec + ts.tv_nsec * 1e-9;
+ return EV_TS_GET (ts);
}
#endif
@@ -1935,7 +2057,7 @@ ev_now (EV_P) EV_NOEXCEPT
void
ev_sleep (ev_tstamp delay) EV_NOEXCEPT
{
- if (delay > 0.)
+ if (delay > EV_TS_CONST (0.))
{
#if EV_USE_NANOSLEEP
struct timespec ts;
@@ -1945,7 +2067,7 @@ ev_sleep (ev_tstamp delay) EV_NOEXCEPT
#elif defined _WIN32
/* maybe this should round up, as ms is very low resolution */
/* compared to select (µs) or nanosleep (ns) */
- Sleep ((unsigned long)(delay * 1e3));
+ Sleep ((unsigned long)(EV_TS_TO_MSEC (delay)));
#else
struct timeval tv;
@@ -1985,7 +2107,7 @@ array_nextsize (int elem, int cur, int cnt)
return ncur;
}
-noinline ecb_cold
+ecb_noinline ecb_cold
static void *
array_realloc (int elem, void *base, int *cur, int cnt)
{
@@ -1999,7 +2121,7 @@ array_realloc (int elem, void *base, int *cur, int cnt)
memset ((void *)(base + offset), 0, sizeof (*(base)) * (count))
#define array_needsize(type,base,cur,cnt,init) \
- if (expect_false ((cnt) > (cur))) \
+ if (ecb_expect_false ((cnt) > (cur))) \
{ \
ecb_unused int ocur_ = (cur); \
(base) = (type *)array_realloc \
@@ -2023,20 +2145,20 @@ array_realloc (int elem, void *base, int *cur, int cnt)
/*****************************************************************************/
/* dummy callback for pending events */
-noinline
+ecb_noinline
static void
pendingcb (EV_P_ ev_prepare *w, int revents)
{
}
-noinline
+ecb_noinline
void
ev_feed_event (EV_P_ void *w, int revents) EV_NOEXCEPT
{
W w_ = (W)w;
int pri = ABSPRI (w_);
- if (expect_false (w_->pending))
+ if (ecb_expect_false (w_->pending))
pendings [pri][w_->pending - 1].events |= revents;
else
{
@@ -2097,7 +2219,7 @@ fd_event (EV_P_ int fd, int revents)
{
ANFD *anfd = anfds + fd;
- if (expect_true (!anfd->reify))
+ if (ecb_expect_true (!anfd->reify))
fd_event_nocheck (EV_A_ fd, revents);
}
@@ -2151,7 +2273,7 @@ fd_reify (EV_P)
anfd->reify = 0;
- /*if (expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
+ /*if (ecb_expect_true (o_reify & EV_ANFD_REIFY)) probably a deoptimisation */
{
anfd->events = 0;
@@ -2177,7 +2299,7 @@ fd_change (EV_P_ int fd, int flags)
unsigned char reify = anfds [fd].reify;
anfds [fd].reify |= flags;
- if (expect_true (!reify))
+ if (ecb_expect_true (!reify))
{
++fdchangecnt;
array_needsize (int, fdchanges, fdchangemax, fdchangecnt, array_needsize_noinit);
@@ -2210,7 +2332,7 @@ fd_valid (int fd)
}
/* called on EBADF to verify fds */
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
fd_ebadf (EV_P)
{
@@ -2223,7 +2345,7 @@ fd_ebadf (EV_P)
}
/* called on ENOMEM in select/poll to kill some fds and retry */
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
fd_enomem (EV_P)
{
@@ -2238,7 +2360,7 @@ fd_enomem (EV_P)
}
/* usually called after fork if backend needs to re-arm all fds from scratch */
-noinline
+ecb_noinline
static void
fd_rearm_all (EV_P)
{
@@ -2302,19 +2424,19 @@ downheap (ANHE *heap, int N, int k)
ANHE *pos = heap + DHEAP * (k - HEAP0) + HEAP0 + 1;
/* find minimum child */
- if (expect_true (pos + DHEAP - 1 < E))
+ if (ecb_expect_true (pos + DHEAP - 1 < E))
{
/* fast path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
- if ( ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
- if ( ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
- if ( ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
+ if ( minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
+ if ( minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
+ if ( minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
}
else if (pos < E)
{
/* slow path */ (minpos = pos + 0), (minat = ANHE_at (*minpos));
- if (pos + 1 < E && ANHE_at (pos [1]) < minat) (minpos = pos + 1), (minat = ANHE_at (*minpos));
- if (pos + 2 < E && ANHE_at (pos [2]) < minat) (minpos = pos + 2), (minat = ANHE_at (*minpos));
- if (pos + 3 < E && ANHE_at (pos [3]) < minat) (minpos = pos + 3), (minat = ANHE_at (*minpos));
+ if (pos + 1 < E && minat > ANHE_at (pos [1])) (minpos = pos + 1), (minat = ANHE_at (*minpos));
+ if (pos + 2 < E && minat > ANHE_at (pos [2])) (minpos = pos + 2), (minat = ANHE_at (*minpos));
+ if (pos + 3 < E && minat > ANHE_at (pos [3])) (minpos = pos + 3), (minat = ANHE_at (*minpos));
}
else
break;
@@ -2332,7 +2454,7 @@ downheap (ANHE *heap, int N, int k)
ev_active (ANHE_w (he)) = k;
}
-#else /* 4HEAP */
+#else /* not 4HEAP */
#define HEAP0 1
#define HPARENT(k) ((k) >> 1)
@@ -2430,7 +2552,7 @@ static ANSIG signals [EV_NSIG - 1];
#if EV_SIGNAL_ENABLE || EV_ASYNC_ENABLE
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
evpipe_init (EV_P)
{
@@ -2481,7 +2603,7 @@ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
{
ECB_MEMORY_FENCE; /* push out the write before this function was called, acquire flag */
- if (expect_true (*flag))
+ if (ecb_expect_true (*flag))
return;
*flag = 1;
@@ -2568,7 +2690,7 @@ pipecb (EV_P_ ev_io *iow, int revents)
ECB_MEMORY_FENCE;
for (i = EV_NSIG - 1; i--; )
- if (expect_false (signals [i].pending))
+ if (ecb_expect_false (signals [i].pending))
ev_feed_signal_event (EV_A_ i + 1);
}
#endif
@@ -2619,13 +2741,13 @@ ev_sighandler (int signum)
ev_feed_signal (signum);
}
-noinline
+ecb_noinline
void
ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
{
WL w;
- if (expect_false (signum <= 0 || signum >= EV_NSIG))
+ if (ecb_expect_false (signum <= 0 || signum >= EV_NSIG))
return;
--signum;
@@ -2634,7 +2756,7 @@ ev_feed_signal_event (EV_P_ int signum) EV_NOEXCEPT
/* it is permissible to try to feed a signal to the wrong loop */
/* or, likely more useful, feeding a signal nobody is waiting for */
- if (expect_false (signals [signum].loop != EV_A))
+ if (ecb_expect_false (signals [signum].loop != EV_A))
return;
#endif
@@ -2743,6 +2865,9 @@ childcb (EV_P_ ev_signal *sw, int revents)
#if EV_USE_LINUXAIO
# include "ev_linuxaio.c"
#endif
+#if EV_USE_IOURING
+# include "ev_iouring.c"
+#endif
#if EV_USE_POLL
# include "ev_poll.c"
#endif
@@ -2784,6 +2909,7 @@ ev_supported_backends (void) EV_NOEXCEPT
if (EV_USE_KQUEUE ) flags |= EVBACKEND_KQUEUE;
if (EV_USE_EPOLL ) flags |= EVBACKEND_EPOLL;
if (EV_USE_LINUXAIO) flags |= EVBACKEND_LINUXAIO;
+ if (EV_USE_IOURING ) flags |= EVBACKEND_IOURING;
if (EV_USE_POLL ) flags |= EVBACKEND_POLL;
if (EV_USE_SELECT ) flags |= EVBACKEND_SELECT;
@@ -2814,6 +2940,10 @@ ev_recommended_backends (void) EV_NOEXCEPT
#if !EV_RECOMMEND_LINUXAIO
flags &= ~EVBACKEND_LINUXAIO;
#endif
+ /* TODO: io_uring is super experimental */
+#if !EV_RECOMMEND_IOURING
+ flags &= ~EVBACKEND_IOURING;
+#endif
return flags;
}
@@ -2828,6 +2958,13 @@ ev_embeddable_backends (void) EV_NOEXCEPT
if (ev_linux_version () < 0x020620) /* disable it on linux < 2.6.32 */
flags &= ~EVBACKEND_EPOLL;
+ /* EVBACKEND_LINUXAIO is theoretically embeddable, but suffers from a performance overhead */
+
+ /* EVBACKEND_IOURING is practically embeddable, but the current implementation is not
+ * because our backend_fd is the epoll fd we need as fallback.
+ * if the kernel ever is fixed, this might change...
+ */
+
return flags;
}
@@ -2889,7 +3026,7 @@ ev_set_loop_release_cb (EV_P_ void (*release)(EV_P) EV_NOEXCEPT, void (*acquire)
#endif
/* initialise a loop structure, must be zero-initialised */
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
{
@@ -2967,6 +3104,9 @@ loop_init (EV_P_ unsigned int flags) EV_NOEXCEPT
#if EV_USE_KQUEUE
if (!backend && (flags & EVBACKEND_KQUEUE )) backend = kqueue_init (EV_A_ flags);
#endif
+#if EV_USE_IOURING
+ if (!backend && (flags & EVBACKEND_IOURING )) backend = iouring_init (EV_A_ flags);
+#endif
#if EV_USE_LINUXAIO
if (!backend && (flags & EVBACKEND_LINUXAIO)) backend = linuxaio_init (EV_A_ flags);
#endif
@@ -3004,7 +3144,7 @@ ev_loop_destroy (EV_P)
#if EV_CLEANUP_ENABLE
/* queue cleanup watchers (and execute them) */
- if (expect_false (cleanupcnt))
+ if (ecb_expect_false (cleanupcnt))
{
queue_events (EV_A_ (W *)cleanups, cleanupcnt, EV_CLEANUP);
EV_INVOKE_PENDING;
@@ -3050,6 +3190,9 @@ ev_loop_destroy (EV_P)
#if EV_USE_KQUEUE
if (backend == EVBACKEND_KQUEUE ) kqueue_destroy (EV_A);
#endif
+#if EV_USE_IOURING
+ if (backend == EVBACKEND_IOURING ) iouring_destroy (EV_A);
+#endif
#if EV_USE_LINUXAIO
if (backend == EVBACKEND_LINUXAIO) linuxaio_destroy (EV_A);
#endif
@@ -3117,6 +3260,9 @@ loop_fork (EV_P)
#if EV_USE_KQUEUE
if (backend == EVBACKEND_KQUEUE ) kqueue_fork (EV_A);
#endif
+#if EV_USE_IOURING
+ if (backend == EVBACKEND_IOURING ) iouring_fork (EV_A);
+#endif
#if EV_USE_LINUXAIO
if (backend == EVBACKEND_LINUXAIO) linuxaio_fork (EV_A);
#endif
@@ -3168,7 +3314,7 @@ ev_loop_new (unsigned int flags) EV_NOEXCEPT
#endif /* multiplicity */
#if EV_VERIFY
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
verify_watcher (EV_P_ W w)
{
@@ -3178,7 +3324,7 @@ verify_watcher (EV_P_ W w)
assert (("libev: pending watcher not on pending queue", pendings [ABSPRI (w)][w->pending - 1].w == w));
}
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
verify_heap (EV_P_ ANHE *heap, int N)
{
@@ -3194,7 +3340,7 @@ verify_heap (EV_P_ ANHE *heap, int N)
}
}
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
array_verify (EV_P_ W *ws, int cnt)
{
@@ -3353,7 +3499,7 @@ ev_pending_count (EV_P) EV_NOEXCEPT
return count;
}
-noinline
+ecb_noinline
void
ev_invoke_pending (EV_P)
{
@@ -3382,7 +3528,7 @@ ev_invoke_pending (EV_P)
inline_size void
idle_reify (EV_P)
{
- if (expect_false (idleall))
+ if (ecb_expect_false (idleall))
{
int pri;
@@ -3422,7 +3568,7 @@ timers_reify (EV_P)
if (ev_at (w) < mn_now)
ev_at (w) = mn_now;
- assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
+ assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > EV_TS_CONST (0.)));
ANHE_at_cache (timers [HEAP0]);
downheap (timers, timercnt, HEAP0);
@@ -3441,7 +3587,7 @@ timers_reify (EV_P)
#if EV_PERIODIC_ENABLE
-noinline
+ecb_noinline
static void
periodic_recalc (EV_P_ ev_periodic *w)
{
@@ -3454,7 +3600,7 @@ periodic_recalc (EV_P_ ev_periodic *w)
ev_tstamp nat = at + w->interval;
/* when resolution fails us, we use ev_rt_now */
- if (expect_false (nat == at))
+ if (ecb_expect_false (nat == at))
{
at = ev_rt_now;
break;
@@ -3510,7 +3656,7 @@ periodics_reify (EV_P)
/* simply recalculate all periodics */
/* TODO: maybe ensure that at least one event happens when jumping forward? */
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
periodics_reschedule (EV_P)
{
@@ -3534,7 +3680,7 @@ periodics_reschedule (EV_P)
#endif
/* adjust all timers by a given offset */
-noinline ecb_cold
+ecb_noinline ecb_cold
static void
timers_reschedule (EV_P_ ev_tstamp adjust)
{
@@ -3554,7 +3700,7 @@ inline_speed void
time_update (EV_P_ ev_tstamp max_block)
{
#if EV_USE_MONOTONIC
- if (expect_true (have_monotonic))
+ if (ecb_expect_true (have_monotonic))
{
int i;
ev_tstamp odiff = rtmn_diff;
@@ -3563,7 +3709,7 @@ time_update (EV_P_ ev_tstamp max_block)
/* only fetch the realtime clock every 0.5*MIN_TIMEJUMP seconds */
/* interpolate in the meantime */
- if (expect_true (mn_now - now_floor < MIN_TIMEJUMP * .5))
+ if (ecb_expect_true (mn_now - now_floor < EV_TS_CONST (MIN_TIMEJUMP * .5)))
{
ev_rt_now = rtmn_diff + mn_now;
return;
@@ -3587,7 +3733,7 @@ time_update (EV_P_ ev_tstamp max_block)
diff = odiff - rtmn_diff;
- if (expect_true ((diff < 0. ? -diff : diff) < MIN_TIMEJUMP))
+ if (ecb_expect_true ((diff < EV_TS_CONST (0.) ? -diff : diff) < EV_TS_CONST (MIN_TIMEJUMP)))
return; /* all is well */
ev_rt_now = ev_time ();
@@ -3606,7 +3752,7 @@ time_update (EV_P_ ev_tstamp max_block)
{
ev_rt_now = ev_time ();
- if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
+ if (ecb_expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + EV_TS_CONST (MIN_TIMEJUMP)))
{
/* adjust timers. this is easy, as the offset is the same for all of them */
timers_reschedule (EV_A_ ev_rt_now - mn_now);
@@ -3639,8 +3785,8 @@ ev_run (EV_P_ int flags)
#endif
#ifndef _WIN32
- if (expect_false (curpid)) /* penalise the forking check even more */
- if (expect_false (getpid () != curpid))
+ if (ecb_expect_false (curpid)) /* penalise the forking check even more */
+ if (ecb_expect_false (getpid () != curpid))
{
curpid = getpid ();
postfork = 1;
@@ -3649,7 +3795,7 @@ ev_run (EV_P_ int flags)
#if EV_FORK_ENABLE
/* we might have forked, so queue fork handlers */
- if (expect_false (postfork))
+ if (ecb_expect_false (postfork))
if (forkcnt)
{
queue_events (EV_A_ (W *)forks, forkcnt, EV_FORK);
@@ -3659,18 +3805,18 @@ ev_run (EV_P_ int flags)
#if EV_PREPARE_ENABLE
/* queue prepare watchers (and execute them) */
- if (expect_false (preparecnt))
+ if (ecb_expect_false (preparecnt))
{
queue_events (EV_A_ (W *)prepares, preparecnt, EV_PREPARE);
EV_INVOKE_PENDING;
}
#endif
- if (expect_false (loop_done))
+ if (ecb_expect_false (loop_done))
break;
/* we might have forked, so reify kernel state if necessary */
- if (expect_false (postfork))
+ if (ecb_expect_false (postfork))
loop_fork (EV_A);
/* update fd-related kernel structures */
@@ -3685,16 +3831,16 @@ ev_run (EV_P_ int flags)
ev_tstamp prev_mn_now = mn_now;
/* update time to cancel out callback processing overhead */
- time_update (EV_A_ 1e100);
+ time_update (EV_A_ EV_TS_CONST (EV_TSTAMP_HUGE));
/* from now on, we want a pipe-wake-up */
pipe_write_wanted = 1;
ECB_MEMORY_FENCE; /* make sure pipe_write_wanted is visible before we check for potential skips */
- if (expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
+ if (ecb_expect_true (!(flags & EVRUN_NOWAIT || idleall || !activecnt || pipe_write_skipped)))
{
- waittime = MAX_BLOCKTIME;
+ waittime = EV_TS_CONST (MAX_BLOCKTIME);
if (timercnt)
{
@@ -3711,23 +3857,23 @@ ev_run (EV_P_ int flags)
#endif
/* don't let timeouts decrease the waittime below timeout_blocktime */
- if (expect_false (waittime < timeout_blocktime))
+ if (ecb_expect_false (waittime < timeout_blocktime))
waittime = timeout_blocktime;
/* at this point, we NEED to wait, so we have to ensure */
/* to pass a minimum nonzero value to the backend */
- if (expect_false (waittime < backend_mintime))
+ if (ecb_expect_false (waittime < backend_mintime))
waittime = backend_mintime;
/* extra check because io_blocktime is commonly 0 */
- if (expect_false (io_blocktime))
+ if (ecb_expect_false (io_blocktime))
{
sleeptime = io_blocktime - (mn_now - prev_mn_now);
if (sleeptime > waittime - backend_mintime)
sleeptime = waittime - backend_mintime;
- if (expect_true (sleeptime > 0.))
+ if (ecb_expect_true (sleeptime > EV_TS_CONST (0.)))
{
ev_sleep (sleeptime);
waittime -= sleeptime;
@@ -3768,13 +3914,13 @@ ev_run (EV_P_ int flags)
#if EV_CHECK_ENABLE
/* queue check watchers, to be executed first */
- if (expect_false (checkcnt))
+ if (ecb_expect_false (checkcnt))
queue_events (EV_A_ (W *)checks, checkcnt, EV_CHECK);
#endif
EV_INVOKE_PENDING;
}
- while (expect_true (
+ while (ecb_expect_true (
activecnt
&& !loop_done
&& !(flags & (EVRUN_ONCE | EVRUN_NOWAIT))
@@ -3811,7 +3957,7 @@ ev_unref (EV_P) EV_NOEXCEPT
void
ev_now_update (EV_P) EV_NOEXCEPT
{
- time_update (EV_A_ 1e100);
+ time_update (EV_A_ EV_TSTAMP_HUGE);
}
void
@@ -3848,7 +3994,7 @@ wlist_del (WL *head, WL elem)
{
while (*head)
{
- if (expect_true (*head == elem))
+ if (ecb_expect_true (*head == elem))
{
*head = elem->next;
break;
@@ -3875,7 +4021,7 @@ ev_clear_pending (EV_P_ void *w) EV_NOEXCEPT
W w_ = (W)w;
int pending = w_->pending;
- if (expect_true (pending))
+ if (ecb_expect_true (pending))
{
ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
p->w = (W)&pending_w;
@@ -3912,13 +4058,13 @@ ev_stop (EV_P_ W w)
/*****************************************************************************/
-noinline
+ecb_noinline
void
ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
{
int fd = w->fd;
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
assert (("libev: ev_io_start called with negative fd", fd >= 0));
@@ -3942,12 +4088,12 @@ ev_io_start (EV_P_ ev_io *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
}
-noinline
+ecb_noinline
void
ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
assert (("libev: ev_io_stop called with illegal fd (must stay constant after start!)", w->fd >= 0 && w->fd < anfdmax));
@@ -3965,11 +4111,11 @@ ev_io_stop (EV_P_ ev_io *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
}
-noinline
+ecb_noinline
void
ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
ev_at (w) += mn_now;
@@ -3990,12 +4136,12 @@ ev_timer_start (EV_P_ ev_timer *w) EV_NOEXCEPT
/*assert (("libev: internal timer heap corruption", timers [ev_active (w)] == (WT)w));*/
}
-noinline
+ecb_noinline
void
ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4007,7 +4153,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
--timercnt;
- if (expect_true (active < timercnt + HEAP0))
+ if (ecb_expect_true (active < timercnt + HEAP0))
{
timers [active] = timers [timercnt + HEAP0];
adjustheap (timers, timercnt, active);
@@ -4021,7 +4167,7 @@ ev_timer_stop (EV_P_ ev_timer *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
}
-noinline
+ecb_noinline
void
ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
{
@@ -4052,15 +4198,15 @@ ev_timer_again (EV_P_ ev_timer *w) EV_NOEXCEPT
ev_tstamp
ev_timer_remaining (EV_P_ ev_timer *w) EV_NOEXCEPT
{
- return ev_at (w) - (ev_is_active (w) ? mn_now : 0.);
+ return ev_at (w) - (ev_is_active (w) ? mn_now : EV_TS_CONST (0.));
}
#if EV_PERIODIC_ENABLE
-noinline
+ecb_noinline
void
ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
if (w->reschedule_cb)
@@ -4087,12 +4233,12 @@ ev_periodic_start (EV_P_ ev_periodic *w) EV_NOEXCEPT
/*assert (("libev: internal periodic heap corruption", ANHE_w (periodics [ev_active (w)]) == (WT)w));*/
}
-noinline
+ecb_noinline
void
ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4104,7 +4250,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
--periodiccnt;
- if (expect_true (active < periodiccnt + HEAP0))
+ if (ecb_expect_true (active < periodiccnt + HEAP0))
{
periodics [active] = periodics [periodiccnt + HEAP0];
adjustheap (periodics, periodiccnt, active);
@@ -4116,7 +4262,7 @@ ev_periodic_stop (EV_P_ ev_periodic *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
}
-noinline
+ecb_noinline
void
ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
{
@@ -4132,11 +4278,11 @@ ev_periodic_again (EV_P_ ev_periodic *w) EV_NOEXCEPT
#if EV_SIGNAL_ENABLE
-noinline
+ecb_noinline
void
ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
assert (("libev: ev_signal_start called with illegal signal number", w->signum > 0 && w->signum < EV_NSIG));
@@ -4215,12 +4361,12 @@ ev_signal_start (EV_P_ ev_signal *w) EV_NOEXCEPT
EV_FREQUENT_CHECK;
}
-noinline
+ecb_noinline
void
ev_signal_stop (EV_P_ ev_signal *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4263,7 +4409,7 @@ ev_child_start (EV_P_ ev_child *w) EV_NOEXCEPT
#if EV_MULTIPLICITY
assert (("libev: child watchers are only supported in the default loop", loop == ev_default_loop_ptr));
#endif
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4278,7 +4424,7 @@ void
ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4302,14 +4448,14 @@ ev_child_stop (EV_P_ ev_child *w) EV_NOEXCEPT
#define NFS_STAT_INTERVAL 30.1074891 /* for filesystems potentially failing inotify */
#define MIN_STAT_INTERVAL 0.1074891
-noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
+ecb_noinline static void stat_timer_cb (EV_P_ ev_timer *w_, int revents);
#if EV_USE_INOTIFY
/* the * 2 is to allow for alignment padding, which for some reason is >> 8 */
# define EV_INOTIFY_BUFSIZE (sizeof (struct inotify_event) * 2 + NAME_MAX)
-noinline
+ecb_noinline
static void
infy_add (EV_P_ ev_stat *w)
{
@@ -4384,7 +4530,7 @@ infy_add (EV_P_ ev_stat *w)
if (ev_is_active (&w->timer)) ev_unref (EV_A);
}
-noinline
+ecb_noinline
static void
infy_del (EV_P_ ev_stat *w)
{
@@ -4402,7 +4548,7 @@ infy_del (EV_P_ ev_stat *w)
inotify_rm_watch (fs_fd, wd);
}
-noinline
+ecb_noinline
static void
infy_wd (EV_P_ int slot, int wd, struct inotify_event *ev)
{
@@ -4558,7 +4704,7 @@ ev_stat_stat (EV_P_ ev_stat *w) EV_NOEXCEPT
w->attr.st_nlink = 1;
}
-noinline
+ecb_noinline
static void
stat_timer_cb (EV_P_ ev_timer *w_, int revents)
{
@@ -4602,7 +4748,7 @@ stat_timer_cb (EV_P_ ev_timer *w_, int revents)
void
ev_stat_start (EV_P_ ev_stat *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
ev_stat_stat (EV_A_ w);
@@ -4634,7 +4780,7 @@ void
ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4659,7 +4805,7 @@ ev_stat_stop (EV_P_ ev_stat *w) EV_NOEXCEPT
void
ev_idle_start (EV_P_ ev_idle *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
pri_adjust (EV_A_ (W)w);
@@ -4683,7 +4829,7 @@ void
ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4706,7 +4852,7 @@ ev_idle_stop (EV_P_ ev_idle *w) EV_NOEXCEPT
void
ev_prepare_start (EV_P_ ev_prepare *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4722,7 +4868,7 @@ void
ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4744,7 +4890,7 @@ ev_prepare_stop (EV_P_ ev_prepare *w) EV_NOEXCEPT
void
ev_check_start (EV_P_ ev_check *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4760,7 +4906,7 @@ void
ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4779,7 +4925,7 @@ ev_check_stop (EV_P_ ev_check *w) EV_NOEXCEPT
#endif
#if EV_EMBED_ENABLE
-noinline
+ecb_noinline
void
ev_embed_sweep (EV_P_ ev_embed *w) EV_NOEXCEPT
{
@@ -4841,7 +4987,7 @@ embed_idle_cb (EV_P_ ev_idle *idle, int revents)
void
ev_embed_start (EV_P_ ev_embed *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
{
@@ -4873,7 +5019,7 @@ void
ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4892,7 +5038,7 @@ ev_embed_stop (EV_P_ ev_embed *w) EV_NOEXCEPT
void
ev_fork_start (EV_P_ ev_fork *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4908,7 +5054,7 @@ void
ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4930,7 +5076,7 @@ ev_fork_stop (EV_P_ ev_fork *w) EV_NOEXCEPT
void
ev_cleanup_start (EV_P_ ev_cleanup *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4948,7 +5094,7 @@ void
ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
@@ -4971,7 +5117,7 @@ ev_cleanup_stop (EV_P_ ev_cleanup *w) EV_NOEXCEPT
void
ev_async_start (EV_P_ ev_async *w) EV_NOEXCEPT
{
- if (expect_false (ev_is_active (w)))
+ if (ecb_expect_false (ev_is_active (w)))
return;
w->sent = 0;
@@ -4991,7 +5137,7 @@ void
ev_async_stop (EV_P_ ev_async *w) EV_NOEXCEPT
{
clear_pending (EV_A_ (W)w);
- if (expect_false (!ev_is_active (w)))
+ if (ecb_expect_false (!ev_is_active (w)))
return;
EV_FREQUENT_CHECK;
diff --git a/libev/ev.h b/libev/ev.h
index f5bac00..7966051 100644
--- a/libev/ev.h
+++ b/libev/ev.h
@@ -151,7 +151,10 @@ EV_CPP(extern "C" {)
/*****************************************************************************/
-typedef double ev_tstamp;
+#ifndef EV_TSTAMP_T
+# define EV_TSTAMP_T double
+#endif
+typedef EV_TSTAMP_T ev_tstamp;
#include <string.h> /* for memmove */
@@ -522,8 +525,9 @@ enum {
EVBACKEND_KQUEUE = 0x00000008U, /* bsd, broken on osx */
EVBACKEND_DEVPOLL = 0x00000010U, /* solaris 8 */ /* NYI */
EVBACKEND_PORT = 0x00000020U, /* solaris 10 */
- EVBACKEND_LINUXAIO = 0x00000040U, /* Linuix AIO */
- EVBACKEND_ALL = 0x0000007FU, /* all known backends */
+ EVBACKEND_LINUXAIO = 0x00000040U, /* linux AIO, 4.19+ */
+ EVBACKEND_IOURING = 0x00000080U, /* linux io_uring, 5.1+ */
+ EVBACKEND_ALL = 0x000000FFU, /* all known backends */
EVBACKEND_MASK = 0x0000FFFFU /* all future backends */
};
diff --git a/libev/ev.pod b/libev/ev.pod
index 2299e6f..fc8cdce 100644
--- a/libev/ev.pod
+++ b/libev/ev.pod
@@ -1537,7 +1537,7 @@ Many event loops support I<watcher priorities>, which are usually small
integers that influence the ordering of event callback invocation
between watchers in some way, all else being equal.
-In libev, Watcher priorities can be set using C<ev_set_priority>. See its
+In libev, watcher priorities can be set using C<ev_set_priority>. See its
description for the more technical details such as the actual priority
range.
@@ -1751,7 +1751,7 @@ reuse the same code path.
=head3 The special problem of fork
-Some backends (epoll, kqueue, probably linuxaio) do not support C<fork ()>
+Some backends (epoll, kqueue, linuxaio, iouring) do not support C<fork ()>
at all or exhibit useless behaviour. Libev fully supports fork, but needs
to be told about it in the child if you want to continue to use it in the
child.
@@ -4486,6 +4486,7 @@ in your include path (e.g. in libev/ when using -Ilibev):
ev_poll.c only when poll backend is enabled
ev_epoll.c only when the epoll backend is enabled
ev_linuxaio.c only when the linux aio backend is enabled
+ ev_iouring.c only when the linux io_uring backend is enabled
ev_kqueue.c only when the kqueue backend is enabled
ev_port.c only when the solaris port backend is enabled
@@ -4688,10 +4689,16 @@ headers indicate GNU/Linux + Glibc 2.4 or newer, otherwise disabled.
=item EV_USE_LINUXAIO
+If defined to be C<1>, libev will compile in support for the Linux aio
+backend (C<EV_USE_EPOLL> must also be enabled). If undefined, it will be
+enabled on linux, otherwise disabled.
+
+=item EV_USE_IOURING
+
If defined to be C<1>, libev will compile in support for the Linux
-aio backend. Due to it's currenbt limitations it has to be requested
-explicitly. If undefined, it will be enabled on linux, otherwise
-disabled.
+io_uring backend (C<EV_USE_EPOLL> must also be enabled). Due to its
+current limitations it has to be requested explicitly. If undefined, it
+will be enabled on linux, otherwise disabled.
=item EV_USE_KQUEUE
diff --git a/libev/ev_epoll.c b/libev/ev_epoll.c
index 440e46b..58cfa68 100644
--- a/libev/ev_epoll.c
+++ b/libev/ev_epoll.c
@@ -93,10 +93,10 @@ epoll_modify (EV_P_ int fd, int oev, int nev)
ev.events = (nev & EV_READ ? EPOLLIN : 0)
| (nev & EV_WRITE ? EPOLLOUT : 0);
- if (expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
+ if (ecb_expect_true (!epoll_ctl (backend_fd, oev && oldmask != nev ? EPOLL_CTL_MOD : EPOLL_CTL_ADD, fd, &ev)))
return;
- if (expect_true (errno == ENOENT))
+ if (ecb_expect_true (errno == ENOENT))
{
/* if ENOENT then the fd went away, so try to do the right thing */
if (!nev)
@@ -105,7 +105,7 @@ epoll_modify (EV_P_ int fd, int oev, int nev)
if (!epoll_ctl (backend_fd, EPOLL_CTL_ADD, fd, &ev))
return;
}
- else if (expect_true (errno == EEXIST))
+ else if (ecb_expect_true (errno == EEXIST))
{
/* EEXIST means we ignored a previous DEL, but the fd is still active */
/* if the kernel mask is the same as the new mask, we assume it hasn't changed */
@@ -115,7 +115,7 @@ epoll_modify (EV_P_ int fd, int oev, int nev)
if (!epoll_ctl (backend_fd, EPOLL_CTL_MOD, fd, &ev))
return;
}
- else if (expect_true (errno == EPERM))
+ else if (ecb_expect_true (errno == EPERM))
{
/* EPERM means the fd is always ready, but epoll is too snobbish */
/* to handle it, unlike select or poll. */
@@ -146,16 +146,16 @@ epoll_poll (EV_P_ ev_tstamp timeout)
int i;
int eventcnt;
- if (expect_false (epoll_epermcnt))
- timeout = 0.;
+ if (ecb_expect_false (epoll_epermcnt))
+ timeout = EV_TS_CONST (0.);
/* epoll wait times cannot be larger than (LONG_MAX - 999UL) / HZ msecs, which is below */
/* the default libev max wait time, however. */
EV_RELEASE_CB;
- eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, timeout * 1e3);
+ eventcnt = epoll_wait (backend_fd, epoll_events, epoll_eventmax, EV_TS_TO_MSEC (timeout));
EV_ACQUIRE_CB;
- if (expect_false (eventcnt < 0))
+ if (ecb_expect_false (eventcnt < 0))
{
if (errno != EINTR)
ev_syserr ("(libev) epoll_wait");
@@ -178,14 +178,14 @@ epoll_poll (EV_P_ ev_tstamp timeout)
* other spurious notifications will be found by epoll_ctl, below
* we assume that fd is always in range, as we never shrink the anfds array
*/
- if (expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
+ if (ecb_expect_false ((uint32_t)anfds [fd].egen != (uint32_t)(ev->data.u64 >> 32)))
{
/* recreate kernel state */
postfork |= 2;
continue;
}
- if (expect_false (got & ~want))
+ if (ecb_expect_false (got & ~want))
{
anfds [fd].emask = want;
@@ -197,6 +197,8 @@ epoll_poll (EV_P_ ev_tstamp timeout)
* above with the gencounter check (== our fd is not the event fd), and
* partially here, when epoll_ctl returns an error (== a child has the fd
* but we closed it).
+ * note: for events such as POLLHUP, where we can't know whether it refers
+ * to EV_READ or EV_WRITE, we might issue redundant EPOLL_CTL_MOD calls.
*/
ev->events = (want & EV_READ ? EPOLLIN : 0)
| (want & EV_WRITE ? EPOLLOUT : 0);
@@ -214,7 +216,7 @@ epoll_poll (EV_P_ ev_tstamp timeout)
}
/* if the receive array was full, increase its size */
- if (expect_false (eventcnt == epoll_eventmax))
+ if (ecb_expect_false (eventcnt == epoll_eventmax))
{
ev_free (epoll_events);
epoll_eventmax = array_nextsize (sizeof (struct epoll_event), epoll_eventmax, epoll_eventmax + 1);
@@ -264,7 +266,7 @@ epoll_init (EV_P_ int flags)
if ((backend_fd = epoll_epoll_create ()) < 0)
return 0;
- backend_mintime = 1e-3; /* epoll does sometimes return early, this is just to avoid the worst */
+ backend_mintime = EV_TS_CONST (1e-3); /* epoll does sometimes return early, this is just to avoid the worst */
backend_modify = epoll_modify;
backend_poll = epoll_poll;
@@ -282,8 +284,8 @@ epoll_destroy (EV_P)
array_free (epoll_eperm, EMPTY);
}
-inline_size
-void
+ecb_cold
+static void
epoll_fork (EV_P)
{
close (backend_fd);
diff --git a/libev/ev_iouring.c b/libev/ev_iouring.c
new file mode 100644
index 0000000..b80ab0e
--- /dev/null
+++ b/libev/ev_iouring.c
@@ -0,0 +1,645 @@
+/*
+ * libev linux io_uring fd activity backend
+ *
+ * Copyright (c) 2019 Marc Alexander Lehmann <libev@schmorp.de>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modifica-
+ * tion, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MER-
+ * CHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPE-
+ * CIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTH-
+ * ERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Alternatively, the contents of this file may be used under the terms of
+ * the GNU General Public License ("GPL") version 2 or any later version,
+ * in which case the provisions of the GPL are applicable instead of
+ * the above. If you wish to allow the use of your version of this file
+ * only under the terms of the GPL and not to allow others to use your
+ * version of this file under the BSD license, indicate your decision
+ * by deleting the provisions above and replace them with the notice
+ * and other provisions required by the GPL. If you do not delete the
+ * provisions above, a recipient may use your version of this file under
+ * either the BSD or the GPL.
+ */
+
+/*
+ * general notes about linux io_uring:
+ *
+ * a) it's the best interface I have seen so far. on linux.
+ * b) best is not necessarily very good.
+ * c) it's better than the aio mess, doesn't suffer from the fork problems
+ * of linux aio or epoll and so on and so on. and you could do event stuff
+ * without any syscalls. what's not to like?
+ * d) ok, it's vastly more complex, but that's ok, really.
+ * e) why 3 mmaps instead of one? one would be more space-efficient,
+ * and I can't see what benefit three would have (other than being
+ * somehow resizable/relocatable, but that's apparently not possible).
+ * f) hmm, it's practically undebuggable (gdb can't access the memory, and
+ * the bizarre way structure offsets are communicated makes it hard to
+ * just print the ring buffer heads, even *iff* the memory were visible
+ * in gdb. but then, that's also ok, really.
+ * g) well, you cannot specify a timeout when waiting for events. no,
+ * seriously, the interface doesn't support a timeout. never seen _that_
+ * before. sure, you can use a timerfd, but that's another syscall
+ * you could have avoided. overall, this bizarre omission smells
+ * like a µ-optimisation by the io_uring author for his personal
+ * applications, to the detriment of everybody else who just wants
+ * an event loop. but, umm, ok, if that's all, it could be worse.
+ * h) there is a hardcoded limit of 4096 outstanding events. okay,
+ * at least there is no arbitrary low system-wide limit...
+ * i) unlike linux aio, you *can* register more than the limit
+ * of fd events, and the kernel will "gracefully" signal an
+ * overflow, after which you could destroy and recreate the kernel
+ * state, a bit bigger, or fall back to e.g. poll. that's not
+ * totally insane, but kind of questions the point of a high
+ * performance I/O framework when it doesn't really work
+ * under stress.
+ * j) but, oh my! it has exactly the same bugs as the linux aio backend,
+ * where some undocumented poll combinations just fail.
+ * so we need epoll AGAIN as a fallback. AGAIN! epoll!! and of course,
+ * this is completely undocumented, have I mentioned this already?
+ * k) overall, the *API* itself is, I dare to say, not a total trainwreck.
+ * the big issues with it are the bugs requiring epoll, which might
+ * or might not get fixed (do I hold my breath?).
+ */
+
+#include <sys/timerfd.h>
+#include <sys/mman.h>
+#include <poll.h>
+
+#define IOURING_INIT_ENTRIES 32
+
+/*****************************************************************************/
+/* syscall wrapdadoop - this section has the raw api/abi definitions */
+
+#include <linux/fs.h>
+#include <linux/types.h>
+
+/* mostly directly taken from the kernel or documentation */
+
+struct io_uring_sqe
+{
+ __u8 opcode;
+ __u8 flags;
+ __u16 ioprio;
+ __s32 fd;
+ __u64 off;
+ __u64 addr;
+ __u32 len;
+ union {
+ __kernel_rwf_t rw_flags;
+ __u32 fsync_flags;
+ __u16 poll_events;
+ __u32 sync_range_flags;
+ __u32 msg_flags;
+ };
+ __u64 user_data;
+ union {
+ __u16 buf_index;
+ __u64 __pad2[3];
+ };
+};
+
+struct io_uring_cqe
+{
+ __u64 user_data;
+ __s32 res;
+ __u32 flags;
+};
+
+struct io_sqring_offsets
+{
+ __u32 head;
+ __u32 tail;
+ __u32 ring_mask;
+ __u32 ring_entries;
+ __u32 flags;
+ __u32 dropped;
+ __u32 array;
+ __u32 resv1;
+ __u64 resv2;
+};
+
+struct io_cqring_offsets
+{
+ __u32 head;
+ __u32 tail;
+ __u32 ring_mask;
+ __u32 ring_entries;
+ __u32 overflow;
+ __u32 cqes;
+ __u64 resv[2];
+};
+
+struct io_uring_params
+{
+ __u32 sq_entries;
+ __u32 cq_entries;
+ __u32 flags;
+ __u32 sq_thread_cpu;
+ __u32 sq_thread_idle;
+ __u32 resv[5];
+ struct io_sqring_offsets sq_off;
+ struct io_cqring_offsets cq_off;
+};
+
+#define IORING_OP_POLL_ADD 6
+#define IORING_OP_POLL_REMOVE 7
+
+#define IORING_ENTER_GETEVENTS 0x01
+
+#define IORING_OFF_SQ_RING 0x00000000ULL
+#define IORING_OFF_CQ_RING 0x08000000ULL
+#define IORING_OFF_SQES 0x10000000ULL
+
+inline_size
+int
+evsys_io_uring_setup (unsigned entries, struct io_uring_params *params)
+{
+ return ev_syscall2 (SYS_io_uring_setup, entries, params);
+}
+
+inline_size
+int
+evsys_io_uring_enter (int fd, unsigned to_submit, unsigned min_complete, unsigned flags, const sigset_t *sig, size_t sigsz)
+{
+ return ev_syscall6 (SYS_io_uring_enter, fd, to_submit, min_complete, flags, sig, sigsz);
+}
+
+/*****************************************************************************/
+/* actual backend implementation */
+
+/* we hope that volatile will make the compiler access these variables only once */
+#define EV_SQ_VAR(name) *(volatile unsigned *)((char *)iouring_sq_ring + iouring_sq_ ## name)
+#define EV_CQ_VAR(name) *(volatile unsigned *)((char *)iouring_cq_ring + iouring_cq_ ## name)
+
+/* the index array */
+#define EV_SQ_ARRAY ((unsigned *)((char *)iouring_sq_ring + iouring_sq_array))
+
+/* the submit/completion queue entries */
+#define EV_SQES ((struct io_uring_sqe *) iouring_sqes)
+#define EV_CQES ((struct io_uring_cqe *)((char *)iouring_cq_ring + iouring_cq_cqes))
+
+static
+struct io_uring_sqe *
+iouring_sqe_get (EV_P)
+{
+ unsigned tail = EV_SQ_VAR (tail);
+
+ if (tail + 1 - EV_SQ_VAR (head) > EV_SQ_VAR (ring_entries))
+ {
+ /* queue full, flush */
+ evsys_io_uring_enter (iouring_fd, iouring_to_submit, 0, 0, 0, 0);
+ iouring_to_submit = 0;
+ }
+
+ assert (("libev: io_uring queue full after flush", tail + 1 - EV_SQ_VAR (head) <= EV_SQ_VAR (ring_entries)));
+
+ return EV_SQES + (tail & EV_SQ_VAR (ring_mask));
+}
+
+inline_size
+struct io_uring_sqe *
+iouring_sqe_submit (EV_P_ struct io_uring_sqe *sqe)
+{
+ unsigned idx = sqe - EV_SQES;
+
+ EV_SQ_ARRAY [idx] = idx;
+ ECB_MEMORY_FENCE_RELEASE;
+ ++EV_SQ_VAR (tail);
+ /*ECB_MEMORY_FENCE_RELEASE;*/ /* for the time being we assume this is not needed */
+ ++iouring_to_submit;
+}
+
+/*****************************************************************************/
+
+/* when the timerfd expires we simply note the fact,
+ * as the purpose of the timerfd is to wake us up, nothing else.
+ * the next iteration should re-set it.
+ */
+static void
+iouring_tfd_cb (EV_P_ struct ev_io *w, int revents)
+{
+ iouring_tfd_to = EV_TSTAMP_HUGE;
+}
+
+static void
+iouring_epoll_cb (EV_P_ struct ev_io *w, int revents)
+{
+ epoll_poll (EV_A_ 0);
+}
+
+/* called for full and partial cleanup */
+ecb_cold
+static int
+iouring_internal_destroy (EV_P)
+{
+ close (iouring_tfd);
+ close (iouring_fd);
+
+ if (iouring_sq_ring != MAP_FAILED) munmap (iouring_sq_ring, iouring_sq_ring_size);
+ if (iouring_cq_ring != MAP_FAILED) munmap (iouring_cq_ring, iouring_cq_ring_size);
+ if (iouring_sqes != MAP_FAILED) munmap (iouring_sqes , iouring_sqes_size );
+
+ if (ev_is_active (&iouring_epoll_w)) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_epoll_w);
+ if (ev_is_active (&iouring_tfd_w )) ev_ref (EV_A); ev_io_stop (EV_A_ &iouring_tfd_w );
+}
+
+ecb_cold
+static int
+iouring_internal_init (EV_P)
+{
+ struct io_uring_params params = { 0 };
+
+ iouring_to_submit = 0;
+
+ iouring_tfd = -1;
+ iouring_sq_ring = MAP_FAILED;
+ iouring_cq_ring = MAP_FAILED;
+ iouring_sqes = MAP_FAILED;
+
+ for (;;)
+ {
+ iouring_fd = evsys_io_uring_setup (iouring_entries, &params);
+
+ if (iouring_fd >= 0)
+ break; /* yippie */
+
+ if (errno != EINVAL)
+ return -1; /* we failed */
+
+ /* EINVAL: lots of possible reasons, but maybe
+ * it is because we hit the unqueryable hardcoded size limit
+ */
+
+ /* we hit the limit already, give up */
+ if (iouring_max_entries)
+ return -1;
+
+ /* first time we hit EINVAL? assume we hit the limit, so go back and retry */
+ iouring_entries >>= 1;
+ iouring_max_entries = iouring_entries;
+ }
+
+ iouring_sq_ring_size = params.sq_off.array + params.sq_entries * sizeof (unsigned);
+ iouring_cq_ring_size = params.cq_off.cqes + params.cq_entries * sizeof (struct io_uring_cqe);
+ iouring_sqes_size = params.sq_entries * sizeof (struct io_uring_sqe);
+
+ iouring_sq_ring = mmap (0, iouring_sq_ring_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQ_RING);
+ iouring_cq_ring = mmap (0, iouring_cq_ring_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_CQ_RING);
+ iouring_sqes = mmap (0, iouring_sqes_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, iouring_fd, IORING_OFF_SQES);
+
+ if (iouring_sq_ring == MAP_FAILED || iouring_cq_ring == MAP_FAILED || iouring_sqes == MAP_FAILED)
+ return -1;
+
+ iouring_sq_head = params.sq_off.head;
+ iouring_sq_tail = params.sq_off.tail;
+ iouring_sq_ring_mask = params.sq_off.ring_mask;
+ iouring_sq_ring_entries = params.sq_off.ring_entries;
+ iouring_sq_flags = params.sq_off.flags;
+ iouring_sq_dropped = params.sq_off.dropped;
+ iouring_sq_array = params.sq_off.array;
+
+ iouring_cq_head = params.cq_off.head;
+ iouring_cq_tail = params.cq_off.tail;
+ iouring_cq_ring_mask = params.cq_off.ring_mask;
+ iouring_cq_ring_entries = params.cq_off.ring_entries;
+ iouring_cq_overflow = params.cq_off.overflow;
+ iouring_cq_cqes = params.cq_off.cqes;
+
+ iouring_tfd = timerfd_create (CLOCK_MONOTONIC, TFD_CLOEXEC);
+
+ if (iouring_tfd < 0)
+ return iouring_tfd;
+
+ iouring_tfd_to = EV_TSTAMP_HUGE;
+
+ return 0;
+}
+
+ecb_cold
+static void
+iouring_fork (EV_P)
+{
+ iouring_internal_destroy (EV_A);
+
+ while (iouring_internal_init (EV_A) < 0)
+ ev_syserr ("(libev) io_uring_setup");
+
+ /* forking epoll should also effectively unregister all fds from the backend */
+ epoll_fork (EV_A);
+ /* epoll_fork already did this. hopefully */
+ /*fd_rearm_all (EV_A);*/
+
+ ev_io_stop (EV_A_ &iouring_epoll_w);
+ ev_io_set (EV_A_ &iouring_epoll_w, backend_fd, EV_READ);
+ ev_io_start (EV_A_ &iouring_epoll_w);
+
+ ev_io_stop (EV_A_ &iouring_tfd_w);
+ ev_io_set (EV_A_ &iouring_tfd_w, iouring_tfd, EV_READ);
+ ev_io_start (EV_A_ &iouring_tfd_w);
+}
+
+/*****************************************************************************/
+
+static void
+iouring_modify (EV_P_ int fd, int oev, int nev)
+{
+ if (ecb_expect_false (anfds [fd].eflags))
+ {
+ /* we handed this fd over to epoll, so undo this first */
+ /* we do it manually because the optimisations on epoll_modify won't do us any good */
+ epoll_ctl (iouring_fd, EPOLL_CTL_DEL, fd, 0);
+ anfds [fd].eflags = 0;
+ oev = 0;
+ }
+
+ if (oev)
+ {
+ /* we assume the sqe's are all "properly" initialised */
+ struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
+ sqe->opcode = IORING_OP_POLL_REMOVE;
+ sqe->fd = fd;
+ sqe->user_data = -1;
+ iouring_sqe_submit (EV_A_ sqe);
+
+ /* increment generation counter to avoid handling old events */
+ ++anfds [fd].egen;
+ }
+
+ if (nev)
+ {
+ struct io_uring_sqe *sqe = iouring_sqe_get (EV_A);
+ sqe->opcode = IORING_OP_POLL_ADD;
+ sqe->fd = fd;
+ sqe->user_data = (uint32_t)fd | ((__u64)(uint32_t)anfds [fd].egen << 32);
+ sqe->poll_events =
+ (nev & EV_READ ? POLLIN : 0)
+ | (nev & EV_WRITE ? POLLOUT : 0);
+ iouring_sqe_submit (EV_A_ sqe);
+ }
+}
+
+inline_size
+void
+iouring_tfd_update (EV_P_ ev_tstamp timeout)
+{
+ ev_tstamp tfd_to = mn_now + timeout;
+
+ /* we assume there will be many iterations per timer change, so
+ * we only re-set the timerfd when we have to because its expiry
+ * is too late.
+ */
+ if (ecb_expect_false (tfd_to < iouring_tfd_to))
+ {
+ struct itimerspec its;
+
+ iouring_tfd_to = tfd_to;
+ EV_TS_SET (its.it_interval, 0.);
+ EV_TS_SET (its.it_value, tfd_to);
+
+ if (timerfd_settime (iouring_tfd, TFD_TIMER_ABSTIME, &its, 0) < 0)
+ assert (("libev: iouring timerfd_settime failed", 0));
+ }
+}
+
+inline_size
+void
+iouring_process_cqe (EV_P_ struct io_uring_cqe *cqe)
+{
+ int fd = cqe->user_data & 0xffffffffU;
+ uint32_t gen = cqe->user_data >> 32;
+ int res = cqe->res;
+
+ /* ignore fd removal events, if there are any. TODO: verify */
+ if (cqe->user_data == (__u64)-1)
+ abort ();//D
+
+ assert (("libev: io_uring fd must be in-bounds", fd >= 0 && fd < anfdmax));
+
+ /* documentation lies, of course. the result value is NOT like
+ * normal syscalls, but like linux raw syscalls, i.e. negative
+ * error numbers. fortunate, as otherwise there would be no way
+ * to get error codes at all. still, why not document this?
+ */
+
+ /* ignore event if generation doesn't match */
+ /* this should actually be very rare */
+ if (ecb_expect_false (gen != (uint32_t)anfds [fd].egen))
+ return;
+
+ if (ecb_expect_false (res < 0))
+ {
+ if (res == -EINVAL)
+ {
+ /* we assume this error code means the fd/poll combination is buggy
+ * and fall back to epoll.
+ * this error code might also indicate a bug, but the kernel doesn't
+ * distinguish between those two conditions, so... sigh...
+ */
+
+ epoll_modify (EV_A_ fd, 0, anfds [fd].events);
+ }
+ else if (res == -EBADF)
+ {
+ assert (("libev: event loop rejected bad fd", res != -EBADF));
+ fd_kill (EV_A_ fd);
+ }
+ else
+ {
+ errno = -res;
+ ev_syserr ("(libev) IORING_OP_POLL_ADD");
+ }
+
+ return;
+ }
+
+ /* feed events, we do not expect or handle POLLNVAL */
+ fd_event (
+ EV_A_
+ fd,
+ (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
+ | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
+ );
+
+ /* io_uring is oneshot, so we need to re-arm the fd next iteration */
+ /* this also means we usually have to do at least one syscall per iteration */
+ anfds [fd].events = 0;
+ fd_change (EV_A_ fd, EV_ANFD_REIFY);
+}
+
+/* called when the event queue overflows */
+ecb_cold
+static void
+iouring_overflow (EV_P)
+{
+ /* we have two options, resize the queue (by tearing down
+ * everything and recreating it, or living with it
+ * and polling.
+ * we implement this by resizing the queue, and, if that fails,
+ * we just recreate the state on every failure, which
+ * kind of is a very inefficient poll.
+ * one danger is, due to the bias toward lower fds,
+ * we will only really get events for those, so
+ * maybe we need a poll() fallback, after all.
+ */
+ /*EV_CQ_VAR (overflow) = 0;*/ /* need to do this if we keep the state and poll manually */
+
+ fd_rearm_all (EV_A);
+
+ /* we double the size until we hit the hard-to-probe maximum */
+ if (!iouring_max_entries)
+ {
+ iouring_entries <<= 1;
+ iouring_fork (EV_A);
+ }
+ else
+ {
+ /* we hit the kernel limit, we should fall back to something else.
+ * we can either poll() a few times and hope for the best,
+ * poll always, or switch to epoll.
+ * since we use epoll anyways, go epoll.
+ */
+
+ iouring_internal_destroy (EV_A);
+
+ /* this should make it so that on return, we don't call any uring functions */
+ iouring_to_submit = 0;
+
+ for (;;)
+ {
+ backend = epoll_init (EV_A_ 0);
+
+ if (backend)
+ break;
+
+ ev_syserr ("(libev) iouring switch to epoll");
+ }
+ }
+}
+
+/* handle any events in the completion queue, return true if there were any */
+static int
+iouring_handle_cq (EV_P)
+{
+ unsigned head, tail, mask;
+
+ head = EV_CQ_VAR (head);
+ ECB_MEMORY_FENCE_ACQUIRE;
+ tail = EV_CQ_VAR (tail);
+
+ if (head == tail)
+ return 0;
+
+ /* it can only overflow if we have events, yes, yes? */
+ if (ecb_expect_false (EV_CQ_VAR (overflow)))
+ {
+ iouring_overflow (EV_A);
+ return 1;
+ }
+
+ mask = EV_CQ_VAR (ring_mask);
+
+ do
+ iouring_process_cqe (EV_A_ &EV_CQES [head++ & mask]);
+ while (head != tail);
+
+ EV_CQ_VAR (head) = head;
+ ECB_MEMORY_FENCE_RELEASE;
+
+ return 1;
+}
+
+static void
+iouring_poll (EV_P_ ev_tstamp timeout)
+{
+ /* if we have events, no need for extra syscalls, but we might have to queue events */
+ if (iouring_handle_cq (EV_A))
+ timeout = EV_TS_CONST (0.);
+ else
+ /* no events, so maybe wait for some */
+ iouring_tfd_update (EV_A_ timeout);
+
+ /* only enter the kernel if we have something to submit, or we need to wait */
+ if (timeout || iouring_to_submit)
+ {
+ int res;
+
+ EV_RELEASE_CB;
+
+ res = evsys_io_uring_enter (iouring_fd, iouring_to_submit, 1,
+ timeout > EV_TS_CONST (0.) ? IORING_ENTER_GETEVENTS : 0, 0, 0);
+ iouring_to_submit = 0;
+
+ EV_ACQUIRE_CB;
+
+ if (ecb_expect_false (res < 0))
+ if (errno == EINTR)
+ /* ignore */;
+ else
+ ev_syserr ("(libev) iouring setup");
+ else
+ iouring_handle_cq (EV_A);
+ }
+}
+
+inline_size
+int
+iouring_init (EV_P_ int flags)
+{
+ if (!epoll_init (EV_A_ 0))
+ return 0;
+
+ iouring_entries = IOURING_INIT_ENTRIES;
+ iouring_max_entries = 0;
+
+ if (iouring_internal_init (EV_A) < 0)
+ {
+ iouring_internal_destroy (EV_A);
+ return 0;
+ }
+
+ ev_io_init (&iouring_epoll_w, iouring_epoll_cb, backend_fd, EV_READ);
+ ev_set_priority (&iouring_epoll_w, EV_MAXPRI);
+
+ ev_io_init (&iouring_tfd_w, iouring_tfd_cb, iouring_tfd, EV_READ);
+ ev_set_priority (&iouring_tfd_w, EV_MAXPRI);
+
+ ev_io_start (EV_A_ &iouring_epoll_w);
+ ev_unref (EV_A); /* watcher should not keep loop alive */
+
+ ev_io_start (EV_A_ &iouring_tfd_w);
+ ev_unref (EV_A); /* watcher should not keep loop alive */
+
+ backend_modify = iouring_modify;
+ backend_poll = iouring_poll;
+
+ return EVBACKEND_IOURING;
+}
+
+inline_size
+void
+iouring_destroy (EV_P)
+{
+ iouring_internal_destroy (EV_A);
+ epoll_destroy (EV_A);
+}
+
diff --git a/libev/ev_kqueue.c b/libev/ev_kqueue.c
index c1124c9..69c5147 100644
--- a/libev/ev_kqueue.c
+++ b/libev/ev_kqueue.c
@@ -103,7 +103,7 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
EV_ACQUIRE_CB;
kqueue_changecnt = 0;
- if (expect_false (res < 0))
+ if (ecb_expect_false (res < 0))
{
if (errno != EINTR)
ev_syserr ("(libev) kqueue kevent");
@@ -115,7 +115,7 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
{
int fd = kqueue_events [i].ident;
- if (expect_false (kqueue_events [i].flags & EV_ERROR))
+ if (ecb_expect_false (kqueue_events [i].flags & EV_ERROR))
{
int err = kqueue_events [i].data;
@@ -151,7 +151,7 @@ kqueue_poll (EV_P_ ev_tstamp timeout)
);
}
- if (expect_false (res == kqueue_eventmax))
+ if (ecb_expect_false (res == kqueue_eventmax))
{
ev_free (kqueue_events);
kqueue_eventmax = array_nextsize (sizeof (struct kevent), kqueue_eventmax, kqueue_eventmax + 1);
@@ -170,7 +170,7 @@ kqueue_init (EV_P_ int flags)
fcntl (backend_fd, F_SETFD, FD_CLOEXEC); /* not sure if necessary, hopefully doesn't hurt */
- backend_mintime = 1e-9; /* apparently, they did the right thing in freebsd */
+ backend_mintime = EV_TS_CONST (1e-9); /* apparently, they did the right thing in freebsd */
backend_modify = kqueue_modify;
backend_poll = kqueue_poll;
diff --git a/libev/ev_linuxaio.c b/libev/ev_linuxaio.c
index ceba8dd..3e17850 100644
--- a/libev/ev_linuxaio.c
+++ b/libev/ev_linuxaio.c
@@ -118,57 +118,6 @@ struct aio_ring
struct io_event io_events[0];
};
-/*
- * define some syscall wrappers for common architectures
- * this is mostly for nice looks during debugging, not performance.
- * our syscalls return < 0, not == -1, on error. which is good
- * enough for linux aio.
- * TODO: arm is also common nowadays, maybe even mips and x86
- * TODO: after implementing this, it suddenly looks like overkill, but its hard to remove...
- */
-#if __GNUC__ && __linux && ECB_AMD64 && !defined __OPTIMIZE_SIZE__
- /* the costly errno access probably kills this for size optimisation */
-
- #define ev_syscall(nr,narg,arg1,arg2,arg3,arg4,arg5) \
- ({ \
- long res; \
- register unsigned long r5 __asm__ ("r8" ); \
- register unsigned long r4 __asm__ ("r10"); \
- register unsigned long r3 __asm__ ("rdx"); \
- register unsigned long r2 __asm__ ("rsi"); \
- register unsigned long r1 __asm__ ("rdi"); \
- if (narg >= 5) r5 = (unsigned long)(arg5); \
- if (narg >= 4) r4 = (unsigned long)(arg4); \
- if (narg >= 3) r3 = (unsigned long)(arg3); \
- if (narg >= 2) r2 = (unsigned long)(arg2); \
- if (narg >= 1) r1 = (unsigned long)(arg1); \
- __asm__ __volatile__ ( \
- "syscall\n\t" \
- : "=a" (res) \
- : "0" (nr), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5) \
- : "cc", "r11", "cx", "memory"); \
- errno = -res; \
- res; \
- })
-
-#endif
-
-#ifdef ev_syscall
- #define ev_syscall0(nr) ev_syscall (nr, 0, 0, 0, 0, 0, 0
- #define ev_syscall1(nr,arg1) ev_syscall (nr, 1, arg1, 0, 0, 0, 0)
- #define ev_syscall2(nr,arg1,arg2) ev_syscall (nr, 2, arg1, arg2, 0, 0, 0)
- #define ev_syscall3(nr,arg1,arg2,arg3) ev_syscall (nr, 3, arg1, arg2, arg3, 0, 0)
- #define ev_syscall4(nr,arg1,arg2,arg3,arg4) ev_syscall (nr, 3, arg1, arg2, arg3, arg4, 0)
- #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) ev_syscall (nr, 5, arg1, arg2, arg3, arg4, arg5)
-#else
- #define ev_syscall0(nr) syscall (nr)
- #define ev_syscall1(nr,arg1) syscall (nr, arg1)
- #define ev_syscall2(nr,arg1,arg2) syscall (nr, arg1, arg2)
- #define ev_syscall3(nr,arg1,arg2,arg3) syscall (nr, arg1, arg2, arg3)
- #define ev_syscall4(nr,arg1,arg2,arg3,arg4) syscall (nr, arg1, arg2, arg3, arg4)
- #define ev_syscall5(nr,arg1,arg2,arg3,arg4,arg5) syscall (nr, arg1, arg2, arg3, arg4, arg5)
-#endif
-
inline_size
int
evsys_io_setup (unsigned nr_events, aio_context_t *ctx_idp)
@@ -265,7 +214,6 @@ linuxaio_array_needsize_iocbp (ANIOCBP *base, int offset, int count)
memset (iocb, 0, sizeof (*iocb));
iocb->io.aio_lio_opcode = IOCB_CMD_POLL;
- iocb->io.aio_data = offset;
iocb->io.aio_fildes = offset;
base [offset++] = iocb;
@@ -287,28 +235,48 @@ linuxaio_modify (EV_P_ int fd, int oev, int nev)
{
array_needsize (ANIOCBP, linuxaio_iocbps, linuxaio_iocbpmax, fd + 1, linuxaio_array_needsize_iocbp);
ANIOCBP iocb = linuxaio_iocbps [fd];
+ ANFD *anfd = &anfds [fd];
- if (iocb->io.aio_reqprio < 0)
+ if (ecb_expect_false (iocb->io.aio_reqprio < 0))
{
/* we handed this fd over to epoll, so undo this first */
/* we do it manually because the optimisations on epoll_modify won't do us any good */
epoll_ctl (backend_fd, EPOLL_CTL_DEL, fd, 0);
- anfds [fd].emask = 0;
+ anfd->emask = 0;
iocb->io.aio_reqprio = 0;
}
-
- if (iocb->io.aio_buf)
+ else if (ecb_expect_false (iocb->io.aio_buf))
{
- evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0);
- /* on relevant kernels, io_cancel fails with EINPROGRES if everything is fine */
- assert (("libev: linuxaio unexpected io_cancel failed", errno == EINPROGRESS));
+ /* iocb active, so cancel it first before resubmit */
+ /* this assumes we only ever get one call per fd per loop iteration */
+ for (;;)
+ {
+ /* on all relevant kernels, io_cancel fails with EINPROGRESS on "success" */
+ if (ecb_expect_false (evsys_io_cancel (linuxaio_ctx, &iocb->io, (struct io_event *)0) == 0))
+ break;
+
+ if (ecb_expect_true (errno == EINPROGRESS))
+ break;
+
+ /* the EINPROGRESS test is for nicer error message. clumsy. */
+ if (errno != EINTR)
+ {
+ assert (("libev: linuxaio unexpected io_cancel failed", errno != EINTR && errno != EINPROGRESS));
+ break;
+ }
+ }
+
+ /* increment generation counter to avoid handling old events */
+ ++anfd->egen;
}
+ iocb->io.aio_buf =
+ (nev & EV_READ ? POLLIN : 0)
+ | (nev & EV_WRITE ? POLLOUT : 0);
+
if (nev)
{
- iocb->io.aio_buf =
- (nev & EV_READ ? POLLIN : 0)
- | (nev & EV_WRITE ? POLLOUT : 0);
+ iocb->io.aio_data = (uint32_t)fd | ((__u64)(uint32_t)anfd->egen << 32);
/* queue iocb up for io_submit */
/* this assumes we only ever get one call per fd per loop iteration */
@@ -338,21 +306,26 @@ linuxaio_parse_events (EV_P_ struct io_event *ev, int nr)
{
while (nr)
{
- int fd = ev->data;
- int res = ev->res;
+ int fd = ev->data & 0xffffffff;
+ uint32_t gen = ev->data >> 32;
+ int res = ev->res;
assert (("libev: iocb fd must be in-bounds", fd >= 0 && fd < anfdmax));
- /* feed events, we do not expect or handle POLLNVAL */
- fd_event (
- EV_A_
- fd,
- (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
- | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
- );
-
- /* linux aio is oneshot: rearm fd. TODO: this does more work than strictly needed */
- linuxaio_fd_rearm (EV_A_ fd);
+ /* only accept events if generation counter matches */
+ if (ecb_expect_true (gen == (uint32_t)anfds [fd].egen))
+ {
+ /* feed events, we do not expect or handle POLLNVAL */
+ fd_event (
+ EV_A_
+ fd,
+ (res & (POLLOUT | POLLERR | POLLHUP) ? EV_WRITE : 0)
+ | (res & (POLLIN | POLLERR | POLLHUP) ? EV_READ : 0)
+ );
+
+ /* linux aio is oneshot: rearm fd. TODO: this does more work than strictly needed */
+ linuxaio_fd_rearm (EV_A_ fd);
+ }
--nr;
++ev;
@@ -364,21 +337,20 @@ static int
linuxaio_get_events_from_ring (EV_P)
{
struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
+ unsigned head, tail;
/* the kernel reads and writes both of these variables, */
/* as a C extension, we assume that volatile use here */
/* both makes reads atomic and once-only */
- unsigned head = *(volatile unsigned *)&ring->head;
- unsigned tail = *(volatile unsigned *)&ring->tail;
+ head = *(volatile unsigned *)&ring->head;
+ ECB_MEMORY_FENCE_ACQUIRE;
+ tail = *(volatile unsigned *)&ring->tail;
if (head == tail)
return 0;
- /* make sure the events up to tail are visible */
- ECB_MEMORY_FENCE_ACQUIRE;
-
/* parse all available events, but only once, to avoid starvation */
- if (tail > head) /* normal case around */
+ if (ecb_expect_true (tail > head)) /* normal case around */
linuxaio_parse_events (EV_A_ ring->io_events + head, tail - head);
else /* wrapped around */
{
@@ -399,7 +371,7 @@ linuxaio_ringbuf_valid (EV_P)
{
struct aio_ring *ring = (struct aio_ring *)linuxaio_ctx;
- return expect_true (ring->magic == AIO_RING_MAGIC)
+ return ecb_expect_true (ring->magic == AIO_RING_MAGIC)
&& ring->incompat_features == EV_AIO_RING_INCOMPAT_FEATURES
&& ring->header_length == sizeof (struct aio_ring); /* TODO: or use it to find io_event[0]? */
}
@@ -414,7 +386,7 @@ linuxaio_get_events (EV_P_ ev_tstamp timeout)
int want = 1; /* how many events to request */
int ringbuf_valid = linuxaio_ringbuf_valid (EV_A);
- if (expect_true (ringbuf_valid))
+ if (ecb_expect_true (ringbuf_valid))
{
/* if the ring buffer has any events, we don't wait or call the kernel at all */
if (linuxaio_get_events_from_ring (EV_A))
@@ -437,9 +409,7 @@ linuxaio_get_events (EV_P_ ev_tstamp timeout)
EV_RELEASE_CB;
- ts.tv_sec = (long)timeout;
- ts.tv_nsec = (long)((timeout - ts.tv_sec) * 1e9);
-
+ EV_TS_SET (ts, timeout);
res = evsys_io_getevents (linuxaio_ctx, 1, want, ioev, &ts);
EV_ACQUIRE_CB;
@@ -454,7 +424,7 @@ linuxaio_get_events (EV_P_ ev_tstamp timeout)
/* at least one event available, handle them */
linuxaio_parse_events (EV_A_ ioev, res);
- if (expect_true (ringbuf_valid))
+ if (ecb_expect_true (ringbuf_valid))
{
/* if we have a ring buffer, handle any remaining events in it */
linuxaio_get_events_from_ring (EV_A);
@@ -469,7 +439,7 @@ linuxaio_get_events (EV_P_ ev_tstamp timeout)
else
break; /* no events from the kernel, we are done */
- timeout = 0; /* only wait in the first iteration */
+ timeout = EV_TS_CONST (0.); /* only wait in the first iteration */
}
}
@@ -495,7 +465,7 @@ linuxaio_poll (EV_P_ ev_tstamp timeout)
{
int res = evsys_io_submit (linuxaio_ctx, linuxaio_submitcnt - submitted, linuxaio_submits + submitted);
- if (expect_false (res < 0))
+ if (ecb_expect_false (res < 0))
if (errno == EINVAL)
{
/* This happens for unsupported fds, officially, but in my testing,
@@ -535,16 +505,21 @@ linuxaio_poll (EV_P_ ev_tstamp timeout)
++linuxaio_iteration;
if (linuxaio_io_setup (EV_A) < 0)
{
+ /* TODO: rearm all and recreate epoll backend from scratch */
+ /* TODO: might be more prudent? */
+
/* to bad, we can't get a new aio context, go 100% epoll */
linuxaio_free_iocbp (EV_A);
ev_io_stop (EV_A_ &linuxaio_epoll_w);
ev_ref (EV_A);
linuxaio_ctx = 0;
+
+ backend = EVBACKEND_EPOLL;
backend_modify = epoll_modify;
backend_poll = epoll_poll;
}
- timeout = 0;
+ timeout = EV_TS_CONST (0.);
/* it's easiest to handle this mess in another iteration */
return;
}
@@ -555,8 +530,13 @@ linuxaio_poll (EV_P_ ev_tstamp timeout)
res = 1; /* skip this iocb */
}
+ else if (errno == EINTR) /* not seen in reality, not documented */
+ res = 0; /* silently ignore and retry */
else
- ev_syserr ("(libev) linuxaio io_submit");
+ {
+ ev_syserr ("(libev) linuxaio io_submit");
+ res = 0;
+ }
submitted += res;
}
@@ -589,13 +569,13 @@ linuxaio_init (EV_P_ int flags)
return 0;
}
- ev_io_init (EV_A_ &linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
+ ev_io_init (&linuxaio_epoll_w, linuxaio_epoll_cb, backend_fd, EV_READ);
ev_set_priority (&linuxaio_epoll_w, EV_MAXPRI);
ev_io_start (EV_A_ &linuxaio_epoll_w);
ev_unref (EV_A); /* watcher should not keep loop alive */
- backend_modify = linuxaio_modify;
- backend_poll = linuxaio_poll;
+ backend_modify = linuxaio_modify;
+ backend_poll = linuxaio_poll;
linuxaio_iocbpmax = 0;
linuxaio_iocbps = 0;
@@ -616,13 +596,13 @@ linuxaio_destroy (EV_P)
evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */
}
-inline_size
-void
+ecb_cold
+static void
linuxaio_fork (EV_P)
{
- /* this frees all iocbs, which is very heavy-handed */
- linuxaio_destroy (EV_A);
linuxaio_submitcnt = 0; /* all pointers were invalidated */
+ linuxaio_free_iocbp (EV_A); /* this frees all iocbs, which is very heavy-handed */
+ evsys_io_destroy (linuxaio_ctx); /* fails in child, aio context is destroyed */
linuxaio_iteration = 0; /* we start over in the child */
@@ -631,12 +611,11 @@ linuxaio_fork (EV_P)
/* forking epoll should also effectively unregister all fds from the backend */
epoll_fork (EV_A);
+ /* epoll_fork already did this. hopefully */
+ /*fd_rearm_all (EV_A);*/
ev_io_stop (EV_A_ &linuxaio_epoll_w);
ev_io_set (EV_A_ &linuxaio_epoll_w, backend_fd, EV_READ);
ev_io_start (EV_A_ &linuxaio_epoll_w);
-
- /* epoll_fork already did this. hopefully */
- /*fd_rearm_all (EV_A);*/
}
diff --git a/libev/ev_poll.c b/libev/ev_poll.c
index a3ef464..e5508dd 100644
--- a/libev/ev_poll.c
+++ b/libev/ev_poll.c
@@ -80,7 +80,7 @@ poll_modify (EV_P_ int fd, int oev, int nev)
{
pollidxs [fd] = -1;
- if (expect_true (idx < --pollcnt))
+ if (ecb_expect_true (idx < --pollcnt))
{
polls [idx] = polls [pollcnt];
pollidxs [polls [idx].fd] = idx;
@@ -95,10 +95,10 @@ poll_poll (EV_P_ ev_tstamp timeout)
int res;
EV_RELEASE_CB;
- res = poll (polls, pollcnt, timeout * 1e3);
+ res = poll (polls, pollcnt, EV_TS_TO_MSEC (timeout));
EV_ACQUIRE_CB;
- if (expect_false (res < 0))
+ if (ecb_expect_false (res < 0))
{
if (errno == EBADF)
fd_ebadf (EV_A);
@@ -112,11 +112,11 @@ poll_poll (EV_P_ ev_tstamp timeout)
{
assert (("libev: poll returned illegal result, broken BSD kernel?", p < polls + pollcnt));
- if (expect_false (p->revents)) /* this expect is debatable */
+ if (ecb_expect_false (p->revents)) /* this expect is debatable */
{
--res;
- if (expect_false (p->revents & POLLNVAL))
+ if (ecb_expect_false (p->revents & POLLNVAL))
{
assert (("libev: poll found invalid fd in poll set", 0));
fd_kill (EV_A_ p->fd);
@@ -136,7 +136,7 @@ inline_size
int
poll_init (EV_P_ int flags)
{
- backend_mintime = 1e-3;
+ backend_mintime = EV_TS_CONST (1e-3);
backend_modify = poll_modify;
backend_poll = poll_poll;
diff --git a/libev/ev_port.c b/libev/ev_port.c
index 34ab1a7..f4cd9d9 100644
--- a/libev/ev_port.c
+++ b/libev/ev_port.c
@@ -70,7 +70,7 @@ port_associate_and_check (EV_P_ int fd, int ev)
{
if (errno == EBADFD)
{
- assert (("libev: port_associate found invalid fd", errno != EBADFD);
+ assert (("libev: port_associate found invalid fd", errno != EBADFD));
fd_kill (EV_A_ fd);
}
else
@@ -132,7 +132,7 @@ port_poll (EV_P_ ev_tstamp timeout)
}
}
- if (expect_false (nget == port_eventmax))
+ if (ecb_expect_false (nget == port_eventmax))
{
ev_free (port_events);
port_eventmax = array_nextsize (sizeof (port_event_t), port_eventmax, port_eventmax + 1);
@@ -154,11 +154,11 @@ port_init (EV_P_ int flags)
/* if my reading of the opensolaris kernel sources are correct, then
* opensolaris does something very stupid: it checks if the time has already
- * elapsed and doesn't round up if that is the case,m otherwise it DOES round
+ * elapsed and doesn't round up if that is the case, otherwise it DOES round
* up. Since we can't know what the case is, we need to guess by using a
* "large enough" timeout. Normally, 1e-9 would be correct.
*/
- backend_mintime = 1e-3; /* needed to compensate for port_getn returning early */
+ backend_mintime = EV_TS_CONST (1e-3); /* needed to compensate for port_getn returning early */
backend_modify = port_modify;
backend_poll = port_poll;
diff --git a/libev/ev_select.c b/libev/ev_select.c
index ed1fc7a..b862c81 100644
--- a/libev/ev_select.c
+++ b/libev/ev_select.c
@@ -108,7 +108,7 @@ select_modify (EV_P_ int fd, int oev, int nev)
int word = fd / NFDBITS;
fd_mask mask = 1UL << (fd % NFDBITS);
- if (expect_false (vec_max <= word))
+ if (ecb_expect_false (vec_max <= word))
{
int new_max = word + 1;
@@ -171,7 +171,7 @@ select_poll (EV_P_ ev_tstamp timeout)
#endif
EV_ACQUIRE_CB;
- if (expect_false (res < 0))
+ if (ecb_expect_false (res < 0))
{
#if EV_SELECT_IS_WINSOCKET
errno = WSAGetLastError ();
@@ -197,7 +197,7 @@ select_poll (EV_P_ ev_tstamp timeout)
{
if (timeout)
{
- unsigned long ms = timeout * 1e3;
+ unsigned long ms = EV_TS_TO_MSEC (timeout);
Sleep (ms ? ms : 1);
}
@@ -236,7 +236,7 @@ select_poll (EV_P_ ev_tstamp timeout)
if (FD_ISSET (handle, (fd_set *)vec_eo)) events |= EV_WRITE;
#endif
- if (expect_true (events))
+ if (ecb_expect_true (events))
fd_event (EV_A_ fd, events);
}
}
@@ -262,7 +262,7 @@ select_poll (EV_P_ ev_tstamp timeout)
events |= word_r & mask ? EV_READ : 0;
events |= word_w & mask ? EV_WRITE : 0;
- if (expect_true (events))
+ if (ecb_expect_true (events))
fd_event (EV_A_ word * NFDBITS + bit, events);
}
}
@@ -275,7 +275,7 @@ inline_size
int
select_init (EV_P_ int flags)
{
- backend_mintime = 1e-6;
+ backend_mintime = EV_TS_CONST (1e-6);
backend_modify = select_modify;
backend_poll = select_poll;
diff --git a/libev/ev_vars.h b/libev/ev_vars.h
index 1556e6b..44b5bbd 100644
--- a/libev/ev_vars.h
+++ b/libev/ev_vars.h
@@ -109,7 +109,6 @@ VARx(int, epoll_epermmax)
#if EV_USE_LINUXAIO || EV_GENWRAP
VARx(aio_context_t, linuxaio_ctx)
-VARx(char, linuxaio_ringbuf_valid)
VARx(int, linuxaio_iteration)
VARx(struct aniocb **, linuxaio_iocbps)
VARx(int, linuxaio_iocbpmax)
@@ -119,6 +118,36 @@ VARx(int, linuxaio_submitmax)
VARx(ev_io, linuxaio_epoll_w)
#endif
+#if EV_USE_IOURING || EV_GENWRAP
+VARx(int, iouring_fd)
+VARx(unsigned, iouring_to_submit);
+VARx(int, iouring_entries)
+VARx(int, iouring_max_entries)
+VARx(void *, iouring_sq_ring)
+VARx(void *, iouring_cq_ring)
+VARx(void *, iouring_sqes)
+VARx(uint32_t, iouring_sq_ring_size)
+VARx(uint32_t, iouring_cq_ring_size)
+VARx(uint32_t, iouring_sqes_size)
+VARx(uint32_t, iouring_sq_head)
+VARx(uint32_t, iouring_sq_tail)
+VARx(uint32_t, iouring_sq_ring_mask)
+VARx(uint32_t, iouring_sq_ring_entries)
+VARx(uint32_t, iouring_sq_flags)
+VARx(uint32_t, iouring_sq_dropped)
+VARx(uint32_t, iouring_sq_array)
+VARx(uint32_t, iouring_cq_head)
+VARx(uint32_t, iouring_cq_tail)
+VARx(uint32_t, iouring_cq_ring_mask)
+VARx(uint32_t, iouring_cq_ring_entries)
+VARx(uint32_t, iouring_cq_overflow)
+VARx(uint32_t, iouring_cq_cqes)
+VARx(ev_tstamp, iouring_tfd_to)
+VARx(int, iouring_tfd)
+VARx(ev_io, iouring_tfd_w)
+VARx(ev_io, iouring_epoll_w)
+#endif
+
#if EV_USE_KQUEUE || EV_GENWRAP
VARx(pid_t, kqueue_fd_pid)
VARx(struct kevent *, kqueue_changes)
diff --git a/libev/ev_win32.c b/libev/ev_win32.c
index fd67135..97344c3 100644
--- a/libev/ev_win32.c
+++ b/libev/ev_win32.c
@@ -154,8 +154,8 @@ ev_time (void)
ui.u.LowPart = ft.dwLowDateTime;
ui.u.HighPart = ft.dwHighDateTime;
- /* msvc cannot convert ulonglong to double... yes, it is that sucky */
- return (LONGLONG)(ui.QuadPart - 116444736000000000) * 1e-7;
+ /* also, msvc cannot convert ulonglong to double... yes, it is that sucky */
+ return EV_TS_FROM_USEC (((LONGLONG)(ui.QuadPart - 116444736000000000) * 1e-1));
}
#endif
diff --git a/libev/ev_wrap.h b/libev/ev_wrap.h
index 90bb0a1..e6b7cb3 100644
--- a/libev/ev_wrap.h
+++ b/libev/ev_wrap.h
@@ -44,6 +44,33 @@
#define invoke_cb ((loop)->invoke_cb)
#define io_blocktime ((loop)->io_blocktime)
#define iocp ((loop)->iocp)
+#define iouring_cq_cqes ((loop)->iouring_cq_cqes)
+#define iouring_cq_head ((loop)->iouring_cq_head)
+#define iouring_cq_overflow ((loop)->iouring_cq_overflow)
+#define iouring_cq_ring ((loop)->iouring_cq_ring)
+#define iouring_cq_ring_entries ((loop)->iouring_cq_ring_entries)
+#define iouring_cq_ring_mask ((loop)->iouring_cq_ring_mask)
+#define iouring_cq_ring_size ((loop)->iouring_cq_ring_size)
+#define iouring_cq_tail ((loop)->iouring_cq_tail)
+#define iouring_entries ((loop)->iouring_entries)
+#define iouring_epoll_w ((loop)->iouring_epoll_w)
+#define iouring_fd ((loop)->iouring_fd)
+#define iouring_max_entries ((loop)->iouring_max_entries)
+#define iouring_sq_array ((loop)->iouring_sq_array)
+#define iouring_sq_dropped ((loop)->iouring_sq_dropped)
+#define iouring_sq_flags ((loop)->iouring_sq_flags)
+#define iouring_sq_head ((loop)->iouring_sq_head)
+#define iouring_sq_ring ((loop)->iouring_sq_ring)
+#define iouring_sq_ring_entries ((loop)->iouring_sq_ring_entries)
+#define iouring_sq_ring_mask ((loop)->iouring_sq_ring_mask)
+#define iouring_sq_ring_size ((loop)->iouring_sq_ring_size)
+#define iouring_sq_tail ((loop)->iouring_sq_tail)
+#define iouring_sqes ((loop)->iouring_sqes)
+#define iouring_sqes_size ((loop)->iouring_sqes_size)
+#define iouring_tfd ((loop)->iouring_tfd)
+#define iouring_tfd_to ((loop)->iouring_tfd_to)
+#define iouring_tfd_w ((loop)->iouring_tfd_w)
+#define iouring_to_submit ((loop)->iouring_to_submit)
#define kqueue_changecnt ((loop)->kqueue_changecnt)
#define kqueue_changemax ((loop)->kqueue_changemax)
#define kqueue_changes ((loop)->kqueue_changes)
@@ -151,6 +178,33 @@
#undef invoke_cb
#undef io_blocktime
#undef iocp
+#undef iouring_cq_cqes
+#undef iouring_cq_head
+#undef iouring_cq_overflow
+#undef iouring_cq_ring
+#undef iouring_cq_ring_entries
+#undef iouring_cq_ring_mask
+#undef iouring_cq_ring_size
+#undef iouring_cq_tail
+#undef iouring_entries
+#undef iouring_epoll_w
+#undef iouring_fd
+#undef iouring_max_entries
+#undef iouring_sq_array
+#undef iouring_sq_dropped
+#undef iouring_sq_flags
+#undef iouring_sq_head
+#undef iouring_sq_ring
+#undef iouring_sq_ring_entries
+#undef iouring_sq_ring_mask
+#undef iouring_sq_ring_size
+#undef iouring_sq_tail
+#undef iouring_sqes
+#undef iouring_sqes_size
+#undef iouring_tfd
+#undef iouring_tfd_to
+#undef iouring_tfd_w
+#undef iouring_to_submit
#undef kqueue_changecnt
#undef kqueue_changemax
#undef kqueue_changes
diff --git a/schmorp.h b/schmorp.h
index 0d23972..bc6faf0 100644
--- a/schmorp.h
+++ b/schmorp.h
@@ -57,10 +57,12 @@
typedef IV VAL64;
# define SvVAL64(sv) SvIV (sv)
# define newSVval64(i64) newSViv (i64)
+# define sv_setval64(sv,i64) sv_setiv ((sv), (i64))
#else
typedef NV VAL64;
# define SvVAL64(sv) SvNV (sv)
# define newSVval64(i64) newSVnv (i64)
+# define sv_setval64(sv,i64) sv_setnv ((sv), (i64))
#endif
/* typemap for the above */