diff --git a/src/coeio.cc b/src/coeio.cc
index 3490dba7be531d8ebc913cf005124a0bf9f9f949..3950c7a492a7be907f3797e3d4cb7098d8b2abe8 100644
--- a/src/coeio.cc
+++ b/src/coeio.cc
@@ -98,6 +98,11 @@ coeio_want_poll_cb(void)
 	ev_async_send(coeio_manager.loop, &coeio_manager.coeio_async);
 }
 
+static void
+coeio_done_poll_cb(void)
+{
+}
+
 /**
  * Init coeio subsystem.
  *
@@ -106,7 +111,7 @@
 void
 coeio_init(void)
 {
-	eio_init(coeio_want_poll_cb, NULL);
+	eio_init(coeio_want_poll_cb, coeio_done_poll_cb);
 	coeio_manager.loop = loop();
 
 	ev_idle_init(&coeio_manager.coeio_idle, coeio_idle_cb);
diff --git a/third_party/libeio/CVS/Entries b/third_party/libeio/CVS/Entries
index df3b2e43405d418ef93891b6f3c5553d3eb04ea6..040123f84a7a16cdfe6f6bcad9338962a662939f 100644
--- a/third_party/libeio/CVS/Entries
+++ b/third_party/libeio/CVS/Entries
@@ -1,15 +1,15 @@
-/Changes/1.53/Tue Apr 7 16:19:34 2015//
-/LICENSE/1.1/Tue Nov 18 10:15:55 2014//
-/Makefile.am/1.4/Tue Apr 7 16:19:34 2015//
-/autogen.sh/1.4/Tue Nov 18 10:15:55 2014//
-/configure.ac/1.10/Tue Nov 18 10:15:55 2014//
-/demo.c/1.4/Tue Nov 18 10:15:55 2014//
-/ecb.h/1.23/Tue Apr 7 16:19:34 2015//
-/eio.3/1.1/Tue Nov 18 10:15:55 2014//
-/eio.c/1.132/Result of merge//
-/eio.h/1.54/Tue Apr 7 16:19:34 2015//
-/eio.pod/1.35/Tue Apr 7 16:19:34 2015//
-/libeio.m4/1.22/Tue Nov 18 10:15:55 2014//
-/xthread.h/1.17/Result of merge//
-/etp.c/1.2/Mon Aug 18 04:26:03 2014//
+/Changes/1.54/Sun Oct 4 10:44:32 2015//
+/LICENSE/1.1/Mon Aug 17 17:43:15 2015//
+/Makefile.am/1.4/Mon Aug 17 17:43:15 2015//
+/autogen.sh/1.4/Mon Aug 17 17:43:15 2015//
+/configure.ac/1.10/Mon Aug 17 17:43:15 2015//
+/demo.c/1.4/Mon Aug 17 17:43:15 2015//
+/ecb.h/1.26/Sun Oct 4 10:44:32 2015//
+/eio.3/1.1/Mon Aug 17 17:43:15 2015//
+/eio.c/1.139/Sun Oct 4 10:44:32 2015//
+/eio.h/1.56/Sun Oct 4 10:44:32 2015//
+/eio.pod/1.35/Mon Aug 17 17:43:15 2015//
+/etp.c/1.10/Sun Oct 4 10:44:32 2015//
+/libeio.m4/1.23/Sun Oct 4 10:44:32 2015//
+/xthread.h/1.18/Sun Oct 4 10:44:32 2015//
 D
diff --git a/third_party/libeio/Changes b/third_party/libeio/Changes
index eed266e2fda0d1344fdf84942c9fc6718a644a4b..88b39aa6033989aefbfb09ef2f2c0acb4d48a844 100644
--- a/third_party/libeio/Changes
+++ b/third_party/libeio/Changes
@@ -75,4 +75,5 @@
 TODO: maybe work around 3.996gb barrier in pread/pwrite as well, maybe
         readahead (cygwin) provides them for a while now.
       - provide pread/pwrite implementations for win32.
      - implement aio_realpath for windows.
+      - add EIO_FALLOC_FL_COLLAPSE_RANGE and EIO_FALLOC_FL_ZERO_RANGE.
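
The coeio.cc hunk above replaces the NULL done_poll argument with a real (if
empty) callback. As a hedged illustration of the eio_init callback contract -
not code taken from this patch - here is a minimal single-threaded driver;
the spinning wait and the file name are illustrative assumptions, a real host
loop (like coeio's) would block on an async wakeup instead:

    #include <stdio.h>
    #include "eio.h"

    static volatile int pending;

    /* called from a pool thread: results were queued, wake the poller */
    static void want_poll (void) { pending = 1; }
    /* called from the polling thread: the result queue is drained */
    static void done_poll (void) { pending = 0; }

    static int
    stat_done (eio_req *req)
    {
      printf ("stat result: %d\n", (int)req->result);
      return 0;
    }

    int
    main (void)
    {
      eio_init (want_poll, done_poll);
      eio_stat ("/etc/hosts", 0, stat_done, 0);

      while (eio_nreqs ())   /* a real loop would block here, not spin */
        if (pending)
          eio_poll ();

      return 0;
    }
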
diff --git a/third_party/libeio/ecb.h b/third_party/libeio/ecb.h
index 2348b6a243799d397e8eb070fedb4b9161880236..33d1decdf8d0214b7ebcf2ba9f6e89cabc44bc9f 100644
--- a/third_party/libeio/ecb.h
+++ b/third_party/libeio/ecb.h
@@ -76,8 +76,11 @@
   #endif
 #endif
 
+#define ECB_GCC_AMD64 (__amd64 || __amd64__ || __x86_64 || __x86_64__)
+#define ECB_MSVC_AMD64 (_M_AMD64 || _M_X64)
+
 /* work around x32 idiocy by defining proper macros */
-#if __amd64 || __x86_64 || _M_AMD64 || _M_X64
+#if ECB_GCC_AMD64 || ECB_MSVC_AMD64
   #if _ILP32
     #define ECB_AMD64_X32 1
   #else
@@ -149,13 +152,18 @@
   #define ECB_MEMORY_FENCE do { } while (0)
 #endif
 
+/* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/compiler_ref/compiler_builtins.html */
+#if __xlC__ && ECB_CPP
+  #include <builtins.h>
+#endif
+
 #ifndef ECB_MEMORY_FENCE
   #if ECB_GCC_VERSION(2,5) || defined __INTEL_COMPILER || (__llvm__ && __GNUC__) || __SUNPRO_C >= 0x5110 || __SUNPRO_CC >= 0x5110
     #if __i386 || __i386__
       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("lock; orb $0, -1(%%esp)" : : : "memory")
       #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
       #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
-    #elif __amd64 || __amd64__ || __x86_64 || __x86_64__
+    #elif ECB_GCC_AMD64
      #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("mfence" : : : "memory")
      #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("" : : : "memory")
      #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("")
@@ -169,7 +177,7 @@
       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb" : : : "memory")
     #elif __aarch64__
       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("dmb ish" : : : "memory")
-    #elif (__sparc || __sparc__) && !__sparcv8
+    #elif (__sparc || __sparc__) && !(__sparc_v8__ || defined __sparcv8)
       #define ECB_MEMORY_FENCE         __asm__ __volatile__ ("membar #LoadStore | #LoadLoad | #StoreStore | #StoreLoad" : : : "memory")
       #define ECB_MEMORY_FENCE_ACQUIRE __asm__ __volatile__ ("membar #LoadStore | #LoadLoad" : : : "memory")
       #define ECB_MEMORY_FENCE_RELEASE __asm__ __volatile__ ("membar #LoadStore | #StoreStore")
@@ -304,6 +312,7 @@ typedef int ecb_bool;
 #define ECB_CONCAT(a, b) ECB_CONCAT_(a, b)
 #define ECB_STRINGIFY_(a) # a
 #define ECB_STRINGIFY(a) ECB_STRINGIFY_(a)
+#define ECB_STRINGIFY_EXPR(expr) ((expr), ECB_STRINGIFY_ (expr))
 
 #define ecb_function_ ecb_inline
 
@@ -350,15 +359,32 @@
   #define ecb_deprecated ecb_attribute ((__deprecated__))
 #endif
 
-#define ecb_noinline ecb_attribute ((__noinline__))
+#if _MSC_VER >= 1500
+  #define ecb_deprecated_message(msg) __declspec (deprecated (msg))
+#elif ECB_GCC_VERSION(4,5)
+  #define ecb_deprecated_message(msg) ecb_attribute ((__deprecated__ (msg)))
+#else
+  #define ecb_deprecated_message(msg) ecb_deprecated
+#endif
+
+#if _MSC_VER >= 1400
+  #define ecb_noinline __declspec (noinline)
+#else
+  #define ecb_noinline ecb_attribute ((__noinline__))
+#endif
+
 #define ecb_unused ecb_attribute ((__unused__))
 #define ecb_const ecb_attribute ((__const__))
 #define ecb_pure ecb_attribute ((__pure__))
 
-/* TODO http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx __declspec(noreturn) */
 #if ECB_C11 || __IBMC_NORETURN
-  /* http://pic.dhe.ibm.com/infocenter/compbg/v121v141/topic/com.ibm.xlcpp121.bg.doc/language_ref/noreturn.html */
+  /* http://www-01.ibm.com/support/knowledgecenter/SSGH3R_13.1.0/com.ibm.xlcpp131.aix.doc/language_ref/noreturn.html */
   #define ecb_noreturn _Noreturn
+#elif ECB_CPP11
+  #define ecb_noreturn [[noreturn]]
+#elif _MSC_VER >= 1200
+  /* http://msdn.microsoft.com/en-us/library/k6ktzx3s.aspx */
+  #define ecb_noreturn __declspec (noreturn)
 #else
   #define ecb_noreturn ecb_attribute ((__noreturn__))
 #endif
@@ -528,9 +554,18 @@ ecb_inline ecb_const uint64_t ecb_rotl64 (uint64_t x, unsigned int count) { retu
 ecb_inline ecb_const uint64_t ecb_rotr64 (uint64_t x, unsigned int count) { return (x << (64 - count)) | (x >> count); }
 
 #if ECB_GCC_VERSION(4,3) || (ECB_CLANG_BUILTIN(__builtin_bswap32) && ECB_CLANG_BUILTIN(__builtin_bswap64))
+  #if ECB_GCC_VERSION(4,8) || ECB_CLANG_BUILTIN(__builtin_bswap16)
+    #define ecb_bswap16(x) __builtin_bswap16 (x)
+  #else
   #define ecb_bswap16(x) (__builtin_bswap32 (x) >> 16)
+  #endif
   #define ecb_bswap32(x) __builtin_bswap32 (x)
   #define ecb_bswap64(x) __builtin_bswap64 (x)
+#elif _MSC_VER
+  #include <stdlib.h>
+  #define ecb_bswap16(x) ((uint16_t)_byteswap_ushort ((uint16_t)(x)))
+  #define ecb_bswap32(x) ((uint32_t)_byteswap_ulong ((uint32_t)(x)))
+  #define ecb_bswap64(x) ((uint64_t)_byteswap_uint64 ((uint64_t)(x)))
 #else
   ecb_function_ ecb_const uint16_t ecb_bswap16 (uint16_t x);
   ecb_function_ ecb_const uint16_t
@@ -575,7 +610,7 @@ ecb_byteorder_helper (void)
   /* the reason why we have this horrible preprocessor mess */
   /* is to avoid it in all cases, at least on common architectures */
   /* or when using a recent enough gcc version (>= 4.6) */
-#if __i386 || __i386__ || _M_X86 || __amd64 || __amd64__ || _M_X64
+#if ((__i386 || __i386__) && !__VOS__) || _M_IX86 || ECB_GCC_AMD64 || ECB_MSVC_AMD64
   return 0x44;
 #elif __BYTE_ORDER__ && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
   return 0x44;
@@ -636,7 +671,7 @@ ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_he
 /* the only noteworthy exception is ancient armle, which uses order 43218765 */
 #if 0 \
     || __i386 || __i386__ \
-    || __amd64 || __amd64__ || __x86_64 || __x86_64__ \
+    || ECB_GCC_AMD64 \
     || __powerpc__ || __ppc__ || __powerpc64__ || __ppc64__ \
     || defined __s390__ || defined __s390x__ \
     || defined __mips__ \
@@ -646,7 +681,7 @@ ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_he
     || defined __m68k__ \
     || defined __m88k__ \
     || defined __sh__ \
-    || defined _M_IX86 || defined _M_AMD64 || defined _M_IA64 \
+    || defined _M_IX86 || defined ECB_MSVC_AMD64 || defined _M_IA64 \
     || (defined __arm__ && (defined __ARM_EABI__ || defined __EABI__ || defined __VFP_FP__ || defined _WIN32_WCE || defined __ANDROID__)) \
     || defined __aarch64__
   #define ECB_STDFP 1
@@ -674,8 +709,10 @@ ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_he
 
 #if ECB_C99 || _XOPEN_VERSION >= 600 || _POSIX_VERSION >= 200112L
   #define ecb_ldexpf(x,e) ldexpf ((x), (e))
+  #define ecb_frexpf(x,e) frexpf ((x), (e))
 #else
-  #define ecb_ldexpf(x,e) (float) ldexp ((x), (e))
+  #define ecb_ldexpf(x,e) (float) ldexp ((double) (x), (e))
+  #define ecb_frexpf(x,e) (float) frexp ((double) (x), (e))
 #endif
 
 /* converts an ieee half/binary16 to a float */
@@ -714,7 +751,7 @@ ecb_inline ecb_const ecb_bool ecb_little_endian (void) { return ecb_byteorder_he
   if (x < -3.40282346638528860e+38f) return 0xff800000U;
   if (x != x                       ) return 0x7fbfffffU;
 
-  m = frexpf (x, &e) * 0x1000000U;
+  m = ecb_frexpf (x, &e) * 0x1000000U;
 
   r = m & 0x80000000U;
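
The bswap hunks above prefer __builtin_bswap16 on gcc >= 4.8/clang and fall
back to MSVC's _byteswap_* intrinsics elsewhere. A small self-check of the
expected semantics, assuming ecb.h is includable on its own:

    #include <assert.h>
    #include <stdint.h>
    #include "ecb.h"

    int
    main (void)
    {
      assert (ecb_bswap16 (0x1234)                == 0x3412);
      assert (ecb_bswap32 (0x12345678U)           == 0x78563412U);
      assert (ecb_bswap64 (0x0102030405060708ULL) == 0x0807060504030201ULL);
      return 0;
    }
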
diff --git a/third_party/libeio/eio.c b/third_party/libeio/eio.c
index c83a467ded5ddb0391edd0bf22467938ac278d80..77cdff851b703ba18ffa9b977562a65dc90397f0 100644
--- a/third_party/libeio/eio.c
+++ b/third_party/libeio/eio.c
@@ -121,6 +121,7 @@ static void eio_destroy (eio_req *req);
   #define chmod(path,mode)    _chmod (path, mode)
   #define dup(fd)             _dup (fd)
   #define dup2(fd1,fd2)       _dup2 (fd1, fd2)
+  #define pipe(fds)           _pipe (fds, 4096, O_BINARY)
 
   #define fchmod(fd,mode)     EIO_ENOSYS ()
   #define chown(path,uid,gid) EIO_ENOSYS ()
@@ -336,25 +337,7 @@ static void eio_destroy (eio_req *req);
 
 /*****************************************************************************/
 
-struct tmpbuf
-{
-  void *ptr;
-  int len;
-};
-
-static void *
-tmpbuf_get (struct tmpbuf *buf, int len)
-{
-  if (buf->len < len)
-    {
-      free (buf->ptr);
-      buf->ptr = malloc (buf->len = len);
-    }
-
-  return buf->ptr;
-}
-
-struct tmpbuf;
+struct etp_tmpbuf;
 
 #if _POSIX_VERSION >= 200809L
   #define HAVE_AT 1
@@ -364,7 +347,7 @@ struct tmpbuf;
   #endif
 #else
   #define HAVE_AT 0
-  static const char *wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path);
+  static const char *wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path);
 #endif
 
 struct eio_pwd
@@ -384,8 +367,14 @@ struct eio_pwd
 #define ETP_TYPE_QUIT -1
 #define ETP_TYPE_GROUP EIO_GROUP
 
-struct etp_worker;
+static void eio_nop_callback (void) { }
+static void (*eio_want_poll_cb)(void) = eio_nop_callback;
+static void (*eio_done_poll_cb)(void) = eio_nop_callback;
+
+#define ETP_WANT_POLL(pool) eio_want_poll_cb ()
+#define ETP_DONE_POLL(pool) eio_done_poll_cb ()
 
+struct etp_worker;
 #define ETP_REQ eio_req
 #define ETP_DESTROY(req) eio_destroy (req)
 static int eio_finish (eio_req *req);
@@ -395,6 +384,9 @@ static void eio_execute (struct etp_worker *self, eio_req *req);
 
 #include "etp.c"
 
+static struct etp_pool eio_pool;
+#define EIO_POOL (&eio_pool)
+
 /*****************************************************************************/
 
 static void
@@ -402,12 +394,12 @@ grp_try_feed (eio_req *grp)
 {
   while (grp->size < grp->int2 && !EIO_CANCELLED (grp))
     {
-      grp->flags &= ~EIO_FLAG_GROUPADD;
+      grp->flags &= ~ETP_FLAG_GROUPADD;
 
       EIO_FEED (grp);
 
      /* stop if no progress has been made */
-      if (!(grp->flags & EIO_FLAG_GROUPADD))
+      if (!(grp->flags & ETP_FLAG_GROUPADD))
        {
          grp->feed = 0;
          break;
@@ -424,7 +416,7 @@ grp_dec (eio_req *grp)
   grp_try_feed (grp);
 
   /* finish, if done */
-  if (!grp->size && grp->int1)
+  if (!grp->size && grp->flags & ETP_FLAG_DELAYED)
     return eio_finish (grp);
   else
     return 0;
@@ -470,84 +462,84 @@ eio_finish (eio_req *req)
 void
 eio_grp_cancel (eio_req *grp)
 {
-  etp_grp_cancel (grp);
+  etp_grp_cancel (EIO_POOL, grp);
 }
 
 void
 eio_cancel (eio_req *req)
 {
-  etp_cancel (req);
+  etp_cancel (EIO_POOL, req);
 }
 
 void
 eio_submit (eio_req *req)
 {
-  etp_submit (req);
+  etp_submit (EIO_POOL, req);
 }
 
 unsigned int
 eio_nreqs (void)
 {
-  return etp_nreqs ();
+  return etp_nreqs (EIO_POOL);
 }
 
 unsigned int
 eio_nready (void)
 {
-  return etp_nready ();
+  return etp_nready (EIO_POOL);
 }
 
 unsigned int
 eio_npending (void)
 {
-  return etp_npending ();
+  return etp_npending (EIO_POOL);
 }
 
 unsigned int ecb_cold
 eio_nthreads (void)
 {
-  return etp_nthreads ();
+  return etp_nthreads (EIO_POOL);
 }
 
 void ecb_cold
 eio_set_max_poll_time (double nseconds)
 {
-  etp_set_max_poll_time (nseconds);
+  etp_set_max_poll_time (EIO_POOL, nseconds);
 }
 
 void ecb_cold
 eio_set_max_poll_reqs (unsigned int maxreqs)
 {
-  etp_set_max_poll_reqs (maxreqs);
+  etp_set_max_poll_reqs (EIO_POOL, maxreqs);
 }
 
 void ecb_cold
 eio_set_max_idle (unsigned int nthreads)
 {
-  etp_set_max_idle (nthreads);
+  etp_set_max_idle (EIO_POOL, nthreads);
 }
 
 void ecb_cold
 eio_set_idle_timeout (unsigned int seconds)
 {
-  etp_set_idle_timeout (seconds);
+  etp_set_idle_timeout (EIO_POOL, seconds);
 }
 
 void ecb_cold
 eio_set_min_parallel (unsigned int nthreads)
 {
-  etp_set_min_parallel (nthreads);
+  etp_set_min_parallel (EIO_POOL, nthreads);
 }
 
 void ecb_cold
 eio_set_max_parallel (unsigned int nthreads)
 {
-  etp_set_max_parallel (nthreads);
+  etp_set_max_parallel (EIO_POOL, nthreads);
 }
 
 int
 eio_poll (void)
 {
-  return etp_poll ();
+  return etp_poll (EIO_POOL);
 }
 
 /*****************************************************************************/
@@ -967,7 +959,7 @@ eio__lseek (eio_req *req)
 
 /* result will always end up in tmpbuf, there is always space for adding a 0-byte */
 static int
-eio__realpath (struct tmpbuf *tmpbuf, eio_wd wd, const char *path)
+eio__realpath (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
 {
   char *res;
   const char *rel = path;
@@ -986,7 +978,7 @@ eio__realpath (struct tmpbuf *tmpbuf, eio_wd wd, const char *path)
   if (!*rel)
     return -1;
 
-  res = tmpbuf_get (tmpbuf, PATH_MAX * 3);
+  res = etp_tmpbuf_get (tmpbuf, PATH_MAX * 3);
 #ifdef _WIN32
   if (_access (rel, 4) != 0)
     return -1;
@@ -1605,7 +1597,7 @@ eio__scandir (eio_req *req, etp_worker *self)
 /* a bit like realpath, but usually faster because it doesn'T have to return */
 /* an absolute or canonical path */
 static const char *
-wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path)
+wd_expand (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
 {
   if (!wd || *path == '/')
     return path;
@@ -1617,7 +1609,7 @@ wd_expand (struct tmpbuf *tmpbuf, eio_wd wd, const char *path)
   int l1 = wd->len;
   int l2 = strlen (path);
 
-  char *res = tmpbuf_get (tmpbuf, l1 + l2 + 2);
+  char *res = etp_tmpbuf_get (tmpbuf, l1 + l2 + 2);
 
   memcpy (res, wd->str, l1);
   res [l1] = '/';
@@ -1630,7 +1622,7 @@
 #endif
 
 static eio_wd
-eio__wd_open_sync (struct tmpbuf *tmpbuf, eio_wd wd, const char *path)
+eio__wd_open_sync (struct etp_tmpbuf *tmpbuf, eio_wd wd, const char *path)
 {
   int fd;
   eio_wd res;
@@ -1662,7 +1654,7 @@ eio__wd_open_sync (struct tmpbuf *tmpbuf, eio_wd wd, const char *path)
 eio_wd
 eio_wd_open_sync (eio_wd wd, const char *path)
 {
-  struct tmpbuf tmpbuf = { 0 };
+  struct etp_tmpbuf tmpbuf = { };
   wd = eio__wd_open_sync (&tmpbuf, wd, path);
   free (tmpbuf.ptr);
@@ -1721,9 +1713,9 @@ eio__statvfsat (int dirfd, const char *path, struct statvfs *buf)
 #define ALLOC(len) \
   if (!req->ptr2) \
     { \
-      X_LOCK (wrklock); \
+      X_LOCK (EIO_POOL->wrklock); \
       req->flags |= EIO_FLAG_PTR2_FREE; \
-      X_UNLOCK (wrklock); \
+      X_UNLOCK (EIO_POOL->wrklock); \
       req->ptr2 = malloc (len); \
      if (!req->ptr2) \
        { \
@@ -1733,112 +1725,15 @@ eio__statvfsat (int dirfd, const char *path, struct statvfs *buf)
        } \
     }
 
-static void ecb_noinline ecb_cold
-etp_proc_init (void)
-{
-#if HAVE_PRCTL_SET_NAME
-  /* provide a more sensible "thread name" */
-  char name[16 + 1];
-  const int namelen = sizeof (name) - 1;
-  int len;
-
-  prctl (PR_GET_NAME, (unsigned long)name, 0, 0, 0);
-  name [namelen] = 0;
-  len = strlen (name);
-  strcpy (name + (len <= namelen - 4 ? len : namelen - 4), "/eio");
-  prctl (PR_SET_NAME, (unsigned long)name, 0, 0, 0);
-#endif
-}
-
-/* TODO: move somehow to etp.c */
-X_THREAD_PROC (etp_proc)
-{
-  ETP_REQ *req;
-  struct timespec ts;
-  etp_worker *self = (etp_worker *)thr_arg;
-
-  etp_proc_init ();
-
-  /* try to distribute timeouts somewhat evenly */
-  ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);
-
-  for (;;)
-    {
-      ts.tv_sec = 0;
-
-      X_LOCK (reqlock);
-
-      for (;;)
-        {
-          req = reqq_shift (&req_queue);
-
-          if (req)
-            break;
-
-          if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */
-            {
-              X_UNLOCK (reqlock);
-              X_LOCK (wrklock);
-              --started;
-              X_UNLOCK (wrklock);
-              goto quit;
-            }
-
-          ++idle;
-
-          if (idle <= max_idle)
-            /* we are allowed to idle, so do so without any timeout */
-            X_COND_WAIT (reqwait, reqlock);
-          else
-            {
-              /* initialise timeout once */
-              if (!ts.tv_sec)
-                ts.tv_sec = time (0) + idle_timeout;
-
-              if (X_COND_TIMEDWAIT (reqwait, reqlock, ts) == ETIMEDOUT)
-                ts.tv_sec = 1; /* assuming this is not a value computed above.,.. */
-            }
-
-          --idle;
-        }
-
-      --nready;
-
-      X_UNLOCK (reqlock);
-
-      if (req->type == ETP_TYPE_QUIT)
-        goto quit;
-
-      ETP_EXECUTE (self, req);
-
-      X_LOCK (reslock);
-
-      ++npending;
-
-      if (!reqq_push (&res_queue, req) && want_poll_cb)
-        want_poll_cb ();
-
-      etp_worker_clear (self);
-
-      X_UNLOCK (reslock);
-    }
-
-quit:
-  free (req);
-
-  X_LOCK (wrklock);
-  etp_worker_free (self);
-  X_UNLOCK (wrklock);
-
-  return 0;
-}
-
 /*****************************************************************************/
 
 int ecb_cold
 eio_init (void (*want_poll)(void), void (*done_poll)(void))
 {
-  return etp_init (want_poll, done_poll);
+  eio_want_poll_cb = want_poll;
+  eio_done_poll_cb = done_poll;
+
+  return etp_init (EIO_POOL, 0, 0, 0);
 }
 
 ecb_inline void
@@ -2073,8 +1968,10 @@ eio_execute (etp_worker *self, eio_req *req)
 #endif
       break;
 
+#if 0
     case EIO_GROUP:
       abort (); /* handled in eio_request */
+#endif
 
     case EIO_NOP:
       req->result = 0;
@@ -2384,7 +2281,7 @@ eio_grp_add (eio_req *grp, eio_req *req)
 {
   assert (("cannot add requests to IO::AIO::GRP after the group finished", grp->int1 != 2));
 
-  grp->flags |= EIO_FLAG_GROUPADD;
+  grp->flags |= ETP_FLAG_GROUPADD;
 
   ++grp->size;
   req->grp = grp;
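
The group bookkeeping that used to live in eio.c (EIO_FLAG_GROUPADD, the
int1 "delayed" marker) moves behind etp.c's ETP_FLAG_GROUPADD and
ETP_FLAG_DELAYED, while the public grouping API stays the same. A hedged
sketch of that API - the callback bodies and the pattern of adding children
immediately after eio_grp are illustrative assumptions, not taken from this
patch:

    #include "eio.h"

    static int
    grp_done (eio_req *grp)
    {
      /* runs once every request added to the group has finished */
      return 0;
    }

    static void
    stat_both (const char *a, const char *b)
    {
      eio_req *grp = eio_grp (grp_done, 0); /* eio_grp submits the group itself */
      eio_grp_add (grp, eio_stat (a, 0, 0, 0));
      eio_grp_add (grp, eio_stat (b, 0, 0, 0));
    }
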
diff --git a/third_party/libeio/eio.h b/third_party/libeio/eio.h
index b61a8855d593ea727ce32ca2930c89b5f160fba6..0987094c13ae5fd7a7134e1ff8a7c100a8a53aa2 100644
--- a/third_party/libeio/eio.h
+++ b/third_party/libeio/eio.h
@@ -1,7 +1,7 @@
 /*
  * libeio API header
  *
- * Copyright (c) 2007,2008,2009,2010,2011,2012 Marc Alexander Lehmann <libeio@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2012,2015 Marc Alexander Lehmann <libeio@schmorp.de>
  * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
@@ -171,8 +171,10 @@
 enum
 {
   /* these MUST match the value in linux/falloc.h */
-  EIO_FALLOC_FL_KEEP_SIZE  = 1,
-  EIO_FALLOC_FL_PUNCH_HOLE = 2
+  EIO_FALLOC_FL_KEEP_SIZE      = 0x01,
+  EIO_FALLOC_FL_PUNCH_HOLE     = 0x02,
+  EIO_FALLOC_FL_COLLAPSE_RANGE = 0x08,
+  EIO_FALLOC_FL_ZERO_RANGE     = 0x10
 };
 
 /* timestamps and differences - feel free to use double in your code directly */
@@ -285,7 +287,6 @@ struct eio_req
 enum {
   EIO_FLAG_PTR1_FREE = 0x01, /* need to free(ptr1) */
   EIO_FLAG_PTR2_FREE = 0x02, /* need to free(ptr2) */
-  EIO_FLAG_GROUPADD  = 0x04  /* some request was added to the group */
 };
 
 /* undocumented/unsupported/private helper */
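
With the enum above now tracking linux/falloc.h (including the new
collapse-range and zero-range flags), punching a hole while keeping the file
size looks roughly like the sketch below. The fd, offset and length are
illustrative assumptions, and the kernel may still reject flags the
underlying filesystem does not support:

    #include "eio.h"

    static int
    punch_done (eio_req *req)
    {
      /* req->result is 0 on success, -1 with req->errorno set on failure */
      return 0;
    }

    static void
    punch_hole (int fd)
    {
      eio_fallocate (fd, EIO_FALLOC_FL_PUNCH_HOLE | EIO_FALLOC_FL_KEEP_SIZE,
                     0 /* offset */, 4096 /* length */, 0, punch_done, 0);
    }
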
diff --git a/third_party/libeio/etp.c b/third_party/libeio/etp.c
index f97eaccae2c20a89ac0554c5a310640a30e357bc..81f15dbb37dfecc1ea06aa32ee242ae6819269e8 100644
--- a/third_party/libeio/etp.c
+++ b/third_party/libeio/etp.c
@@ -1,7 +1,7 @@
 /*
  * libetp implementation
  *
- * Copyright (c) 2007,2008,2009,2010,2011,2012,2013 Marc Alexander Lehmann <libetp@schmorp.de>
+ * Copyright (c) 2007,2008,2009,2010,2011,2012,2013,2015 Marc Alexander Lehmann <libetp@schmorp.de>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modifica-
@@ -54,10 +54,22 @@
 # define ETP_TYPE_GROUP 1
 #endif
 
+#ifndef ETP_WANT_POLL
+# define ETP_WANT_POLL(pool) pool->want_poll_cb (pool->userdata)
+#endif
+#ifndef ETP_DONE_POLL
+# define ETP_DONE_POLL(pool) pool->done_poll_cb (pool->userdata)
+#endif
+
 #define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1)
 
 #define ETP_TICKS ((1000000 + 1023) >> 10)
 
+enum {
+  ETP_FLAG_GROUPADD = 0x04, /* some request was added to the group */
+  ETP_FLAG_DELAYED  = 0x08, /* group request has been delayed */
+};
+
 /* calculate time difference in ~1/ETP_TICKS of a second */
 ecb_inline int
 etp_tvdiff (struct timeval *tv1, struct timeval *tv2)
@@ -66,30 +78,44 @@ etp_tvdiff (struct timeval *tv1, struct timeval *tv2)
          + ((tv2->tv_usec - tv1->tv_usec) >> 10);
 }
 
-static unsigned int started, idle, wanted = 4;
+struct etp_tmpbuf
+{
+  void *ptr;
+  int len;
+};
 
-static void (*want_poll_cb) (void);
-static void (*done_poll_cb) (void);
-
-static unsigned int max_poll_time;     /* reslock */
-static unsigned int max_poll_reqs;     /* reslock */
+static void *
+etp_tmpbuf_get (struct etp_tmpbuf *buf, int len)
+{
+  if (buf->len < len)
+    {
+      free (buf->ptr);
+      buf->ptr = malloc (buf->len = len);
+    }
 
-static unsigned int nreqs;    /* reqlock */
-static unsigned int nready;   /* reqlock */
-static unsigned int npending; /* reqlock */
-static unsigned int max_idle = 4;      /* maximum number of threads that can idle indefinitely */
-static unsigned int idle_timeout = 10; /* number of seconds after which an idle threads exit */
+  return buf->ptr;
+}
 
-static xmutex_t wrklock;
-static xmutex_t reslock;
-static xmutex_t reqlock;
-static xcond_t  reqwait;
+/*
+ * a somewhat faster data structure might be nice, but
+ * with 8 priorities this actually needs <20 insns
+ * per shift, the most expensive operation.
+ */
+typedef struct
+{
+  ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */
+  int size;
+} etp_reqq;
+
+typedef struct etp_pool *etp_pool;
 
 typedef struct etp_worker
 {
-  struct tmpbuf tmpbuf;
+  etp_pool pool;
+
+  struct etp_tmpbuf tmpbuf;
 
-  /* locked by wrklock */
+  /* locked by pool->wrklock */
   struct etp_worker *prev, *next;
 
   xthread_t tid;
@@ -99,10 +125,37 @@ typedef struct etp_worker
 #endif
 } etp_worker;
 
-static etp_worker wrk_first; /* NOT etp */
+struct etp_pool
+{
+  void *userdata;
+
+  etp_reqq req_queue;
+  etp_reqq res_queue;
+
+  unsigned int started, idle, wanted;
+
+  unsigned int max_poll_time;     /* pool->reslock */
+  unsigned int max_poll_reqs;     /* pool->reslock */
+
+  unsigned int nreqs;    /* pool->reqlock */
+  unsigned int nready;   /* pool->reqlock */
+  unsigned int npending; /* pool->reqlock */
+  unsigned int max_idle;     /* maximum number of threads that can idle indefinitely */
+  unsigned int idle_timeout; /* number of seconds after which an idle thread exits */
+
+  void (*want_poll_cb) (void *userdata);
+  void (*done_poll_cb) (void *userdata);
+
+  xmutex_t wrklock;
+  xmutex_t reslock;
+  xmutex_t reqlock;
+  xcond_t  reqwait;
+
+  etp_worker wrk_first;
+};
 
-#define ETP_WORKER_LOCK(wrk)   X_LOCK   (wrklock)
-#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (wrklock)
+#define ETP_WORKER_LOCK(wrk)   X_LOCK   (pool->wrklock)
+#define ETP_WORKER_UNLOCK(wrk) X_UNLOCK (pool->wrklock)
 
 /* worker threads management */
 
@@ -123,64 +176,51 @@ etp_worker_free (etp_worker *wrk)
 }
 
 ETP_API_DECL unsigned int
-etp_nreqs (void)
+etp_nreqs (etp_pool pool)
 {
   int retval;
-  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
-  retval = nreqs;
-  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
+  if (WORDACCESS_UNSAFE) X_LOCK   (pool->reqlock);
+  retval = pool->nreqs;
+  if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock);
   return retval;
 }
 
 ETP_API_DECL unsigned int
-etp_nready (void)
+etp_nready (etp_pool pool)
 {
   unsigned int retval;
 
-  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
-  retval = nready;
-  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
+  if (WORDACCESS_UNSAFE) X_LOCK   (pool->reqlock);
+  retval = pool->nready;
+  if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock);
 
   return retval;
 }
 
 ETP_API_DECL unsigned int
-etp_npending (void)
+etp_npending (etp_pool pool)
 {
   unsigned int retval;
 
-  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
-  retval = npending;
-  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
+  if (WORDACCESS_UNSAFE) X_LOCK   (pool->reqlock);
+  retval = pool->npending;
+  if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock);
 
   return retval;
 }
 
 ETP_API_DECL unsigned int
-etp_nthreads (void)
+etp_nthreads (etp_pool pool)
 {
   unsigned int retval;
 
-  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
-  retval = started;
-  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
+  if (WORDACCESS_UNSAFE) X_LOCK   (pool->reqlock);
+  retval = pool->started;
+  if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock);
 
   return retval;
 }
 
-/*
- * a somewhat faster data structure might be nice, but
- * with 8 priorities this actually needs <20 insns
- * per shift, the most expensive operation.
- */
-typedef struct {
-  ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */
-  int size;
-} etp_reqq;
-
-static etp_reqq req_queue;
-static etp_reqq res_queue;
-
 static void ecb_noinline ecb_cold
 reqq_init (etp_reqq *q)
 {
@@ -236,100 +276,204 @@ reqq_shift (etp_reqq *q)
 }
 
 ETP_API_DECL int ecb_cold
-etp_init (void (*want_poll)(void), void (*done_poll)(void))
+etp_init (etp_pool pool, void *userdata, void (*want_poll)(void *userdata), void (*done_poll)(void *userdata))
 {
-  X_MUTEX_CREATE (wrklock);
-  X_MUTEX_CREATE (reslock);
-  X_MUTEX_CREATE (reqlock);
-  X_COND_CREATE  (reqwait);
+  X_MUTEX_CREATE (pool->wrklock);
+  X_MUTEX_CREATE (pool->reslock);
+  X_MUTEX_CREATE (pool->reqlock);
+  X_COND_CREATE  (pool->reqwait);
+
+  reqq_init (&pool->req_queue);
+  reqq_init (&pool->res_queue);
 
-  reqq_init (&req_queue);
-  reqq_init (&res_queue);
+  pool->wrk_first.next =
+  pool->wrk_first.prev = &pool->wrk_first;
 
-  wrk_first.next =
-  wrk_first.prev = &wrk_first;
+  pool->started  = 0;
+  pool->idle     = 0;
+  pool->nreqs    = 0;
+  pool->nready   = 0;
+  pool->npending = 0;
+  pool->wanted   = 4;
 
-  started  = 0;
-  idle     = 0;
-  nreqs    = 0;
-  nready   = 0;
-  npending = 0;
+  pool->max_idle     = 4;  /* maximum number of threads that can idle indefinitely */
+  pool->idle_timeout = 10; /* number of seconds after which an idle thread exits */
 
-  want_poll_cb = want_poll;
-  done_poll_cb = done_poll;
+  pool->userdata     = userdata;
+  pool->want_poll_cb = want_poll;
+  pool->done_poll_cb = done_poll;
 
   return 0;
 }
 
-/* not yet in etp.c */
-X_THREAD_PROC (etp_proc);
+static void ecb_noinline ecb_cold
+etp_proc_init (void)
+{
+#if HAVE_PRCTL_SET_NAME
+  /* provide a more sensible "thread name" */
+  char name[16 + 1];
+  const int namelen = sizeof (name) - 1;
+  int len;
+
+  prctl (PR_GET_NAME, (unsigned long)name, 0, 0, 0);
+  name [namelen] = 0;
+  len = strlen (name);
+  strcpy (name + (len <= namelen - 4 ? len : namelen - 4), "/eio");
+  prctl (PR_SET_NAME, (unsigned long)name, 0, 0, 0);
+#endif
+}
+
+X_THREAD_PROC (etp_proc)
+{
+  ETP_REQ *req;
+  struct timespec ts;
+  etp_worker *self = (etp_worker *)thr_arg;
+  etp_pool pool = self->pool;
+
+  etp_proc_init ();
+
+  /* try to distribute timeouts somewhat evenly */
+  ts.tv_nsec = ((unsigned long)self & 1023UL) * (1000000000UL / 1024UL);
+
+  for (;;)
+    {
+      ts.tv_sec = 0;
+
+      X_LOCK (pool->reqlock);
+
+      for (;;)
+        {
+          req = reqq_shift (&pool->req_queue);
+
+          if (ecb_expect_true (req))
+            break;
+
+          if (ts.tv_sec == 1) /* no request, but timeout detected, let's quit */
+            {
+              X_UNLOCK (pool->reqlock);
+              X_LOCK (pool->wrklock);
+              --pool->started;
+              X_UNLOCK (pool->wrklock);
+              goto quit;
+            }
+
+          ++pool->idle;
+
+          if (pool->idle <= pool->max_idle)
+            /* we are allowed to idle, so do so without any timeout */
+            X_COND_WAIT (pool->reqwait, pool->reqlock);
+          else
+            {
+              /* initialise timeout once */
+              if (!ts.tv_sec)
+                ts.tv_sec = time (0) + pool->idle_timeout;
+
+              if (X_COND_TIMEDWAIT (pool->reqwait, pool->reqlock, ts) == ETIMEDOUT)
+                ts.tv_sec = 1; /* assuming this is not a value computed above... */
+            }
+
+          --pool->idle;
+        }
+
+      --pool->nready;
+
+      X_UNLOCK (pool->reqlock);
+
+      if (ecb_expect_false (req->type == ETP_TYPE_QUIT))
+        goto quit;
+
+      ETP_EXECUTE (self, req);
+
+      X_LOCK (pool->reslock);
+
+      ++pool->npending;
+
+      if (!reqq_push (&pool->res_queue, req))
+        ETP_WANT_POLL (pool);
+
+      etp_worker_clear (self);
+
+      X_UNLOCK (pool->reslock);
+    }
+
+quit:
+  free (req);
+
+  X_LOCK (pool->wrklock);
+  etp_worker_free (self);
+  X_UNLOCK (pool->wrklock);
+
+  return 0;
+}
 
 static void ecb_cold
-etp_start_thread (void)
+etp_start_thread (etp_pool pool)
 {
   etp_worker *wrk = calloc (1, sizeof (etp_worker));
 
   /*TODO*/
   assert (("unable to allocate worker thread data", wrk));
 
-  X_LOCK (wrklock);
+  wrk->pool = pool;
+
+  X_LOCK (pool->wrklock);
 
   if (xthread_create (&wrk->tid, etp_proc, (void *)wrk))
     {
-      wrk->prev = &wrk_first;
-      wrk->next = wrk_first.next;
-      wrk_first.next->prev = wrk;
-      wrk_first.next = wrk;
-      ++started;
+      wrk->prev = &pool->wrk_first;
+      wrk->next = pool->wrk_first.next;
+      pool->wrk_first.next->prev = wrk;
+      pool->wrk_first.next = wrk;
+      ++pool->started;
    }
   else
     free (wrk);
 
-  X_UNLOCK (wrklock);
+  X_UNLOCK (pool->wrklock);
 }
 
 static void
-etp_maybe_start_thread (void)
+etp_maybe_start_thread (etp_pool pool)
 {
-  if (ecb_expect_true (etp_nthreads () >= wanted))
+  if (ecb_expect_true (etp_nthreads (pool) >= pool->wanted))
     return;
 
-  /* todo: maybe use idle here, but might be less exact */
-  if (ecb_expect_true (0 <= (int)etp_nthreads () + (int)etp_npending () - (int)etp_nreqs ()))
+  /* todo: maybe use pool->idle here, but might be less exact */
+  if (ecb_expect_true (0 <= (int)etp_nthreads (pool) + (int)etp_npending (pool) - (int)etp_nreqs (pool)))
    return;
 
-  etp_start_thread ();
+  etp_start_thread (pool);
 }
 
 static void ecb_cold
-etp_end_thread (void)
+etp_end_thread (etp_pool pool)
 {
   ETP_REQ *req = calloc (1, sizeof (ETP_REQ)); /* will be freed by worker */
 
   req->type = ETP_TYPE_QUIT;
   req->pri  = ETP_PRI_MAX - ETP_PRI_MIN;
 
-  X_LOCK (reqlock);
-  reqq_push (&req_queue, req);
-  X_COND_SIGNAL (reqwait);
-  X_UNLOCK (reqlock);
+  X_LOCK (pool->reqlock);
+  reqq_push (&pool->req_queue, req);
+  X_COND_SIGNAL (pool->reqwait);
+  X_UNLOCK (pool->reqlock);
 
-  X_LOCK (wrklock);
-  --started;
-  X_UNLOCK (wrklock);
+  X_LOCK (pool->wrklock);
+  --pool->started;
+  X_UNLOCK (pool->wrklock);
 }
 
 ETP_API_DECL int
-etp_poll (void)
+etp_poll (etp_pool pool)
 {
   unsigned int maxreqs;
   unsigned int maxtime;
   struct timeval tv_start, tv_now;
 
-  X_LOCK (reslock);
-  maxreqs = max_poll_reqs;
-  maxtime = max_poll_time;
-  X_UNLOCK (reslock);
+  X_LOCK (pool->reslock);
+  maxreqs = pool->max_poll_reqs;
+  maxtime = pool->max_poll_time;
+  X_UNLOCK (pool->reslock);
 
   if (maxtime)
     gettimeofday (&tv_start, 0);
@@ -338,31 +482,31 @@ etp_poll (void)
     {
       ETP_REQ *req;
 
-      etp_maybe_start_thread ();
+      etp_maybe_start_thread (pool);
 
-      X_LOCK (reslock);
-      req = reqq_shift (&res_queue);
+      X_LOCK (pool->reslock);
+      req = reqq_shift (&pool->res_queue);
 
-      if (req)
+      if (ecb_expect_true (req))
        {
-          --npending;
+          --pool->npending;
 
-          if (!res_queue.size && done_poll_cb)
-            done_poll_cb ();
+          if (!pool->res_queue.size)
+            ETP_DONE_POLL (pool);
        }
 
-      X_UNLOCK (reslock);
+      X_UNLOCK (pool->reslock);
 
-      if (!req)
+      if (ecb_expect_false (!req))
        return 0;
 
-      X_LOCK (reqlock);
-      --nreqs;
-      X_UNLOCK (reqlock);
+      X_LOCK (pool->reqlock);
+      --pool->nreqs;
+      X_UNLOCK (pool->reqlock);
 
       if (ecb_expect_false (req->type == ETP_TYPE_GROUP && req->size))
        {
-          req->int1 = 1; /* mark request as delayed */
+          req->flags |= ETP_FLAG_DELAYED; /* mark request as delayed */
          continue;
        }
      else
@@ -389,25 +533,25 @@ etp_poll (void)
 }
 
 ETP_API_DECL void
-etp_grp_cancel (ETP_REQ *grp);
+etp_grp_cancel (etp_pool pool, ETP_REQ *grp);
 
 ETP_API_DECL void
-etp_cancel (ETP_REQ *req)
+etp_cancel (etp_pool pool, ETP_REQ *req)
 {
   req->cancelled = 1;
 
-  etp_grp_cancel (req);
+  etp_grp_cancel (pool, req);
 }
 
 ETP_API_DECL void
-etp_grp_cancel (ETP_REQ *grp)
+etp_grp_cancel (etp_pool pool, ETP_REQ *grp)
 {
   for (grp = grp->grp_first; grp; grp = grp->grp_next)
-    etp_cancel (grp);
+    etp_cancel (pool, grp);
 }
 
 ETP_API_DECL void
-etp_submit (ETP_REQ *req)
+etp_submit (etp_pool pool, ETP_REQ *req)
 {
   req->pri -= ETP_PRI_MIN;
 
@@ -417,78 +561,78 @@ etp_submit (ETP_REQ *req)
   if (ecb_expect_false (req->type == ETP_TYPE_GROUP))
     {
       /* I hope this is worth it :/ */
-      X_LOCK (reqlock);
-      ++nreqs;
-      X_UNLOCK (reqlock);
+      X_LOCK (pool->reqlock);
+      ++pool->nreqs;
+      X_UNLOCK (pool->reqlock);
 
-      X_LOCK (reslock);
+      X_LOCK (pool->reslock);
 
-      ++npending;
+      ++pool->npending;
 
-      if (!reqq_push (&res_queue, req) && want_poll_cb)
-        want_poll_cb ();
+      if (!reqq_push (&pool->res_queue, req))
+        ETP_WANT_POLL (pool);
 
-      X_UNLOCK (reslock);
+      X_UNLOCK (pool->reslock);
    }
   else
    {
-      X_LOCK (reqlock);
-      ++nreqs;
-      ++nready;
-      reqq_push (&req_queue, req);
-      X_COND_SIGNAL (reqwait);
-      X_UNLOCK (reqlock);
-
-      etp_maybe_start_thread ();
+      X_LOCK (pool->reqlock);
+      ++pool->nreqs;
+      ++pool->nready;
+      reqq_push (&pool->req_queue, req);
+      X_COND_SIGNAL (pool->reqwait);
+      X_UNLOCK (pool->reqlock);
+
+      etp_maybe_start_thread (pool);
    }
 }
 
 ETP_API_DECL void ecb_cold
-etp_set_max_poll_time (double nseconds)
+etp_set_max_poll_time (etp_pool pool, double seconds)
 {
-  if (WORDACCESS_UNSAFE) X_LOCK   (reslock);
-  max_poll_time = nseconds * ETP_TICKS;
-  if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
+  if (WORDACCESS_UNSAFE) X_LOCK   (pool->reslock);
+  pool->max_poll_time = seconds * ETP_TICKS;
+  if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reslock);
 }
 
 ETP_API_DECL void ecb_cold
-etp_set_max_poll_reqs (unsigned int maxreqs)
+etp_set_max_poll_reqs (etp_pool pool, unsigned int maxreqs)
 {
-  if (WORDACCESS_UNSAFE) X_LOCK   (reslock);
-  max_poll_reqs = maxreqs;
-  if (WORDACCESS_UNSAFE) X_UNLOCK (reslock);
+  if (WORDACCESS_UNSAFE) X_LOCK   (pool->reslock);
+  pool->max_poll_reqs = maxreqs;
+  if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reslock);
 }
 
 ETP_API_DECL void ecb_cold
-etp_set_max_idle (unsigned int nthreads)
+etp_set_max_idle (etp_pool pool, unsigned int threads)
 {
-  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
-  max_idle = nthreads;
-  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
+  if (WORDACCESS_UNSAFE) X_LOCK   (pool->reqlock);
+  pool->max_idle = threads;
+  if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock);
 }
 
 ETP_API_DECL void ecb_cold
-etp_set_idle_timeout (unsigned int seconds)
+etp_set_idle_timeout (etp_pool pool, unsigned int seconds)
 {
-  if (WORDACCESS_UNSAFE) X_LOCK   (reqlock);
-  idle_timeout = seconds;
-  if (WORDACCESS_UNSAFE) X_UNLOCK (reqlock);
+  if (WORDACCESS_UNSAFE) X_LOCK   (pool->reqlock);
+  pool->idle_timeout = seconds;
+  if (WORDACCESS_UNSAFE) X_UNLOCK (pool->reqlock);
 }
 
 ETP_API_DECL void ecb_cold
-etp_set_min_parallel (unsigned int nthreads)
+etp_set_min_parallel (etp_pool pool, unsigned int threads)
 {
-  if (wanted < nthreads)
-    wanted = nthreads;
+  if (pool->wanted < threads)
+    pool->wanted = threads;
 }
 
 ETP_API_DECL void ecb_cold
-etp_set_max_parallel (unsigned int nthreads)
+etp_set_max_parallel (etp_pool pool, unsigned int threads)
 {
-  if (wanted > nthreads)
-    wanted = nthreads;
+  if (pool->wanted > threads)
+    pool->wanted = threads;
 
-  while (started > wanted)
-    etp_end_thread ();
+  while (pool->started > pool->wanted)
+    etp_end_thread (pool);
 }
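
Every etp entry point now takes an explicit etp_pool, so several independent
pools can coexist; eio.c simply funnels everything through its static
EIO_POOL. A hedged sketch of standalone use - the callback bodies are
assumptions, and it presumes the translation unit is wired up the way eio.c
does it (ETP_REQ and friends defined before including etp.c):

    /* wake the consumer: results were pushed to this pool's res_queue */
    static void my_want_poll (void *userdata) { /* ... */ }
    /* consumer caught up: the res_queue is empty again */
    static void my_done_poll (void *userdata) { /* ... */ }

    static struct etp_pool my_pool;

    static void
    pool_setup (void)
    {
      etp_init (&my_pool, 0 /* userdata */, my_want_poll, my_done_poll);
      etp_set_min_parallel (&my_pool, 2);
      etp_set_idle_timeout (&my_pool, 10);
    }
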
diff --git a/third_party/libeio/libeio.m4 b/third_party/libeio/libeio.m4
index c65a39997b2ba9293f3780384a2b60d943899513..d4121b7a7eca4cebe7d2154165eaf34bd6dc9642 100644
--- a/third_party/libeio/libeio.m4
+++ b/third_party/libeio/libeio.m4
@@ -193,3 +193,15 @@ int main (void)
 ])],ac_cv_linux_splice=yes,ac_cv_linux_splice=no)])
 test $ac_cv_linux_splice = yes && AC_DEFINE(HAVE_LINUX_SPLICE, 1, splice/vmsplice/tee(2) are available)
 
+AC_CACHE_CHECK(for pipe2, ac_cv_pipe2, [AC_LINK_IFELSE([AC_LANG_SOURCE([[
+#include <fcntl.h>
+#include <unistd.h>
+int res;
+int main (void)
+{
+   res = pipe2 (0, 0);
+   return 0;
+}
+]])],ac_cv_pipe2=yes,ac_cv_pipe2=no)])
+test $ac_cv_pipe2 = yes && AC_DEFINE(HAVE_PIPE2, 1, pipe2(2) is available)
+
diff --git a/third_party/libeio/xthread.h b/third_party/libeio/xthread.h
index 86ae73699c410b7aa2a359351d1907421310a186..c04518a9b37604a1cb0e900015b63ed4ba3fcd03 100644
--- a/third_party/libeio/xthread.h
+++ b/third_party/libeio/xthread.h
@@ -169,5 +169,18 @@ xthread_create (xthread_t *tid, void *(*proc)(void *), void *arg)
 
 #endif
 
+#if __linux && __GNUC__ >= 4 && __GLIBC__ >= 2 && __GLIBC_MINOR__ >= 3 && 0 /* also check arch */
+/* __thread has little to no advantage over pthread_* in most configurations, so this is not used */
+# define X_TLS_DECLARE(varname)   __thread void *varname
+# define X_TLS_INIT(varname)
+# define X_TLS_SET(varname,value) varname = (value)
+# define X_TLS_GET(varname)       varname
+#else
+# define X_TLS_DECLARE(varname)   pthread_key_t varname
+# define X_TLS_INIT(varname)      do { if (pthread_key_create (&(varname), 0)) abort (); } while (0)
+# define X_TLS_SET(varname,value) pthread_setspecific (varname, (value))
+# define X_TLS_GET(varname)       pthread_getspecific (varname)
+#endif
+
 #endif
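
The new X_TLS_* macros wrap either __thread (the first branch, currently
disabled by the trailing "&& 0") or a pthread key. A hedged usage sketch of
the pthread branch; the variable name and stored value are illustrative:

    #include <pthread.h>
    #include "xthread.h"

    static X_TLS_DECLARE (current_worker);   /* expands to a pthread_key_t */

    static void
    tls_demo (void)
    {
      X_TLS_INIT (current_worker);             /* create the key once */
      X_TLS_SET (current_worker, (void *)42);  /* per-thread value */
      void *v = X_TLS_GET (current_worker);    /* read it back */
      (void)v;
    }
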