diff --git a/docs/refguide.xml b/docs/refguide.xml
index c57694cfe..39cd17336 100644
--- a/docs/refguide.xml
+++ b/docs/refguide.xml
@@ -2884,7 +2884,7 @@ worth the extra time.
-<term><option>--nsock-engine <replaceable>epoll|select</replaceable></option></term>
+<term><option>--nsock-engine <replaceable>epoll|kqueue|poll|select</replaceable></option></term>
Nsock IO engine
@@ -2894,7 +2894,9 @@ worth the extra time.
select(2)-based fallback engine is guaranteed to be
available on your system. Engines are named after the IO
management facility they leverage. Engines currently implemented are
-epoll and select.
+epoll, kqueue, poll,
+and select, but not every engine is available on every platform.
+Use nmap -V to see which engines are supported.
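
A quick sketch of the library-level counterpart to this documentation, for readers embedding nsock directly. nsock_list_engines() is part of this change set; nsock_set_default_engine() is assumed here from the public nsock.h API, and its exact return convention is a guess:

    #include <stdio.h>
    #include "nsock.h"

    int main(void) {
      /* Space-separated list of engines compiled into this build,
       * e.g. "epoll poll select" on a typical Linux system. */
      printf("Available nsock engines: %s\n", nsock_list_engines());

      /* Assumed helper (not introduced by this diff): prefer a given
       * engine; taken here to return -1 when it is not compiled in. */
      if (nsock_set_default_engine("poll") == -1)
        fprintf(stderr, "poll engine not available in this build\n");
      return 0;
    }
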
diff --git a/nsock/include/nsock_config.h.in b/nsock/include/nsock_config.h.in
index 08b4d9658..09ec194ee 100644
--- a/nsock/include/nsock_config.h.in
+++ b/nsock/include/nsock_config.h.in
@@ -78,4 +78,5 @@
#undef HAVE_SSL_SET_TLSEXT_HOST_NAME
#undef HAVE_EPOLL
-
+#undef HAVE_POLL
+#undef HAVE_KQUEUE
diff --git a/nsock/include/nsock_winconfig.h b/nsock/include/nsock_winconfig.h
index d69a74081..b9c4c024d 100644
--- a/nsock/include/nsock_winconfig.h
+++ b/nsock/include/nsock_winconfig.h
@@ -97,5 +97,6 @@
#define HAVE_PCAP 1
#endif
-#endif /* NSOCK_WINCONFIG_H */
\ No newline at end of file
+#define HAVE_POLL 1
+#endif /* NSOCK_WINCONFIG_H */
diff --git a/nsock/nsock.vcxproj b/nsock/nsock.vcxproj
index 813fd90c5..8637cba3a 100644
--- a/nsock/nsock.vcxproj
+++ b/nsock/nsock.vcxproj
@@ -183,6 +183,8 @@
+    <ClCompile Include="src\engine_kqueue.c" />
+    <ClCompile Include="src\engine_poll.c" />
@@ -220,4 +222,4 @@
-</Project>
\ No newline at end of file
+</Project>
diff --git a/nsock/src/Makefile.in b/nsock/src/Makefile.in
index ec32b9c97..7fdc856ce 100644
--- a/nsock/src/Makefile.in
+++ b/nsock/src/Makefile.in
@@ -27,9 +27,9 @@ NBASEDIR=@NBASEDIR@
TARGET = libnsock.a
-SRCS = error.c filespace.c gh_list.c nsock_connect.c nsock_core.c nsock_iod.c nsock_read.c nsock_timers.c nsock_write.c nsock_ssl.c nsock_event.c nsock_pool.c netutils.c nsock_pcap.c nsock_engines.c engine_select.c engine_epoll.c @COMPAT_SRCS@
+SRCS = error.c filespace.c gh_list.c nsock_connect.c nsock_core.c nsock_iod.c nsock_read.c nsock_timers.c nsock_write.c nsock_ssl.c nsock_event.c nsock_pool.c netutils.c nsock_pcap.c nsock_engines.c engine_select.c engine_epoll.c engine_kqueue.c engine_poll.c @COMPAT_SRCS@
-OBJS = error.o filespace.o gh_list.o nsock_connect.o nsock_core.o nsock_iod.o nsock_read.o nsock_timers.o nsock_write.o nsock_ssl.o nsock_event.o nsock_pool.o netutils.o nsock_pcap.o nsock_engines.o engine_select.o engine_epoll.o @COMPAT_OBJS@
+OBJS = error.o filespace.o gh_list.o nsock_connect.o nsock_core.o nsock_iod.o nsock_read.o nsock_timers.o nsock_write.o nsock_ssl.o nsock_event.o nsock_pool.o netutils.o nsock_pcap.o nsock_engines.o engine_select.o engine_epoll.o engine_kqueue.o engine_poll.o @COMPAT_OBJS@
DEPS = error.h filespace.h gh_list.h nsock_internal.h netutils.h nsock_pcap.h ../include/nsock.h $(NBASEDIR)/libnbase.a
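
Both new sources are listed unconditionally; they rely on their configure-generated guards to reduce to empty translation units where the facility is missing. A minimal sketch of the pattern, using a hypothetical engine_foo.c:

    /* engine_foo.c -- same guard layout as engine_kqueue.c/engine_poll.c */
    #ifdef HAVE_CONFIG_H
    #include "nsock_config.h"   /* may define HAVE_FOO on capable systems */
    #endif

    #if HAVE_FOO
    /* ...entire engine implementation lives here... */
    #endif /* HAVE_FOO -- elsewhere this compiles to an empty object file */
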
diff --git a/nsock/src/acinclude.m4 b/nsock/src/acinclude.m4
index 07ceb49f1..9a972cc97 100644
--- a/nsock/src/acinclude.m4
+++ b/nsock/src/acinclude.m4
@@ -102,3 +102,19 @@ rc = epoll_pwait(fd, &ev, 1, 0, (sigset_t const *)(0));])],
$1],[AC_MSG_RESULT([no])
$2])
])dnl
+
+AC_DEFUN([AX_HAVE_POLL], [dnl
+ AC_MSG_CHECKING([for poll(2)])
+ AC_CACHE_VAL([ax_cv_have_poll], [dnl
+ AC_LINK_IFELSE([dnl
+ AC_LANG_PROGRAM(
+ [#include <poll.h>],
+ [int rc; rc = poll((struct pollfd *)(0), 0, 0);])],
+ [ax_cv_have_poll=yes],
+ [ax_cv_have_poll=no])])
+ AS_IF([test "${ax_cv_have_poll}" = "yes"],
+ [AC_MSG_RESULT([yes])
+$1],[AC_MSG_RESULT([no])
+$2])
+])dnl
+
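
For reference, the AC_LINK_IFELSE test above asks the compiler to link roughly the following program; if that succeeds, ax_cv_have_poll is cached as yes and HAVE_POLL gets defined by the caller:

    #include <poll.h>

    int main(void) {
      int rc;
      rc = poll((struct pollfd *)(0), 0, 0);
      return rc;
    }
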
diff --git a/nsock/src/configure.ac b/nsock/src/configure.ac
index 9d956b0f7..6cd72fce1 100644
--- a/nsock/src/configure.ac
+++ b/nsock/src/configure.ac
@@ -113,6 +113,8 @@ if test "$pcap_enabled" != "no"; then
fi
AX_HAVE_EPOLL([AC_DEFINE(HAVE_EPOLL)], )
+AX_HAVE_POLL([AC_DEFINE(HAVE_POLL)], )
+AC_CHECK_FUNCS(kqueue kevent, [AC_DEFINE(HAVE_KQUEUE)], )
dnl Checks for programs.
AC_PROG_CC
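
Note that AC_CHECK_FUNCS runs its action-if-found once per discovered function, so HAVE_KQUEUE is defined as soon as either kqueue or kevent links. The generated conftest is roughly the following (autoconf deliberately uses a bogus prototype so the probe tests linkage rather than headers):

    char kqueue(void);

    int main(void) {
      return kqueue();
    }
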
diff --git a/nsock/src/engine_kqueue.c b/nsock/src/engine_kqueue.c
new file mode 100644
index 000000000..6c0a56d4c
--- /dev/null
+++ b/nsock/src/engine_kqueue.c
@@ -0,0 +1,391 @@
+/***************************************************************************
+ * engine_kqueue.c -- BSD kqueue(2) based IO engine. *
+ * *
+ ***********************IMPORTANT NSOCK LICENSE TERMS***********************
+ * *
+ * The nsock parallel socket event library is (C) 1999-2012 Insecure.Com *
+ * LLC This library is free software; you may redistribute and/or *
+ * modify it under the terms of the GNU General Public License as *
+ * published by the Free Software Foundation; Version 2. This guarantees *
+ * your right to use, modify, and redistribute this software under certain *
+ * conditions. If this license is unacceptable to you, Insecure.Com LLC *
+ * may be willing to sell alternative licenses (contact *
+ * sales@insecure.com ). *
+ * *
+ * As a special exception to the GPL terms, Insecure.Com LLC grants *
+ * permission to link the code of this program with any version of the *
+ * OpenSSL library which is distributed under a license identical to that *
+ * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
+ * linked combinations including the two. You must obey the GNU GPL in all *
+ * respects for all of the code used other than OpenSSL. If you modify *
+ * this file, you may extend this exception to your version of the file, *
+ * but you are not obligated to do so. *
+ * *
+ * If you received these files with a written license agreement stating *
+ * terms other than the (GPL) terms above, then that alternative license *
+ * agreement takes precedence over this comment. *
+ * *
+ * Source is provided to this software because we believe users have a *
+ * right to know exactly what a program is going to do before they run it. *
+ * This also allows you to audit the software for security holes (none *
+ * have been found so far). *
+ * *
+ * Source code also allows you to port Nmap to new platforms, fix bugs, *
+ * and add new features. You are highly encouraged to send your changes *
+ * to nmap-dev@insecure.org for possible incorporation into the main *
+ * distribution. By sending these changes to Fyodor or one of the *
+ * Insecure.Org development mailing lists, it is assumed that you are *
+ * offering the Nmap Project (Insecure.Com LLC) the unlimited, *
+ * non-exclusive right to reuse, modify, and relicense the code. Nmap *
+ * will always be available Open Source, but this is important because the *
+ * inability to relicense code has caused devastating problems for other *
+ * Free Software projects (such as KDE and NASM). We also occasionally *
+ * relicense the code to third parties as discussed above. If you wish to *
+ * specify special license conditions of your contributions, just say so *
+ * when you send them. *
+ * *
+ * This program is distributed in the hope that it will be useful, but *
+ * WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * General Public License v2.0 for more details *
+ * (http://www.gnu.org/licenses/gpl-2.0.html). *
+ * *
+ ***************************************************************************/
+
+/* $Id$ */
+
+#ifdef HAVE_CONFIG_H
+#include "nsock_config.h"
+#endif
+
+#if HAVE_KQUEUE
+
+#include <sys/types.h>
+#include <sys/event.h>
+#include <sys/time.h>
+#include <errno.h>
+
+#include "nsock_internal.h"
+
+#if HAVE_PCAP
+#include "nsock_pcap.h"
+#endif
+
+#define INITIAL_EV_COUNT 128
+
+
+/* --- ENGINE INTERFACE PROTOTYPES --- */
+static int kqueue_init(mspool *nsp);
+static void kqueue_destroy(mspool *nsp);
+static int kqueue_iod_register(mspool *nsp, msiod *iod, int ev);
+static int kqueue_iod_unregister(mspool *nsp, msiod *iod);
+static int kqueue_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr);
+static int kqueue_loop(mspool *nsp, int msec_timeout);
+
+
+/* ---- ENGINE DEFINITION ---- */
+struct io_engine engine_kqueue = {
+ "kqueue",
+ kqueue_init,
+ kqueue_destroy,
+ kqueue_iod_register,
+ kqueue_iod_unregister,
+ kqueue_iod_modify,
+ kqueue_loop
+};
+
+
+/* --- INTERNAL PROTOTYPES --- */
+static void iterate_through_event_lists(mspool *nsp, int evcount);
+
+/* defined in nsock_core.c */
+void process_iod_events(mspool *nsp, msiod *nsi, int ev);
+void process_event(mspool *nsp, gh_list *evlist, msevent *nse, int ev);
+#if HAVE_PCAP
+#ifndef PCAP_CAN_DO_SELECT
+int pcap_read_on_nonselect(mspool *nsp);
+#endif
+#endif
+
+/* defined in nsock_event.c */
+void update_first_events(msevent *nse);
+
+
+extern struct timeval nsock_tod;
+
+
+/*
+ * Engine specific data structure
+ */
+struct kqueue_engine_info {
+ int kqfd;
+ int maxfd;
+ size_t evlen;
+ struct kevent *events;
+};
+
+
+int kqueue_init(mspool *nsp) {
+ struct kqueue_engine_info *kinfo;
+
+ kinfo = (struct kqueue_engine_info *)safe_malloc(sizeof(struct kqueue_engine_info));
+
+ kinfo->kqfd = kqueue();
+ kinfo->maxfd = -1;
+ kinfo->evlen = INITIAL_EV_COUNT;
+ kinfo->events = (struct kevent *)safe_malloc(INITIAL_EV_COUNT * sizeof(struct kevent));
+
+ nsp->engine_data = (void *)kinfo;
+
+ return 1;
+}
+
+void kqueue_destroy(mspool *nsp) {
+ struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
+
+ assert(kinfo != NULL);
+ close(kinfo->kqfd);
+ free(kinfo->events);
+ free(kinfo);
+}
+
+int kqueue_iod_register(mspool *nsp, msiod *iod, int ev) {
+ struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
+
+ assert(!IOD_PROPGET(iod, IOD_REGISTERED));
+
+ IOD_PROPSET(iod, IOD_REGISTERED);
+ iod->watched_events = EV_NONE;
+
+ kqueue_iod_modify(nsp, iod, ev, EV_NONE);
+
+ if (nsi_getsd(iod) > kinfo->maxfd)
+ kinfo->maxfd = nsi_getsd(iod);
+
+ return 1;
+}
+
+int kqueue_iod_unregister(mspool *nsp, msiod *iod) {
+ struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
+
+ /* Some IODs can be unregistered here if they're associated with an event
+ * that was immediately completed. */
+ if (IOD_PROPGET(iod, IOD_REGISTERED)) {
+ kqueue_iod_modify(nsp, iod, EV_NONE, EV_READ|EV_WRITE);
+ IOD_PROPCLR(iod, IOD_REGISTERED);
+
+ if (nsi_getsd(iod) == kinfo->maxfd)
+ kinfo->maxfd--;
+ }
+ iod->watched_events = EV_NONE;
+ return 1;
+}
+
+#define EV_SETFLAG(_set, _ev) (((_set) & (_ev)) ? (EV_ADD|EV_ENABLE) : (EV_ADD|EV_DISABLE))
+
+int kqueue_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr) {
+ struct kevent kev[2];
+ int new_events, i;
+ struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
+
+ assert((ev_set & ev_clr) == 0);
+ assert(IOD_PROPGET(iod, IOD_REGISTERED));
+
+ new_events = iod->watched_events;
+ new_events |= ev_set;
+ new_events &= ~ev_clr;
+
+ if (new_events == iod->watched_events)
+ return 1; /* nothing to do */
+
+ i = 0;
+ if ((ev_set ^ ev_clr) & EV_READ) {
+ EV_SET(&kev[i], nsi_getsd(iod), EVFILT_READ, EV_SETFLAG(ev_set, EV_READ), 0, 0, (void *)iod);
+ i++;
+ }
+ if ((ev_set ^ ev_clr) & EV_WRITE) {
+ EV_SET(&kev[i], nsi_getsd(iod), EVFILT_WRITE, EV_SETFLAG(ev_set, EV_WRITE), 0, 0, (void *)iod);
+ i++;
+ }
+
+ if (i > 0 && kevent(kinfo->kqfd, kev, i, NULL, 0, NULL) < 0)
+ fatal("Unable to update events for IOD #%lu: %s", iod->id, strerror(errno));
+
+ iod->watched_events = new_events;
+ return 1;
+}
+
+int kqueue_loop(mspool *nsp, int msec_timeout) {
+ int results_left = 0;
+ int event_msecs; /* msecs before an event goes off */
+ int combined_msecs;
+ struct timespec ts, *ts_p;
+ int sock_err = 0;
+ struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
+
+ assert(msec_timeout >= -1);
+
+ if (nsp->events_pending == 0)
+ return 0; /* No need to wait on 0 events ... */
+
+
+ if (GH_LIST_COUNT(&nsp->active_iods) > kinfo->evlen) {
+ kinfo->evlen = GH_LIST_COUNT(&nsp->active_iods) * 2;
+ kinfo->events = (struct kevent *)safe_realloc(kinfo->events, kinfo->evlen * sizeof(struct kevent));
+ }
+
+ do {
+ if (nsp->tracelevel > 6)
+ nsock_trace(nsp, "wait_for_events");
+
+ if (nsp->next_ev.tv_sec == 0)
+ event_msecs = -1; /* None of the events specified a timeout */
+ else
+ event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nsp->next_ev, nsock_tod));
+
+#if HAVE_PCAP
+#ifndef PCAP_CAN_DO_SELECT
+ /* Force a low timeout when capturing packets on systems where
+ * the pcap descriptor is not select()able. */
+ if (GH_LIST_COUNT(&nsp->pcap_read_events) > 0)
+ if (event_msecs > PCAP_POLL_INTERVAL)
+ event_msecs = PCAP_POLL_INTERVAL;
+#endif
+#endif
+
+ /* We cast to unsigned because we want -1 to be very high (since it means no
+ * timeout) */
+ combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);
+
+ /* Set up the timeval pointer we will give to kevent() */
+ memset(&ts, 0, sizeof(struct timespec));
+ if (combined_msecs >= 0) {
+ ts.tv_sec = combined_msecs / 1000;
+ ts.tv_nsec = (combined_msecs % 1000) * 1000000L;
+ ts_p = &ts;
+ } else {
+ ts_p = NULL;
+ }
+
+#if HAVE_PCAP
+#ifndef PCAP_CAN_DO_SELECT
+ /* Do a non-blocking read on pcap devices that don't support select().
+ * If anything was read, just leave this loop. */
+ if (pcap_read_on_nonselect(nsp)) {
+ /* okay, something was read. */
+ } else
+#endif
+#endif
+ {
+ results_left = kevent(kinfo->kqfd, NULL, 0, kinfo->events, kinfo->evlen, ts_p);
+ if (results_left == -1)
+ sock_err = socket_errno();
+ }
+
+ gettimeofday(&nsock_tod, NULL); /* Due to kevent delay */
+ } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */
+
+ if (results_left == -1 && sock_err != EINTR) {
+ nsock_trace(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
+ nsp->errnum = sock_err;
+ return -1;
+ }
+
+ iterate_through_event_lists(nsp, results_left);
+
+ return 1;
+}
+
+
+/* ---- INTERNAL FUNCTIONS ---- */
+
+static int get_nsock_event(msiod *nsi, const struct kevent *kev) {
+ int evmask = EV_NONE;
+
+ /* generate the corresponding event mask with nsock event flags */
+ if (kev->flags & EV_ERROR) {
+ evmask |= EV_EXCEPT;
+
+ if (kev->data == EPIPE && (nsi->watched_events & EV_READ))
+ evmask |= EV_READ;
+ } else {
+ switch (kev->filter) {
+ case EVFILT_READ:
+ evmask |= EV_READ;
+ break;
+
+ case EVFILT_WRITE:
+ evmask |= EV_WRITE;
+ break;
+
+ default:
+ fatal("Unsupported filter value: %d\n", (int)kev->filter);
+ }
+ }
+ return evmask;
+}
+
+/* Iterate through all the event lists (such as connect_events, read_events,
+ * timer_events, etc) and take action for those that have completed (due to
+ * timeout, i/o, etc) */
+void iterate_through_event_lists(mspool *nsp, int evcount) {
+ int n;
+ struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
+ gh_list_elem *current, *next, *last, *timer_last;
+ msevent *nse;
+ msiod *nsi;
+
+ /* Clear it -- We will find the next event as we go through the list */
+ nsp->next_ev.tv_sec = 0;
+
+ last = GH_LIST_LAST_ELEM(&nsp->active_iods);
+ timer_last = GH_LIST_LAST_ELEM(&nsp->timer_events);
+
+ for (n = 0; n < evcount; n++) {
+ struct kevent *kev = &kinfo->events[n];
+ int evmask;
+
+ nsi = (msiod *)kev->udata;
+
+ /* process all the pending events for this IOD */
+ evmask = get_nsock_event(nsi, kev);
+ process_iod_events(nsp, nsi, evmask);
+
+ IOD_PROPSET(nsi, IOD_PROCESSED);
+ }
+
+ current = GH_LIST_FIRST_ELEM(&nsp->active_iods);
+
+ /* cull timeouts amongst the non active IODs */
+ while (current != NULL && GH_LIST_ELEM_PREV(current) != last) {
+ msiod *nsi = (msiod *)GH_LIST_ELEM_DATA(current);
+
+ if (IOD_PROPGET(nsi, IOD_PROCESSED))
+ IOD_PROPCLR(nsi, IOD_PROCESSED);
+ else if (nsi->state != NSIOD_STATE_DELETED && nsi->events_pending)
+ process_iod_events(nsp, nsi, EV_NONE);
+
+ next = GH_LIST_ELEM_NEXT(current);
+ if (nsi->state == NSIOD_STATE_DELETED) {
+ gh_list_remove_elem(&nsp->active_iods, current);
+ gh_list_prepend(&nsp->free_iods, nsi);
+ }
+ current = next;
+ }
+
+ /* iterate through timers */
+ for (current = GH_LIST_FIRST_ELEM(&nsp->timer_events);
+ current != NULL && GH_LIST_ELEM_PREV(current) != timer_last; current = next) {
+
+ nse = (msevent *)GH_LIST_ELEM_DATA(current);
+
+ process_event(nsp, &nsp->timer_events, nse, EV_NONE);
+
+ next = GH_LIST_ELEM_NEXT(current);
+ if (nse->event_done)
+ gh_list_remove_elem(&nsp->timer_events, current);
+ }
+}
+
+#endif /* HAVE_KQUEUE */
+
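
A self-contained sketch of the kevent(2) calling convention the engine relies on: kqueue_iod_modify() submits changes with an empty eventlist, while kqueue_loop() harvests readiness with an empty changelist (helper names are illustrative):

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>

    /* Register read interest in fd, stashing udata for later retrieval. */
    static int watch_read(int kq, int fd, void *udata) {
      struct kevent kev;

      EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, udata);
      return kevent(kq, &kev, 1, NULL, 0, NULL);  /* apply change only */
    }

    /* Block for up to ts waiting for one ready event. */
    static int wait_one(int kq, struct kevent *out, const struct timespec *ts) {
      return kevent(kq, NULL, 0, out, 1, ts);     /* harvest events only */
    }

Because the udata pointer set at registration time comes back verbatim in each event, the loop above can map a kevent straight to its msiod without any lookup table.
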
diff --git a/nsock/src/engine_poll.c b/nsock/src/engine_poll.c
new file mode 100644
index 000000000..0bd6f0a1e
--- /dev/null
+++ b/nsock/src/engine_poll.c
@@ -0,0 +1,426 @@
+/***************************************************************************
+ * engine_poll.c -- poll(2) based IO engine. *
+ * *
+ ***********************IMPORTANT NSOCK LICENSE TERMS***********************
+ * *
+ * The nsock parallel socket event library is (C) 1999-2012 Insecure.Com *
+ * LLC This library is free software; you may redistribute and/or *
+ * modify it under the terms of the GNU General Public License as *
+ * published by the Free Software Foundation; Version 2. This guarantees *
+ * your right to use, modify, and redistribute this software under certain *
+ * conditions. If this license is unacceptable to you, Insecure.Com LLC *
+ * may be willing to sell alternative licenses (contact *
+ * sales@insecure.com ). *
+ * *
+ * As a special exception to the GPL terms, Insecure.Com LLC grants *
+ * permission to link the code of this program with any version of the *
+ * OpenSSL library which is distributed under a license identical to that *
+ * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
+ * linked combinations including the two. You must obey the GNU GPL in all *
+ * respects for all of the code used other than OpenSSL. If you modify *
+ * this file, you may extend this exception to your version of the file, *
+ * but you are not obligated to do so. *
+ * *
+ * If you received these files with a written license agreement stating *
+ * terms other than the (GPL) terms above, then that alternative license *
+ * agreement takes precedence over this comment. *
+ * *
+ * Source is provided to this software because we believe users have a *
+ * right to know exactly what a program is going to do before they run it. *
+ * This also allows you to audit the software for security holes (none *
+ * have been found so far). *
+ * *
+ * Source code also allows you to port Nmap to new platforms, fix bugs, *
+ * and add new features. You are highly encouraged to send your changes *
+ * to nmap-dev@insecure.org for possible incorporation into the main *
+ * distribution. By sending these changes to Fyodor or one of the *
+ * Insecure.Org development mailing lists, it is assumed that you are *
+ * offering the Nmap Project (Insecure.Com LLC) the unlimited, *
+ * non-exclusive right to reuse, modify, and relicense the code. Nmap *
+ * will always be available Open Source, but this is important because the *
+ * inability to relicense code has caused devastating problems for other *
+ * Free Software projects (such as KDE and NASM). We also occasionally *
+ * relicense the code to third parties as discussed above. If you wish to *
+ * specify special license conditions of your contributions, just say so *
+ * when you send them. *
+ * *
+ * This program is distributed in the hope that it will be useful, but *
+ * WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
+ * General Public License v2.0 for more details *
+ * (http://www.gnu.org/licenses/gpl-2.0.html). *
+ * *
+ ***************************************************************************/
+
+/* $Id$ */
+
+#ifndef WIN32
+/* Allow the use of POLLRDHUP, if available. */
+#define _GNU_SOURCE
+#endif
+
+#ifdef HAVE_CONFIG_H
+#include "nsock_config.h"
+#endif
+
+#if HAVE_POLL
+
+#ifndef WIN32
+#include <poll.h>
+#else
+#include <winsock2.h>
+#endif /* ^WIN32 */
+
+#include "nsock_internal.h"
+
+#if HAVE_PCAP
+#include "nsock_pcap.h"
+#endif
+
+#define EV_LIST_INIT_SIZE 1024
+
+#ifdef WIN32
+ #define Poll WSAPoll
+ #define POLLFD WSAPOLLFD
+#else
+ #define Poll poll
+ #define POLLFD struct pollfd
+#endif
+
+#define POLL_R_FLAGS (POLLIN | POLLPRI)
+#define POLL_W_FLAGS POLLOUT
+#ifdef POLLRDHUP
+ #define POLL_X_FLAGS (POLLERR | POLLHUP | POLLRDHUP)
+#else
+ /* POLLRDHUP was introduced in Linux 2.6.17 and may be unavailable on older systems. */
+ #define POLL_X_FLAGS (POLLERR | POLLHUP)
+#endif /* POLLRDHUP */
+
+#define LOWER_MAX_FD(pinfo) \
+ do { \
+ pinfo->max_fd--; \
+ } while (pinfo->max_fd >= 0 && pinfo->events[pinfo->max_fd].fd == -1)
+
+
+/* --- ENGINE INTERFACE PROTOTYPES --- */
+static int poll_init(mspool *nsp);
+static void poll_destroy(mspool *nsp);
+static int poll_iod_register(mspool *nsp, msiod *iod, int ev);
+static int poll_iod_unregister(mspool *nsp, msiod *iod);
+static int poll_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr);
+static int poll_loop(mspool *nsp, int msec_timeout);
+
+
+/* ---- ENGINE DEFINITION ---- */
+struct io_engine engine_poll = {
+ "poll",
+ poll_init,
+ poll_destroy,
+ poll_iod_register,
+ poll_iod_unregister,
+ poll_iod_modify,
+ poll_loop
+};
+
+
+/* --- INTERNAL PROTOTYPES --- */
+static void iterate_through_event_lists(mspool *nsp);
+
+/* defined in nsock_core.c */
+void process_iod_events(mspool *nsp, msiod *nsi, int ev);
+void process_event(mspool *nsp, gh_list *evlist, msevent *nse, int ev);
+#if HAVE_PCAP
+#ifndef PCAP_CAN_DO_SELECT
+int pcap_read_on_nonselect(mspool *nsp);
+#endif
+#endif
+
+/* defined in nsock_event.c */
+void update_first_events(msevent *nse);
+
+
+extern struct timeval nsock_tod;
+
+
+/*
+ * Engine specific data structure
+ */
+struct poll_engine_info {
+ int capacity;
+ /* index of the highest active slot in events */
+ int max_fd;
+ POLLFD *events;
+};
+
+
+static inline int evlist_grow(struct poll_engine_info *pinfo) {
+ int i;
+
+ i = pinfo->capacity;
+
+ if (pinfo->capacity == 0) {
+ pinfo->capacity = EV_LIST_INIT_SIZE;
+ pinfo->events = (POLLFD *)safe_malloc(sizeof(POLLFD) * pinfo->capacity);
+ } else {
+ pinfo->capacity *= 2;
+ pinfo->events = (POLLFD *)safe_realloc(pinfo->events, sizeof(POLLFD) * pinfo->capacity);
+ }
+
+ while (i < pinfo->capacity) {
+ pinfo->events[i].fd = -1;
+ pinfo->events[i].events = 0;
+ pinfo->events[i].revents = 0;
+ i++;
+ }
+ return pinfo->capacity;
+}
+
+
+int poll_init(mspool *nsp) {
+ struct poll_engine_info *pinfo;
+
+ pinfo = (struct poll_engine_info *)safe_malloc(sizeof(struct poll_engine_info));
+ pinfo->capacity = 0;
+ pinfo->max_fd = -1;
+ evlist_grow(pinfo);
+
+ nsp->engine_data = (void *)pinfo;
+
+ return 1;
+}
+
+void poll_destroy(mspool *nsp) {
+ struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
+
+ assert(pinfo != NULL);
+ free(pinfo->events);
+ free(pinfo);
+}
+
+int poll_iod_register(mspool *nsp, msiod *iod, int ev) {
+ struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
+ int sd;
+
+ assert(!IOD_PROPGET(iod, IOD_REGISTERED));
+
+ iod->watched_events = ev;
+
+ sd = nsi_getsd(iod);
+ while (pinfo->capacity < sd + 1)
+ evlist_grow(pinfo);
+
+ pinfo->events[sd].fd = sd;
+ pinfo->events[sd].events = 0;
+ pinfo->events[sd].revents = 0;
+
+ pinfo->max_fd = MAX(pinfo->max_fd, sd);
+
+ if (ev & EV_READ)
+ pinfo->events[sd].events |= POLL_R_FLAGS;
+ if (ev & EV_WRITE)
+ pinfo->events[sd].events |= POLL_W_FLAGS;
+ if (ev & EV_EXCEPT)
+ pinfo->events[sd].events |= POLL_X_FLAGS;
+
+ IOD_PROPSET(iod, IOD_REGISTERED);
+ return 1;
+}
+
+int poll_iod_unregister(mspool *nsp, msiod *iod) {
+ iod->watched_events = EV_NONE;
+
+ /* Some IODs can be unregistered here if they're associated with an event
+ * that was immediately completed. */
+ if (IOD_PROPGET(iod, IOD_REGISTERED)) {
+ struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
+ int sd;
+
+ sd = nsi_getsd(iod);
+ pinfo->events[sd].fd = -1;
+ pinfo->events[sd].events = 0;
+ pinfo->events[sd].revents = 0;
+
+ if (pinfo->max_fd == sd) {
+ LOWER_MAX_FD(pinfo);
+ }
+
+ IOD_PROPCLR(iod, IOD_REGISTERED);
+ }
+ return 1;
+}
+
+int poll_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr) {
+ int sd;
+ int new_events;
+ struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
+
+ assert((ev_set & ev_clr) == 0);
+ assert(IOD_PROPGET(iod, IOD_REGISTERED));
+
+ new_events = iod->watched_events;
+ new_events |= ev_set;
+ new_events &= ~ev_clr;
+
+ if (new_events == iod->watched_events)
+ return 1; /* nothing to do */
+
+ iod->watched_events = new_events;
+
+ sd = nsi_getsd(iod);
+
+ pinfo->events[sd].fd = sd;
+ pinfo->events[sd].events = 0;
+
+ /* regenerate the current set of events for this IOD */
+ if (iod->watched_events & EV_READ)
+ pinfo->events[sd].events |= POLL_R_FLAGS;
+ if (iod->watched_events & EV_WRITE)
+ pinfo->events[sd].events |= POLL_W_FLAGS;
+ if (iod->watched_events & EV_EXCEPT)
+ pinfo->events[sd].events |= POLL_X_FLAGS;
+
+ return 1;
+}
+
+int poll_loop(mspool *nsp, int msec_timeout) {
+ int results_left = 0;
+ int event_msecs; /* msecs before an event goes off */
+ int combined_msecs;
+ int sock_err = 0;
+ struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
+
+ assert(msec_timeout >= -1);
+
+ if (nsp->events_pending == 0)
+ return 0; /* No need to wait on 0 events ... */
+
+ do {
+ if (nsp->tracelevel > 6)
+ nsock_trace(nsp, "wait_for_events");
+
+ if (nsp->next_ev.tv_sec == 0)
+ event_msecs = -1; /* None of the events specified a timeout */
+ else
+ event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nsp->next_ev, nsock_tod));
+
+#if HAVE_PCAP
+#ifndef PCAP_CAN_DO_SELECT
+ /* Force a low timeout when capturing packets on systems where
+ * the pcap descriptor is not select()able. */
+ if (GH_LIST_COUNT(&nsp->pcap_read_events) > 0)
+ if (event_msecs > PCAP_POLL_INTERVAL)
+ event_msecs = PCAP_POLL_INTERVAL;
+#endif
+#endif
+
+ /* We cast to unsigned because we want -1 to be very high (since it means no
+ * timeout) */
+ combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);
+
+#if HAVE_PCAP
+#ifndef PCAP_CAN_DO_SELECT
+ /* Do a non-blocking read on pcap devices that don't support select().
+ * If anything was read, just leave this loop. */
+ if (pcap_read_on_nonselect(nsp)) {
+ /* okay, something was read. */
+ } else
+#endif
+#endif
+ {
+ if (pinfo->max_fd > -1)
+ results_left = Poll(pinfo->events, pinfo->max_fd + 1, combined_msecs);
+ else
+ results_left = 0;
+
+ if (results_left == -1)
+ sock_err = socket_errno();
+ }
+
+ gettimeofday(&nsock_tod, NULL); /* Due to poll delay */
+ } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */
+
+ if (results_left == -1 && sock_err != EINTR) {
+ nsock_trace(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
+ nsp->errnum = sock_err;
+ return -1;
+ }
+
+ iterate_through_event_lists(nsp);
+
+ return 1;
+}
+
+
+/* ---- INTERNAL FUNCTIONS ---- */
+
+static inline int get_evmask(mspool *nsp, msiod *nsi) {
+ struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
+ int sd, evmask = EV_NONE;
+ POLLFD *pev;
+
+ if (nsi->state != NSIOD_STATE_DELETED
+ && nsi->events_pending
+ && IOD_PROPGET(nsi, IOD_REGISTERED)) {
+
+#if HAVE_PCAP
+ if (nsi->pcap)
+ sd = ((mspcap *)nsi->pcap)->pcap_desc;
+ else
+#endif
+ sd = nsi->sd;
+
+ assert(sd < pinfo->capacity);
+ pev = &pinfo->events[sd];
+
+ if (pev->revents & POLL_R_FLAGS)
+ evmask |= EV_READ;
+ if (pev->revents & POLL_W_FLAGS)
+ evmask |= EV_WRITE;
+ if (pev->events && (pev->revents & POLL_X_FLAGS))
+ evmask |= (EV_READ | EV_WRITE | EV_EXCEPT);
+ }
+ return evmask;
+}
+
+/* Iterate through all the event lists (such as connect_events, read_events,
+ * timer_events, etc) and take action for those that have completed (due to
+ * timeout, i/o, etc) */
+void iterate_through_event_lists(mspool *nsp) {
+ gh_list_elem *current, *next, *last, *timer_last;
+
+ /* Clear it -- We will find the next event as we go through the list */
+ nsp->next_ev.tv_sec = 0;
+
+ last = GH_LIST_LAST_ELEM(&nsp->active_iods);
+ timer_last = GH_LIST_LAST_ELEM(&nsp->timer_events);
+
+ for (current = GH_LIST_FIRST_ELEM(&nsp->active_iods);
+ current != NULL && GH_LIST_ELEM_PREV(current) != last; current = next) {
+
+ msiod *nsi = (msiod *)GH_LIST_ELEM_DATA(current);
+
+ process_iod_events(nsp, nsi, get_evmask(nsp, nsi));
+
+ next = GH_LIST_ELEM_NEXT(current);
+ if (nsi->state == NSIOD_STATE_DELETED) {
+ gh_list_remove_elem(&nsp->active_iods, current);
+ gh_list_prepend(&nsp->free_iods, nsi);
+ }
+ }
+
+ /* iterate through timers */
+ for (current = GH_LIST_FIRST_ELEM(&nsp->timer_events);
+ current != NULL && GH_LIST_ELEM_PREV(current) != timer_last; current = next) {
+
+ msevent *nse = (msevent *)GH_LIST_ELEM_DATA(current);
+
+ process_event(nsp, &nsp->timer_events, nse, EV_NONE);
+
+ next = GH_LIST_ELEM_NEXT(current);
+ if (nse->event_done)
+ gh_list_remove_elem(&nsp->timer_events, current);
+ }
+}
+
+#endif /* HAVE_POLL */
+
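
The poll engine's table is indexed by descriptor (events[sd].fd == sd), so registration, modification, and unregistration are all O(1); poll(2) ignores entries with a negative fd, which is why unregistering just writes -1 into the slot. A minimal sketch of the POLL_R_FLAGS usage (helper name is illustrative):

    #include <poll.h>

    /* Wait up to timeout_ms for readability on fd, using the same flag
     * set as POLL_R_FLAGS. Returns >0 if ready, 0 on timeout, -1 on error. */
    static int wait_readable(int fd, int timeout_ms) {
      struct pollfd pfd;

      pfd.fd = fd;
      pfd.events = POLLIN | POLLPRI;
      pfd.revents = 0;
      return poll(&pfd, 1, timeout_ms);
    }

The trade-off of fd-indexing is memory proportional to the highest descriptor value, and each Poll() call still scans max_fd + 1 slots, so this engine sits between select() and the kernel-managed epoll/kqueue engines in scalability.
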
diff --git a/nsock/src/nsock_engines.c b/nsock/src/nsock_engines.c
index f7d107580..9e2059991 100644
--- a/nsock/src/nsock_engines.c
+++ b/nsock/src/nsock_engines.c
@@ -71,6 +71,20 @@
#define ENGINE_EPOLL
#endif /* HAVE_EPOLL */
+#if HAVE_KQUEUE
+ extern struct io_engine engine_kqueue;
+ #define ENGINE_KQUEUE &engine_kqueue,
+#else
+ #define ENGINE_KQUEUE
+#endif /* HAVE_KQUEUE */
+
+#if HAVE_POLL
+ extern struct io_engine engine_poll;
+ #define ENGINE_POLL &engine_poll,
+#else
+ #define ENGINE_POLL
+#endif /* HAVE_POLL */
+
/* select() based engine is the fallback engine, we assume it's always available */
extern struct io_engine engine_select;
#define ENGINE_SELECT &engine_select,
@@ -79,6 +93,8 @@ extern struct io_engine engine_select;
* available on your system. Engines must be sorted by order of preference */
static struct io_engine *available_engines[] = {
ENGINE_EPOLL
+ ENGINE_KQUEUE
+ ENGINE_POLL
ENGINE_SELECT
NULL
};
@@ -132,6 +148,12 @@ const char *nsock_list_engines(void) {
return
#if HAVE_EPOLL
"epoll "
+#endif
+#if HAVE_KQUEUE
+ "kqueue "
+#endif
+#if HAVE_POLL
+ "poll "
#endif
"select";
}
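
On a hypothetical kqueue-capable build without epoll (say FreeBSD, with HAVE_KQUEUE and HAVE_POLL defined), the ENGINE_* macros expand the preference list to:

    static struct io_engine *available_engines[] = {
      &engine_kqueue,
      &engine_poll,
      &engine_select,
      NULL
    };

nsock takes the first entry by default, so kqueue wins over poll, and select stays the guaranteed fallback at the end of the list.
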
diff --git a/nsock/src/nsock_internal.h b/nsock/src/nsock_internal.h
index 47060b936..24aa22dcb 100644
--- a/nsock/src/nsock_internal.h
+++ b/nsock/src/nsock_internal.h
@@ -268,6 +268,7 @@ typedef struct {
gh_list_elem *entry_in_nsp_active_iods;
#define IOD_REGISTERED 0x01
+#define IOD_PROCESSED 0x02 /* internally used by engine_kqueue.c */
#define IOD_PROPSET(iod, flag) ((iod)->_flags |= (flag))
#define IOD_PROPCLR(iod, flag) ((iod)->_flags &= ~(flag))