/***************************************************************************
 * nsock_core.c -- This contains the core engine routines for the nsock *
 * parallel socket event library. *
 * *
 ***********************IMPORTANT NSOCK LICENSE TERMS***********************
 * *
 * The nsock parallel socket event library is (C) 1999-2011 Insecure.Com *
 * LLC This library is free software; you may redistribute and/or *
 * modify it under the terms of the GNU General Public License as *
 * published by the Free Software Foundation; Version 2. This guarantees *
 * your right to use, modify, and redistribute this software under certain *
 * conditions. If this license is unacceptable to you, Insecure.Com LLC *
 * may be willing to sell alternative licenses (contact *
 * sales@insecure.com ). *
 * *
 * As a special exception to the GPL terms, Insecure.Com LLC grants *
 * permission to link the code of this program with any version of the *
 * OpenSSL library which is distributed under a license identical to that *
 * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
 * linked combinations including the two. You must obey the GNU GPL in all *
 * respects for all of the code used other than OpenSSL. If you modify *
 * this file, you may extend this exception to your version of the file, *
 * but you are not obligated to do so. *
 * *
 * If you received these files with a written license agreement stating *
 * terms other than the (GPL) terms above, then that alternative license *
 * agreement takes precedence over this comment. *
 * *
 * Source is provided to this software because we believe users have a *
 * right to know exactly what a program is going to do before they run it. *
 * This also allows you to audit the software for security holes (none *
 * have been found so far). *
 * *
 * Source code also allows you to port Nmap to new platforms, fix bugs, *
 * and add new features. You are highly encouraged to send your changes *
 * to nmap-dev@insecure.org for possible incorporation into the main *
 * distribution. By sending these changes to Fyodor or one of the *
 * Insecure.Org development mailing lists, it is assumed that you are *
 * offering the Nmap Project (Insecure.Com LLC) the unlimited, *
 * non-exclusive right to reuse, modify, and relicense the code. Nmap *
 * will always be available Open Source, but this is important because the *
 * inability to relicense code has caused devastating problems for other *
 * Free Software projects (such as KDE and NASM). We also occasionally *
 * relicense the code to third parties as discussed above. If you wish to *
 * specify special license conditions of your contributions, just say so *
 * when you send them. *
 * *
 * This program is distributed in the hope that it will be useful, but *
 * WITHOUT ANY WARRANTY; without even the implied warranty of *
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
 * General Public License v2.0 for more details *
 * (http://www.gnu.org/licenses/gpl-2.0.html). *
 * *
 ***************************************************************************/

/* $Id$ */

#include "nsock_internal.h"
|
|
#include "gh_list.h"
|
|
#include "filespace.h"
|
|
|
|
#include <assert.h>
|
|
#if HAVE_ERRNO_H
|
|
#include <errno.h>
|
|
#endif
|
|
#if HAVE_SYS_TYPES_H
|
|
#include <sys/types.h>
|
|
#endif
|
|
#if HAVE_SYS_SOCKET_H
|
|
#include <sys/socket.h>
|
|
#endif
|
|
#if HAVE_NETINET_IN_H
|
|
#include <netinet/in.h>
|
|
#endif
|
|
#if HAVE_ARPA_INET_H
|
|
#include <arpa/inet.h>
|
|
#endif
|
|
#if HAVE_STRING_H
|
|
#include <string.h>
|
|
#endif
|
|
|
|
#include "netutils.h"
|
|
|
|
#if HAVE_PCAP
|
|
#include "nsock_pcap.h"
|
|
static int pcap_read_on_nonselect(mspool *nsp);
|
|
#endif
|
|
|
|
/* Nsock time of day -- we update this at least once per
|
|
nsock_loop round (and after most calls that are likely to block).
|
|
Other nsock files should grab this
|
|
*/
|
|
struct timeval nsock_tod;
|
|
|
|
#ifdef WIN32
#define CHECKED_FD_SET FD_SET
#else
#define CHECKED_FD_SET(fd, set) \
  do { \
    if ((fd) < FD_SETSIZE) { \
      FD_SET((fd), (set)); \
    } else { \
      fatal("%s:%d: Attempt to FD_SET fd %d, which is not less than" \
            " FD_SETSIZE (%d). Try using a lower parallelism.", \
            __FILE__, __LINE__, (fd), FD_SETSIZE); \
    } \
  } while (0)
#endif

#ifdef WIN32
#define CHECKED_FD_CLR FD_CLR
#else
#define CHECKED_FD_CLR(fd, set) \
  do { \
    if ((fd) < FD_SETSIZE) { \
      FD_CLR((fd), (set)); \
    } else { \
      fatal("%s:%d: Attempt to FD_CLR fd %d, which is not less than" \
            " FD_SETSIZE (%d). Try using a lower parallelism.", \
            __FILE__, __LINE__, (fd), FD_SETSIZE); \
    } \
  } while (0)
#endif

/* These macros construct the bodies of the socket_count_*_{inc,dec} functions. */
#define SOCKET_COUNT_INC(sd, count, fdset, max_sd) \
  do { \
    assert((count) >= 0); \
    (count)++; \
    CHECKED_FD_SET((sd), (fdset)); \
    (max_sd) = MAX((max_sd), (sd)); \
    return 1; \
  } while (0)

#define SOCKET_COUNT_DEC(sd, count, fdset, max_sd, iod) \
  do { \
    assert((count) > 0); \
    (count)--; \
    if ((count) == 0) { \
      CHECKED_FD_CLR((sd), (fdset)); \
      assert((iod)->events_pending > 0); \
      if ((iod)->events_pending == 1 && (max_sd) == (sd)) \
        (max_sd)--; \
    } \
    return (count) != 0; \
  } while (0)

/* Each iod has a count of pending socket reads, socket writes, and pcap reads.
   When a descriptor's count is nonzero, its bit must be set in the appropriate
   master fd_set, and when the count is zero the bit must be cleared. What we
   are simulating is an fd_set with a counter for each socket instead of just an
   on/off switch. The fd_set's bits aren't enough by themselves because a
   descriptor may for example have two reads pending at once, and the bit must
   not be cleared after the first is completed. The socket_count_* functions
   take care of keeping the fd_sets in sync when the counts change. */

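/* A minimal sketch (kept out of the build) of how the inc/dec calls below are
 * meant to pair up for a single iod with two outstanding reads; the point is
 * that the fd_set bit is only cleared when the count drops back to zero. */
#if 0
static void socket_count_pairing_sketch(msiod *iod, mspool *ms) {
  socket_count_read_inc(iod, ms);  /* first read queued: count 0 -> 1, bit set */
  socket_count_read_inc(iod, ms);  /* second read queued: count 1 -> 2, bit stays set */
  socket_count_read_dec(iod, ms);  /* first read done: count 2 -> 1, bit stays set */
  socket_count_read_dec(iod, ms);  /* second read done: count 1 -> 0, bit cleared */
}
#endif
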
int socket_count_zero(msiod *iod, mspool *ms) {
  iod->readsd_count = 0;
  iod->writesd_count = 0;
  iod->readpcapsd_count = 0;

#if HAVE_PCAP
  if (iod->pcap) {
    int sd = ((mspcap *) iod->pcap)->pcap_desc;
    if (sd >= 0) {
      CHECKED_FD_CLR(sd, &ms->mioi.fds_master_r);
      CHECKED_FD_CLR(sd, &ms->mioi.fds_results_r);
    }
  } else
#endif
  {
    CHECKED_FD_CLR(iod->sd, &ms->mioi.fds_master_r);
    CHECKED_FD_CLR(iod->sd, &ms->mioi.fds_master_w);
    CHECKED_FD_CLR(iod->sd, &ms->mioi.fds_results_r);
    CHECKED_FD_CLR(iod->sd, &ms->mioi.fds_results_w);
  }

  if (iod->events_pending == 1 && ms->mioi.max_sd == iod->sd)
    ms->mioi.max_sd--;

  return 0;
}

static int socket_count_read_inc(msiod *iod, mspool *ms) {
  SOCKET_COUNT_INC(iod->sd, iod->readsd_count, &ms->mioi.fds_master_r, ms->mioi.max_sd);
}

static int socket_count_read_dec(msiod *iod, mspool *ms) {
  SOCKET_COUNT_DEC(iod->sd, iod->readsd_count, &ms->mioi.fds_master_r, ms->mioi.max_sd, iod);
}

static int socket_count_write_inc(msiod *iod, mspool *ms) {
  SOCKET_COUNT_INC(iod->sd, iod->writesd_count, &ms->mioi.fds_master_w, ms->mioi.max_sd);
}

static int socket_count_write_dec(msiod *iod, mspool *ms) {
  SOCKET_COUNT_DEC(iod->sd, iod->writesd_count, &ms->mioi.fds_master_w, ms->mioi.max_sd, iod);
}

#if HAVE_PCAP
static int socket_count_readpcap_inc(msiod *iod, mspool *ms) {
  SOCKET_COUNT_INC(((mspcap *) iod->pcap)->pcap_desc, iod->readpcapsd_count, &ms->mioi.fds_master_r, ms->mioi.max_sd);
}

static int socket_count_readpcap_dec(msiod *iod, mspool *ms) {
  SOCKET_COUNT_DEC(((mspcap *) iod->pcap)->pcap_desc, iod->readpcapsd_count, &ms->mioi.fds_master_r, ms->mioi.max_sd, iod);
}
#endif

#if HAVE_OPENSSL
/* Call socket_count_read_dec or socket_count_write_dec on nse->iod depending on
   the current value of nse->sslinfo.ssl_desire. */
static int socket_count_dec_ssl_desire(msevent *nse, mspool *ms) {
  assert(nse->iod->ssl != NULL &&
         (nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ ||
          nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE));
  if (nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ)
    return socket_count_read_dec(nse->iod, ms);
  else
    return socket_count_write_dec(nse->iod, ms);
}
#endif

/* Returns -1 (and sets ms->errnum) if there is an error so severe that we
   might as well quit waiting and have nsock_loop() return an error. */
static int wait_for_events(mspool *ms, int msec_timeout) {
  int event_msecs; /* Msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct timeval select_tv;
  struct timeval *select_tv_p;

  assert(msec_timeout >= -1);

  if (ms->evl.events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    if (ms->tracelevel > 3)
      nsock_trace(ms, "wait_for_events");

    if (ms->evl.next_ev.tv_sec == 0) {
      event_msecs = -1; /* None of the events specified a timeout */
    } else {
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(ms->evl.next_ev, nsock_tod));
    }

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (ms->evl.pcap_read_events.count > 0) {
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
    }
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means
       no timeout) */
    combined_msecs = MIN((unsigned) event_msecs, (unsigned) msec_timeout);
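    /* For example, with event_msecs == -1 (no event timeout) and
     * msec_timeout == 500, (unsigned) -1 becomes UINT_MAX, so the MIN() above
     * picks 500; only when both values are -1 does combined_msecs wrap back
     * around to -1, which the assert below relies on. */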

    /* printf("wait_for_events: starting wait -- combined_msecs=%d\n", combined_msecs); */
    /* Set up the timeval pointer we will give to select() */
    memset(&select_tv, 0, sizeof(select_tv));
    if (combined_msecs > 0) {
      select_tv.tv_sec = combined_msecs / 1000;
      select_tv.tv_usec = (combined_msecs % 1000) * 1000;
      select_tv_p = &select_tv;
    } else if (combined_msecs == 0) {
      /* we want the tv_sec and tv_usec to be zero -- but they already are
         from the memset above */
      select_tv_p = &select_tv;
    } else {
      assert(combined_msecs == -1);
      select_tv_p = NULL;
    }

#if HAVE_PCAP
    /* Do a non-blocking read on pcap devices that don't support select().
     * If anything was read, don't do usleep() or select(), just leave this loop. */
    if (pcap_read_on_nonselect(ms)) {
      /* okay, something was read. */
    } else
#endif
    {
      /* Set up the descriptors for select */
      ms->mioi.fds_results_r = ms->mioi.fds_master_r;
      ms->mioi.fds_results_w = ms->mioi.fds_master_w;
      ms->mioi.fds_results_x = ms->mioi.fds_master_x;

      ms->mioi.results_left = fselect(ms->mioi.max_sd + 1, &ms->mioi.fds_results_r, &ms->mioi.fds_results_w, &ms->mioi.fds_results_x, select_tv_p);
      if (ms->mioi.results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to usleep or select delay */
  } while (ms->mioi.results_left == -1 && sock_err == EINTR); /* repeat only if a signal occurred */

  if (ms->mioi.results_left == -1 && sock_err != EINTR) {
    nsock_trace(ms, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    ms->errnum = sock_err;
    return -1;
  }

  return 0;
}

/* A handler function is defined for each of the main event types
   (read, write, connect, timer, etc) -- the handler is called when
   new information is available for the event. The handler makes
   any necessary updates to the event based on any new information
   available. If the event becomes ready for delivery, the handler
   sets nse->event_done and fills out the relevant event fields
   (status, errnum) as applicable. The handlers also take care of
   event type specific teardown (such as clearing socket descriptors
   from select/poll lists). If event_done is not set, the handler
   will be called again in the case of more information or an event
   timeout. */

/* The event type handlers -- the first three arguments of each are the same:
     mspool *ms
     msevent *nse -- the event we have new info on
     enum nse_status -- The reason for the call, usually NSE_STATUS_SUCCESS
                        (which generally means a successful I/O call),
                        NSE_STATUS_TIMEOUT, or NSE_STATUS_CANCELLED

   Some of the event type handlers have other parameters, specific
   to their needs. All the handlers can assume that the calling
   function has checked that select or poll said their descriptors
   were readable/writeable (as appropriate).

   The idea is that each handler will take care of the stuff that is
   specific to it and the calling function will handle the stuff that
   can be generalized to dispatching/deleting/etc. all events. But the
   calling function may use type-specific info to determine whether
   the handler should be called at all (to save CPU time). */

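/* A bare-bones sketch (kept out of the build) of the handler contract just
 * described: on any terminal status the handler records the status and sets
 * event_done so the caller will dispatch and delete the event. The event type
 * here is hypothetical; real handlers also undo their descriptor bookkeeping
 * with the socket_count_* calls, as the handlers below do. */
#if 0
static void handle_example_result(mspool *ms, msevent *nse, enum nse_status status) {
  if (status == NSE_STATUS_TIMEOUT || status == NSE_STATUS_CANCELLED) {
    nse->status = status;
    nse->event_done = 1;
  } else if (status == NSE_STATUS_SUCCESS) {
    nse->status = NSE_STATUS_SUCCESS;
    nse->event_done = 1;
  }
  /* If event_done were left at 0 here, the event would stay on its list and
   * this handler would be called again on new info or on timeout. */
}
#endif
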
/* handle_connect_result assumes that select or poll have already
   shown the descriptor to be active */
void handle_connect_result(mspool *ms, msevent *nse, enum nse_status status) {
  int optval;
  socklen_t optlen = sizeof(int);
  char buf[1024];
  msiod *iod = nse->iod;
#if HAVE_OPENSSL
  int sslerr;
  int sslconnect_inprogress = nse->type == NSE_TYPE_CONNECT_SSL && nse->iod &&
                              (nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ ||
                               nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE);
#else
  int sslconnect_inprogress = 0;
#endif
  int rc;
  rc = 0;

  if (status == NSE_STATUS_TIMEOUT || status == NSE_STATUS_CANCELLED) {
    nse->status = status;
    nse->event_done = 1;
  } else if (sslconnect_inprogress) {
    /* Do nothing */
  } else if (status == NSE_STATUS_SUCCESS) {
    /* First we want to determine whether the socket really is connected */
    if (getsockopt(iod->sd, SOL_SOCKET, SO_ERROR, (char *) &optval, &optlen) != 0)
      optval = socket_errno(); /* Stupid Solaris */

    switch(optval) {
      case 0:
        nse->status = NSE_STATUS_SUCCESS;
        break;
      case ECONNREFUSED:
      case EHOSTUNREACH:
      case ENETDOWN:
      case ENETUNREACH:
      case ENETRESET:
      case ECONNABORTED:
      case ETIMEDOUT:
      case EHOSTDOWN:
      case ECONNRESET:
#ifdef WIN32
      case WSAEADDRNOTAVAIL:
      case WSAEACCES: /* Can happen when Windows Firewall blocks a port. */
#endif
#ifndef WIN32
      case EPIPE: /* Has been seen after connect on Linux. */
      case ENOPROTOOPT: /* Also seen on Linux, perhaps in response to protocol unreachable. */
#endif
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = optval;
        break;
      default:
        Snprintf(buf, sizeof(buf), "Strange connect error from %s (%d)", inet_ntop_ez(&iod->peer, iod->peerlen), optval);
        perror(buf);
        assert(0); /* I'd like for someone to report it */
        break;
    }

    /* Now special code for the SSL case where the TCP connection was successful. */
    if (nse->type == NSE_TYPE_CONNECT_SSL &&
        nse->status == NSE_STATUS_SUCCESS) {
#if HAVE_OPENSSL
      assert(ms->sslctx != NULL);
      /* Reuse iod->ssl if present. If set, this is the second try at connection
         without the SSL_OP_NO_SSLv2 option set. */
      if (iod->ssl == NULL) {
        iod->ssl = SSL_new(ms->sslctx);
        if (!iod->ssl)
          fatal("SSL_new failed: %s", ERR_error_string(ERR_get_error(), NULL));
      }

      if (iod->hostname != NULL) {
#if HAVE_SSL_SET_TLSEXT_HOST_NAME
        if (SSL_set_tlsext_host_name(iod->ssl, iod->hostname) != 1)
          fatal("SSL_set_tlsext_host_name failed: %s", ERR_error_string(ERR_get_error(), NULL));
#endif
      }

      /* Associate our new SSL with the connected socket. It will inherit
         the non-blocking nature of the sd */
      if (SSL_set_fd(iod->ssl, iod->sd) != 1) {
        fatal("SSL_set_fd failed: %s", ERR_error_string(ERR_get_error(), NULL));
      }
      /* Event not done -- need to do SSL connect below */
      nse->sslinfo.ssl_desire = SSL_ERROR_WANT_CONNECT;
#endif
    } else {
      /* This is not an SSL connect (in which case we are always done), or
         the TCP connect() underlying the SSL failed (in which case we are also
         done). */
      nse->event_done = 1;
    }
  } else {
    assert(0); /* Currently we only know about TIMEOUT and SUCCESS callbacks */
  }

  /* At this point the TCP connection is done, whether successful or not.
     Therefore decrease the read/write listen counts that were incremented in
     nsp_add_event. In the SSL case, we may increase one of the counts depending
     on whether SSL_connect returns an error of SSL_ERROR_WANT_READ or
     SSL_ERROR_WANT_WRITE. In that case we will re-enter this function, but we
     don't want to execute this block again. */
  if (iod->sd != -1 && !sslconnect_inprogress) {
    socket_count_read_dec(iod, ms);
    socket_count_write_dec(iod, ms);
    CHECKED_FD_CLR(iod->sd, &ms->mioi.fds_master_x);
  }

#if HAVE_OPENSSL
  if (nse->type == NSE_TYPE_CONNECT_SSL && !nse->event_done) {
    /* Let's now start/continue/finish the connect! */
    if (iod->ssl_session) {
      rc = SSL_set_session(iod->ssl, iod->ssl_session);
      if (rc == 0)
        printf("Uh-oh: SSL_set_session() failed - please tell Fyodor\n");
      iod->ssl_session = NULL; /* No need for this any more */
    }

    /* If this is a reinvocation of handle_connect_result, clear out the listen
       bits that caused it, based on the previous SSL desire. */
    if (sslconnect_inprogress)
      socket_count_dec_ssl_desire(nse, ms);

    rc = SSL_connect(iod->ssl);
    /* printf("DBG: SSL_connect()=%d", rc); */
    if (rc == 1) {
      /* Woop! Connect is done! */
      nse->event_done = 1;
      /* Check that certificate verification was okay, if requested. */
      if (nsi_ssl_post_connect_verify(iod)) {
        nse->status = NSE_STATUS_SUCCESS;
      } else {
        if (ms->tracelevel > 0) {
          nsock_trace(ms, "certificate verification error for EID %li: %s",
                      nse->id, ERR_error_string(ERR_get_error(), NULL));
        }
        nse->status = NSE_STATUS_ERROR;
      }
    } else {
      long options = SSL_get_options(iod->ssl);
      sslerr = SSL_get_error(iod->ssl, rc);
      if (rc == -1 && sslerr == SSL_ERROR_WANT_READ) {
        nse->sslinfo.ssl_desire = sslerr;
        socket_count_read_inc(iod, ms);
      } else if (rc == -1 && sslerr == SSL_ERROR_WANT_WRITE) {
        nse->sslinfo.ssl_desire = sslerr;
        socket_count_write_inc(iod, ms);
      } else if (!(options & SSL_OP_NO_SSLv2)) {
        /* SSLv3-only and TLSv1-only servers can't be connected to when the
           SSL_OP_NO_SSLv2 option is not set, which is the case when the pool
           was initialized with nsp_ssl_init_max_speed. Try reconnecting with
           SSL_OP_NO_SSLv2. Never downgrade a NO_SSLv2 connection to one that
           might use SSLv2. */
        if (ms->tracelevel > 0)
          nsock_trace(ms, "EID %li reconnecting with SSL_OP_NO_SSLv2", nse->id);
        close(iod->sd);
        nsock_connect_internal(ms, nse, iod->lastproto, &iod->peer, iod->peerlen, nsi_peerport(iod));
        if (!SSL_clear(iod->ssl))
          fatal("SSL_clear failed: %s", ERR_error_string(ERR_get_error(), NULL));

        SSL_set_options(iod->ssl, options | SSL_OP_NO_SSLv2);
        socket_count_read_inc(nse->iod, ms);
        socket_count_write_inc(nse->iod, ms);
        nse->sslinfo.ssl_desire = SSL_ERROR_WANT_CONNECT;
      } else {
        if (ms->tracelevel > 0)
          nsock_trace(ms, "EID %li %s", nse->id, ERR_error_string(ERR_get_error(), NULL));
        nse->event_done = 1;
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = EIO;
      }
    }
  }
#endif

  return;
}

void handle_write_result(mspool *ms, msevent *nse, enum nse_status status) {
  int bytesleft;
  char *str;
  int res;
  int err;
  msiod *iod = nse->iod;

  if (status == NSE_STATUS_TIMEOUT || status == NSE_STATUS_CANCELLED) {
    nse->event_done = 1;
    nse->status = status;
  } else if (status == NSE_STATUS_SUCCESS) {
    str = FILESPACE_STR(&nse->iobuf) + nse->writeinfo.written_so_far;
    bytesleft = FILESPACE_LENGTH(&nse->iobuf) - nse->writeinfo.written_so_far;
    if (nse->writeinfo.written_so_far > 0)
      assert(bytesleft > 0);
#if HAVE_OPENSSL
    if (iod->ssl)
      res = SSL_write(iod->ssl, str, bytesleft);
    else
#endif
    if (nse->writeinfo.dest.ss_family == AF_UNSPEC)
      res = send(nse->iod->sd, str, bytesleft, 0);
    else
      res = sendto(nse->iod->sd, str, bytesleft, 0, (struct sockaddr *) &nse->writeinfo.dest, (int) nse->writeinfo.destlen);
    if (res == bytesleft) {
      nse->event_done = 1;
      nse->status = NSE_STATUS_SUCCESS;
    } else if (res >= 0) {
      nse->writeinfo.written_so_far += res;
    } else {
      assert(res == -1);
      if (iod->ssl) {
#if HAVE_OPENSSL
        err = SSL_get_error(iod->ssl, res);
        if (err == SSL_ERROR_WANT_READ) {
          socket_count_dec_ssl_desire(nse, ms);
          socket_count_read_inc(iod, ms);
          nse->sslinfo.ssl_desire = err;
        } else if (err == SSL_ERROR_WANT_WRITE) {
          socket_count_dec_ssl_desire(nse, ms);
          socket_count_write_inc(iod, ms);
          nse->sslinfo.ssl_desire = err;
        } else {
          /* Unexpected error */
          nse->event_done = 1;
          nse->status = NSE_STATUS_ERROR;
          nse->errnum = EIO;
        }
#endif
      } else {
        err = socket_errno();
        if (err != EINTR && err != EAGAIN
#ifndef WIN32
            && err != EBUSY
#endif
            ) {
          nse->event_done = 1;
          nse->status = NSE_STATUS_ERROR;
          nse->errnum = err;
        }
      }
    }

    nse->iod->write_count += res;
  }

  if (nse->event_done && nse->iod->sd != -1) {
#if HAVE_OPENSSL
    if (nse->iod->ssl != NULL)
      socket_count_dec_ssl_desire(nse, ms);
    else
#endif
      socket_count_write_dec(nse->iod, ms);
  }

  return;
}

void handle_timer_result(mspool *ms, msevent *nse, enum nse_status status) {
  /* Ooh this is a hard job :) */

  nse->event_done = 1;
  nse->status = status;

  return;
}

/* Returns -1 if an error, otherwise the number of bytes newly read into the
   buffer */
static int do_actual_read(mspool *ms, msevent *nse) {
  char buf[8192];
  int buflen = 0;
  msiod *iod = nse->iod;
  int err = 0;
  int max_chunk = NSOCK_READ_CHUNK_SIZE;
  int startlen = FILESPACE_LENGTH(&nse->iobuf);

  if (nse->readinfo.read_type == NSOCK_READBYTES)
    max_chunk = nse->readinfo.num;

  if (!iod->ssl) {
    do {
      struct sockaddr_storage peer;
      socklen_t peerlen;

      peerlen = sizeof(peer);
      buflen = recvfrom(iod->sd, buf, sizeof(buf), 0, (struct sockaddr *) &peer, &peerlen);
      /* Using recv() was failing, at least on UNIX, for non-network sockets
         (i.e. stdin). In this case a read() is done instead -- on ENOTSOCK we
         may have a non-network socket. */
      if (buflen == -1) {
        if (socket_errno() == ENOTSOCK) {
          peer.ss_family = AF_UNSPEC;
          peerlen = 0;
          buflen = read(iod->sd, buf, sizeof(buf));
        }
      }
      if (buflen == -1) {
        err = socket_errno();
        break;
      }
      if (peerlen > 0) {
        assert(peerlen <= sizeof(iod->peer));
        memcpy(&iod->peer, &peer, peerlen);
        iod->peerlen = peerlen;
      }
      if (buflen > 0) {
        if (fscat(&nse->iobuf, buf, buflen) == -1) {
          nse->event_done = 1;
          nse->status = NSE_STATUS_ERROR;
          nse->errnum = ENOMEM;
          return -1;
        }

        /* Sometimes a service just spews and spews data. So we return
         * after a somewhat large amount to avoid monopolizing resources
         * and avoid DOS attacks. */
        if (FILESPACE_LENGTH(&nse->iobuf) > max_chunk)
          return FILESPACE_LENGTH(&nse->iobuf) - startlen;

        /* No good reason to read again if we were successful in the read but
         * didn't fill up the buffer. Especially for UDP, where we want to
         * return only one datagram at a time. The consistency of the above
         * assignment of iod->peer depends on not consolidating more than one
         * UDP read buffer. */
        if (buflen > 0 && buflen < sizeof(buf))
          return FILESPACE_LENGTH(&nse->iobuf) - startlen;
      }
    } while (buflen > 0 || (buflen == -1 && err == EINTR));

    if (buflen == -1) {
      if (err != EINTR && err != EAGAIN) {
        nse->event_done = 1;
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = err;
        return -1;
      }
    }
  } else {
#if HAVE_OPENSSL
    /* OpenSSL read */
    while ((buflen = SSL_read(iod->ssl, buf, sizeof(buf))) > 0) {

      if (fscat(&nse->iobuf, buf, buflen) == -1) {
        nse->event_done = 1;
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = ENOMEM;
        return -1;
      }

      /* Sometimes a service just spews and spews data. So we return
       * after a somewhat large amount to avoid monopolizing resources
       * and avoid DOS attacks. */
      if (FILESPACE_LENGTH(&nse->iobuf) > max_chunk)
        return FILESPACE_LENGTH(&nse->iobuf) - startlen;
    }

    if (buflen == -1) {
      err = SSL_get_error(iod->ssl, buflen);
      if (err == SSL_ERROR_WANT_READ) {
        socket_count_dec_ssl_desire(nse, ms);
        socket_count_read_inc(iod, ms);
        nse->sslinfo.ssl_desire = err;
      } else if (err == SSL_ERROR_WANT_WRITE) {
        socket_count_dec_ssl_desire(nse, ms);
        socket_count_write_inc(iod, ms);
        nse->sslinfo.ssl_desire = err;
      } else {
        /* Unexpected error */
        nse->event_done = 1;
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = EIO;
        if (ms->tracelevel > 2)
          nsock_trace(ms, "SSL_read() failed for reason %s on NSI %li",
                      ERR_reason_error_string(err), iod->id);
        return -1;
      }
    }
#endif /* HAVE_OPENSSL */
  }

  if (buflen == 0) {
    nse->event_done = 1;
    nse->eof = 1;
    if (FILESPACE_LENGTH(&nse->iobuf) > 0) {
      nse->status = NSE_STATUS_SUCCESS;
      return FILESPACE_LENGTH(&nse->iobuf) - startlen;
    } else {
      nse->status = NSE_STATUS_EOF;
      return 0;
    }
  }

  return FILESPACE_LENGTH(&nse->iobuf) - startlen;
}

void handle_read_result(mspool *ms, msevent *nse, enum nse_status status) {
  unsigned int count;
  char *str;
  int rc, len;
  msiod *iod = nse->iod;

  if (status == NSE_STATUS_TIMEOUT) {
    nse->event_done = 1;
    if (FILESPACE_LENGTH(&nse->iobuf) > 0) {
      nse->status = NSE_STATUS_SUCCESS;
    } else {
      nse->status = NSE_STATUS_TIMEOUT;
    }
  } else if (status == NSE_STATUS_CANCELLED) {
    nse->status = status;
    nse->event_done = 1;
  } else if (status == NSE_STATUS_SUCCESS) {
    rc = do_actual_read(ms, nse);
    /* printf("DBG: Just read %d new bytes%s.\n", rc, iod->ssl? "( SSL!)" : ""); */
    if (rc > 0) {
      nse->iod->read_count += rc;
      /* We decide whether we have read enough to return */
      switch(nse->readinfo.read_type) {
        case NSOCK_READ:
          nse->status = NSE_STATUS_SUCCESS;
          nse->event_done = 1;
          break;
        case NSOCK_READBYTES:
          if (FILESPACE_LENGTH(&nse->iobuf) >= nse->readinfo.num) {
            nse->status = NSE_STATUS_SUCCESS;
            nse->event_done = 1;
          }
          /* else we are not done */
          break;
        case NSOCK_READLINES:
          /* Let's count the number of lines we have ... */
          count = 0;
          len = FILESPACE_LENGTH(&nse->iobuf) - 1;
          str = FILESPACE_STR(&nse->iobuf);
          for (count = 0; len >= 0; len--) {
            if (str[len] == '\n') {
              count++;
              if ((int) count >= nse->readinfo.num)
                break;
            }
          }
          if ((int) count >= nse->readinfo.num) {
            nse->event_done = 1;
            nse->status = NSE_STATUS_SUCCESS;
          }
          /* Else we are not done */
          break;
        default:
          assert(0);
          break; /* unreached */
      }
    }
  } else {
    assert(0); /* Currently we only know about TIMEOUT, CANCELLED, and SUCCESS callbacks */
  }

  /* If there are no more reads for this IOD, we are done reading on the socket
     so we can take it off the descriptor list ... */
  if (nse->event_done && iod->sd >= 0) {
#if HAVE_OPENSSL
    if (nse->iod->ssl != NULL)
      socket_count_dec_ssl_desire(nse, ms);
    else
#endif
      socket_count_read_dec(nse->iod, ms);
  }

  return;
}

#if HAVE_PCAP
void handle_pcap_read_result(mspool *ms, msevent *nse, enum nse_status status) {
  msiod *iod = nse->iod;
  mspcap *mp = (mspcap *) iod->pcap;

  if (status == NSE_STATUS_TIMEOUT) {
    nse->status = NSE_STATUS_TIMEOUT;
    nse->event_done = 1;
  } else if (status == NSE_STATUS_CANCELLED) {
    nse->status = NSE_STATUS_CANCELLED;
    nse->event_done = 1;
  } else if (status == NSE_STATUS_SUCCESS) {
    /* check if we already have something read */
    if (FILESPACE_LENGTH(&(nse->iobuf)) == 0) {
      nse->status = NSE_STATUS_TIMEOUT;
      nse->event_done = 0;
    } else {
      nse->status = NSE_STATUS_SUCCESS; /* we have full buffer */
      nse->event_done = 1;
    }
  } else {
    assert(0); /* Currently we only know about TIMEOUT, CANCELLED, and SUCCESS callbacks */
  }

  /* If there are no more read events, we are done reading on the socket so
     we can take it off the descriptor list ... */
  if (nse->event_done && mp->pcap_desc >= 0)
    socket_count_readpcap_dec(iod, ms);

  return;
}

/* returns number of descriptors on which data was read */
static int pcap_read_on_nonselect(mspool *nsp) {
  gh_list *event_list = &nsp->evl.pcap_read_events;
  gh_list_elem *current, *next;
  msevent *nse;
  int rc;
  int ret = 0;

  for (current = GH_LIST_FIRST_ELEM(event_list); current != NULL; current = next) {
    nse = (msevent *) GH_LIST_ELEM_DATA(current);
    rc = do_actual_pcap_read(nse);
    if (rc == 1) { /* something received */
      ret++;
      break;
    }
    next = GH_LIST_ELEM_NEXT(current);
  }

  return ret;
}
#endif /* HAVE_PCAP */

/* Iterate through all the event lists (such as connect_events, read_events,
   timer_events, etc) and take action for those that have completed (due to
   timeout, i/o, etc) */
static void iterate_through_event_lists(mspool *nsp) {
  gh_list_elem *current, *next, *prev, *last;
  msevent *nse;
  int match_r = 0, match_w = 0;
#if HAVE_OPENSSL
  int desire_r = 0, desire_w = 0;
#endif
  gh_list *event_lists[] = { &nsp->evl.connect_events,
                             &nsp->evl.read_events,
                             &nsp->evl.write_events,
                             &nsp->evl.timer_events,
#if HAVE_PCAP
                             &nsp->evl.pcap_read_events,
#endif
                             0
  };
  int current_list_idx;
  nsp->evl.next_ev.tv_sec = 0; /* Clear it -- We will find the next
                                  event as we go through the list */

  /* We keep the events separate because we want to handle them in the
     order: connect => read => write => timer, for several reasons:
     1) It makes sure we have gone through all the net i/o events before
        a timer expires (it would be a shame to timeout after the data was
        available but before we delivered the events).
     2) The connect() results often lead to a read or write that can be
        processed in the same cycle. In the same way, read() often
        leads to write(). */

  /* foreach list */
  if (nsp->tracelevel > 7) {
    for (current_list_idx = 0; event_lists[current_list_idx] != NULL;
         current_list_idx++) {
      nsock_trace(nsp, "before iterating, list %i", current_list_idx);
      for (current = GH_LIST_FIRST_ELEM(event_lists[current_list_idx]);
           current != NULL; current = GH_LIST_ELEM_NEXT(current)) {
        nse = (msevent *) GH_LIST_ELEM_DATA(current);
        nsock_trace(nsp, "before iterating %lu", nse->id);
      }
    }
  }
  /* foreach list */
  for (current_list_idx = 0; event_lists[current_list_idx] != NULL;
       current_list_idx++) {
    /* Remember the last element and don't look past it. This is because, for
       example, handling a read event may add another read event to the end of
       the list, which then adds another, and so on. */
    last = GH_LIST_LAST_ELEM(event_lists[current_list_idx]);
    /* foreach element in the list */
    for (prev = NULL, current = GH_LIST_FIRST_ELEM(event_lists[current_list_idx]);
         current != NULL && prev != last;
         prev = current, current = next) {
      nse = (msevent *) GH_LIST_ELEM_DATA(current);
      if (nsp->tracelevel > 7)
        nsock_trace(nsp, "list %i, iterating %lu", current_list_idx, nse->id);

      if (!nse->event_done) {
        switch(nse->type) {
          case NSE_TYPE_CONNECT:
          case NSE_TYPE_CONNECT_SSL:
            if (FD_ISSET(nse->iod->sd, &nsp->mioi.fds_results_r) ||
                FD_ISSET(nse->iod->sd, &nsp->mioi.fds_results_w) ||
                FD_ISSET(nse->iod->sd, &nsp->mioi.fds_results_x)) {
              handle_connect_result(nsp, nse, NSE_STATUS_SUCCESS);
            }
            if (!nse->event_done && nse->timeout.tv_sec &&
                !TIMEVAL_AFTER(nse->timeout, nsock_tod)) {
              handle_connect_result(nsp, nse, NSE_STATUS_TIMEOUT);
            }
            break;

          case NSE_TYPE_READ:
            match_r = FD_ISSET(nse->iod->sd, &nsp->mioi.fds_results_r);
            match_w = FD_ISSET(nse->iod->sd, &nsp->mioi.fds_results_w);
#if HAVE_OPENSSL
            desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
            desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;
            if (nse->iod->ssl && ((desire_r && match_r) || (desire_w && match_w)))
              handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);
            else
#endif
            if (!nse->iod->ssl && match_r)
              handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);

            if (!nse->event_done && nse->timeout.tv_sec &&
                !TIMEVAL_AFTER(nse->timeout, nsock_tod)) {
              handle_read_result(nsp, nse, NSE_STATUS_TIMEOUT);
            }
            break;

          case NSE_TYPE_WRITE:
            match_r = FD_ISSET(nse->iod->sd, &nsp->mioi.fds_results_r);
            match_w = FD_ISSET(nse->iod->sd, &nsp->mioi.fds_results_w);
#if HAVE_OPENSSL
            desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
            desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;
            if (nse->iod->ssl && ((desire_r && match_r) ||
                                  (desire_w && match_w)))
              handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);
            else
#endif
            if (!nse->iod->ssl && match_w)
              handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);

            if (!nse->event_done && nse->timeout.tv_sec &&
                !TIMEVAL_AFTER(nse->timeout, nsock_tod)) {
              handle_write_result(nsp, nse, NSE_STATUS_TIMEOUT);
            }
            break;

          case NSE_TYPE_TIMER:
            if (nse->timeout.tv_sec &&
                !TIMEVAL_AFTER(nse->timeout, nsock_tod)) {
              handle_timer_result(nsp, nse, NSE_STATUS_SUCCESS);
            }
            break;

#if HAVE_PCAP
          case NSE_TYPE_PCAP_READ: {
            if (nsp->tracelevel > 5)
              nsock_trace(nsp, "PCAP iterating %lu", nse->id);
#ifdef PCAP_CAN_DO_SELECT
            match_r = FD_ISSET(((mspcap *) nse->iod->pcap)->pcap_desc, &nsp->mioi.fds_results_r);

            if (match_r)
#endif
            {
              /* buffer empty? check it! */
              if (FILESPACE_LENGTH(&(nse->iobuf)) == 0)
                do_actual_pcap_read(nse);
            }

            /* if something was already received */
            if (FILESPACE_LENGTH(&(nse->iobuf)) > 0)
              handle_pcap_read_result(nsp, nse, NSE_STATUS_SUCCESS);

            if (!nse->event_done && nse->timeout.tv_sec &&
                !TIMEVAL_AFTER(nse->timeout, nsock_tod))
              handle_pcap_read_result(nsp, nse, NSE_STATUS_TIMEOUT);

#if PCAP_BSD_SELECT_HACK
            /* If the event occurred and we're in BSD_HACK mode, then this event
             * was added to two queues: evl.read_events and evl.pcap_read_events.
             * Of course we should destroy it only once.
             * I assume we're now in evl.read_events, so just unlink this event
             * from evl.pcap_read_events. */
            if (((mspcap *) nse->iod->pcap)->pcap_desc >= 0 &&
                nse->event_done &&
                event_lists[current_list_idx] == &nsp->evl.read_events) {
              /* event is done, list is read_events and we're in BSD_HACK mode.
               * So unlink event from pcap_read_events */
              gh_list_remove(&nsp->evl.pcap_read_events, nse);
              if (nsp->tracelevel > 8)
                nsock_trace(nsp, "PCAP NSE #%lu: Removing event from PCAP_READ_EVENTS", nse->id);
            }
            if (((mspcap *) nse->iod->pcap)->pcap_desc >= 0 &&
                nse->event_done &&
                event_lists[current_list_idx] == &nsp->evl.pcap_read_events) {
              gh_list_remove(&nsp->evl.read_events, nse);
              if (nsp->tracelevel > 8)
                nsock_trace(nsp, "PCAP NSE #%lu: Removing event from READ_EVENTS", nse->id);
            }
#endif
            break;
          }
#endif
          default:
            fatal("Event has unknown type (%d)", nse->type);
            break; /* unreached */
        }
      }

      if (nse->event_done) {
        /* Security sanity check: don't return a functional SSL iod without
           setting an SSL data structure. */
        if (nse->type == NSE_TYPE_CONNECT_SSL && nse->status == NSE_STATUS_SUCCESS)
          assert(nse->iod->ssl != NULL);

        if (nsp->tracelevel > 8)
          nsock_trace(nsp, "NSE #%lu: Removing event from event_lists[%i]", nse->id, current_list_idx);

        /* WooHoo! The event is ready to be sent */
        msevent_dispatch_and_delete(nsp, nse, 1);
        next = GH_LIST_ELEM_NEXT(current);
        gh_list_remove_elem(event_lists[current_list_idx], current);
      } else {
        next = GH_LIST_ELEM_NEXT(current);
        /* Is this event the next-to-timeout? */
        if (nse->timeout.tv_sec != 0) {
          if (nsp->evl.next_ev.tv_sec == 0)
            nsp->evl.next_ev = nse->timeout;
          else if (TIMEVAL_AFTER(nsp->evl.next_ev, nse->timeout))
            nsp->evl.next_ev = nse->timeout;
        }
      }
    }
  }
}

/* Here is the all important looping function that tells the event
   engine to start up and begin processing events. It will continue
   until all events have been delivered (including new ones started
   from event handlers), or the msec_timeout is reached, or a major
   error has occurred. Use -1 if you don't want to set a maximum time
   for it to run. A timeout of 0 will return after 1 non-blocking
   loop. The nsock loop can be restarted after it returns. For
   example you could do a series of 15 second runs, allowing you to do
   other stuff between them. */
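/* A usage sketch (kept out of the build) of the "series of 15 second runs"
 * pattern mentioned above. The pool is assumed to have been created and
 * populated with events elsewhere; do_other_stuff() is a hypothetical
 * placeholder for the caller's own work between slices. */
#if 0
static void run_in_slices(nsock_pool nsp) {
  enum nsock_loopstatus status;
  do {
    status = nsock_loop(nsp, 15000); /* run for up to 15 seconds */
    do_other_stuff();                /* hypothetical work between slices */
  } while (status == NSOCK_LOOP_TIMEOUT);
  /* Here status is NSOCK_LOOP_NOEVENTS, NSOCK_LOOP_QUIT, or NSOCK_LOOP_ERROR. */
}
#endif
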
enum nsock_loopstatus nsock_loop(nsock_pool nsp, int msec_timeout) {
  mspool *ms = (mspool *) nsp;
  struct timeval loop_timeout;
  int msecs_left;
  unsigned long loopnum = 0;
  enum nsock_loopstatus quitstatus = NSOCK_LOOP_ERROR;

  gettimeofday(&nsock_tod, NULL);

  if (msec_timeout < -1) {
    ms->errnum = EINVAL;
    return NSOCK_LOOP_ERROR;
  }
  TIMEVAL_MSEC_ADD(loop_timeout, nsock_tod, msec_timeout);
  msecs_left = msec_timeout;

  if (ms->tracelevel > 2) {
    if (msec_timeout >= 0) {
      nsock_trace(ms, "nsock_loop() started (timeout=%dms). %d events pending",
                  msec_timeout, ms->evl.events_pending);
    } else {
      nsock_trace(ms, "nsock_loop() started (no timeout). %d events pending",
                  ms->evl.events_pending);
    }
  }

  while (1) {
    if (ms->quit) {
      /* We've been asked to quit the loop through nsock_loop_quit. */
      ms->quit = 0;
      quitstatus = NSOCK_LOOP_QUIT;
      break;
    }

    if (ms->evl.events_pending == 0) {
      /* if no events at all are pending, then none can be created until
         we quit nsock_loop() -- so we do that now. */
      quitstatus = NSOCK_LOOP_NOEVENTS;
      break;
    }

    if (msec_timeout >= 0) {
      msecs_left = MAX(0, TIMEVAL_MSEC_SUBTRACT(loop_timeout, nsock_tod));
      if (msecs_left == 0 && loopnum > 0) {
        quitstatus = NSOCK_LOOP_TIMEOUT;
        break;
      }
    }

    if (wait_for_events(ms, msecs_left) == -1) {
      quitstatus = NSOCK_LOOP_ERROR;
      break;
    }

    /* Now we go through the event lists, doing callbacks for those which
       have completed */
    iterate_through_event_lists(ms);

    gettimeofday(&nsock_tod, NULL); /* we do this at end because there is one
                                       at beginning of function */
    loopnum++;
  }

  return quitstatus;
}

/* Calling this function will cause nsock_loop to quit on its next iteration
   with a return value of NSOCK_LOOP_QUIT. */
void nsock_loop_quit(nsock_pool nsp) {
  mspool *ms = (mspool *) nsp;

  ms->quit = 1;
}

/* Grab the latest time as recorded by the nsock library, which does
   so at least once per event loop round (in nsock_loop). Not only does this
   function (generally) avoid a system call, but in many circumstances
   it is better to use nsock's time rather than the system time. If
   nsock has never obtained the time when you call it, it will do so
   before returning. */
const struct timeval *nsock_gettimeofday() {
  if (nsock_tod.tv_sec == 0)
    gettimeofday(&nsock_tod, NULL);

  return &nsock_tod;
}

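/* Sketch (kept out of the build): stamping an application-level record with
 * nsock's cached time rather than issuing another gettimeofday() call; the
 * record struct here is hypothetical. */
#if 0
struct app_record { struct timeval when; };
static void stamp_app_record(struct app_record *r) {
  r->when = *nsock_gettimeofday();
}
#endif
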
/* Adds an event to the appropriate nsp event list, handles housekeeping
   such as adjusting the descriptor select/poll lists, registering the
   timeout value, etc. */
void nsp_add_event(mspool *nsp, msevent *nse) {
  if (nsp->tracelevel > 5)
    nsock_trace(nsp, "NSE #%lu: Adding event", nse->id);

  /* First let's do the event-type independent stuff -- starting with
     timeouts */
  if (nse->event_done) {
    nsp->evl.next_ev = nsock_tod;
  } else {
    if (nse->timeout.tv_sec != 0) {
      if (nsp->evl.next_ev.tv_sec == 0) {
        nsp->evl.next_ev = nse->timeout;
      } else if (TIMEVAL_AFTER(nsp->evl.next_ev, nse->timeout)) {
        nsp->evl.next_ev = nse->timeout;
      }
    }
  }

  nsp->evl.events_pending++;

  /* Now we do the event type specific actions */
  switch(nse->type) {
    case NSE_TYPE_CONNECT:
    case NSE_TYPE_CONNECT_SSL:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_read_inc(nse->iod, nsp);
        socket_count_write_inc(nse->iod, nsp);
        CHECKED_FD_SET(nse->iod->sd, &nsp->mioi.fds_master_x);
        nsp->mioi.max_sd = MAX(nsp->mioi.max_sd, nse->iod->sd);
      }
      gh_list_append(&nsp->evl.connect_events, nse);
      break;

    case NSE_TYPE_READ:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_read_inc(nse->iod, nsp);
#if HAVE_OPENSSL
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_READ;
#endif
      }
      gh_list_append(&nsp->evl.read_events, nse);
      break;

    case NSE_TYPE_WRITE:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_write_inc(nse->iod, nsp);
#if HAVE_OPENSSL
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_WRITE;
#endif
      }
      gh_list_append(&nsp->evl.write_events, nse);
      break;

    case NSE_TYPE_TIMER:
      gh_list_append(&nsp->evl.timer_events, nse);
      break;

#if HAVE_PCAP
    case NSE_TYPE_PCAP_READ: {
      mspcap *mp = (mspcap *) nse->iod->pcap;
      assert(mp);
      if (mp->pcap_desc >= 0) { /* pcap descriptor present */
        if (!nse->event_done)
          socket_count_readpcap_inc(nse->iod, nsp);
        if (nsp->tracelevel > 8)
          nsock_trace(nsp, "PCAP NSE #%lu: Adding event to READ_EVENTS", nse->id);
        gh_list_append(&nsp->evl.read_events, nse);

#if PCAP_BSD_SELECT_HACK
        /* When using the BSD hack we must do pcap_next() after select().
         * Let's insert this pcap into both queues, the selectable and the
         * nonselectable one. This will result in doing pcap_next_ex() just
         * before select(). */
        if (nsp->tracelevel > 8)
          nsock_trace(nsp, "PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
        gh_list_append(&nsp->evl.pcap_read_events, nse);
#endif
      } else { /* pcap isn't selectable. Add it to the pcap-specific queue. */
        if (nsp->tracelevel > 8)
          nsock_trace(nsp, "PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
        gh_list_append(&nsp->evl.pcap_read_events, nse);
      }
      break;
    }
#endif

    default:
      assert(0);
      break; /* unreached */
  }
}

void nsock_trace(mspool *ms, char *fmt, ...) {
  va_list ap;
  int elapsedTimeMS;

  assert(ms->tracefile != NULL);
  elapsedTimeMS = TIMEVAL_MSEC_SUBTRACT(nsock_tod, ms->tracebasetime);
  va_start(ap, fmt);
  fflush(ms->tracefile);
  fprintf(ms->tracefile, "NSOCK (%.4fs) ", elapsedTimeMS / 1000.0);
  vfprintf(ms->tracefile, fmt, ap);
  fprintf(ms->tracefile, "\n");
  va_end(ap);
  return;
}

/* An event has been completed and the handler is about to be called. This
   function writes out tracing data about the event if necessary */
void nsock_trace_handler_callback(mspool *ms, msevent *nse) {
  msiod *nsi;
  char *str;
  int strlength = 0;
  char displaystr[256];
  char errstr[256];

  if (ms->tracelevel == 0)
    return;

  nsi = nse->iod;

  if (nse->status == NSE_STATUS_ERROR) {
    Snprintf(errstr, sizeof(errstr), "[%s (%d)] ", strerror(nse->errnum), nse->errnum);
  } else {
    errstr[0] = '\0';
  }

  /* Some types have special tracing treatment */
  switch(nse->type) {
    case NSE_TYPE_CONNECT:
    case NSE_TYPE_CONNECT_SSL:
      nsock_trace(ms, "Callback: %s %s %sfor EID %li [%s:%hu]",
                  nse_type2str(nse->type), nse_status2str(nse->status), errstr,
                  nse->id, inet_ntop_ez(&nsi->peer, nsi->peerlen), nsi_peerport(nsi));
      break;

    case NSE_TYPE_READ:
      if (nse->status != NSE_STATUS_SUCCESS) {
        if (nsi->peerlen > 0) {
          nsock_trace(ms, "Callback: %s %s %sfor EID %li [%s:%hu]",
                      nse_type2str(nse->type), nse_status2str(nse->status),
                      errstr, nse->id, inet_ntop_ez(&nsi->peer, nsi->peerlen),
                      nsi_peerport(nsi));
        } else {
          nsock_trace(ms, "Callback: %s %s %sfor EID %li (peer unspecified)",
                      nse_type2str(nse->type), nse_status2str(nse->status),
                      errstr, nse->id);
        }
      } else {
        str = nse_readbuf(nse, &strlength);
        if (ms->tracelevel > 1 && strlength < 80) {
          memcpy(displaystr, ": ", 2);
          memcpy(displaystr + 2, str, strlength);
          displaystr[2 + strlength] = '\0';
          replacenonprintable(displaystr + 2, strlength, '.');
        } else {
          displaystr[0] = '\0';
        }

        if (nsi->peerlen > 0) {
          nsock_trace(ms, "Callback: %s %s for EID %li [%s:%hu] %s(%d bytes)%s",
                      nse_type2str(nse->type), nse_status2str(nse->status),
                      nse->id, inet_ntop_ez(&nsi->peer, nsi->peerlen),
                      nsi_peerport(nsi), nse_eof(nse)? "[EOF]" : "", strlength,
                      displaystr);
        } else {
          nsock_trace(ms, "Callback %s %s for EID %li (peer unspecified) %s(%d bytes)%s",
                      nse_type2str(nse->type), nse_status2str(nse->status),
                      nse->id, nse_eof(nse)? "[EOF]" : "", strlength, displaystr);
        }
      }
      break;

    case NSE_TYPE_WRITE:
      nsock_trace(ms, "Callback: %s %s %sfor EID %li [%s:%hu]",
                  nse_type2str(nse->type), nse_status2str(nse->status), errstr,
                  nse->id, inet_ntop_ez(&nsi->peer, nsi->peerlen),
                  nsi_peerport(nsi));
      break;

    case NSE_TYPE_TIMER:
      nsock_trace(ms, "Callback: %s %s %sfor EID %li",
                  nse_type2str(nse->type), nse_status2str(nse->status), errstr,
                  nse->id);
      break;

#if HAVE_PCAP
    case NSE_TYPE_PCAP_READ:
      nsock_trace(ms, "Callback: %s %s %sfor EID %li ",
                  nse_type2str(nse->type), nse_status2str(nse->status),
                  errstr, nse->id);
      break;
#endif

    default:
      assert(0);
      break;
  }
}