1
0
mirror of https://github.com/nmap/nmap.git synced 2025-12-06 04:31:29 +00:00

Factor out timing ack and drop code.

This commit is contained in:
david
2011-12-31 21:59:57 +00:00
parent 8d52190d9d
commit 8ba1cf6b15
4 changed files with 77 additions and 122 deletions

View File

@@ -223,6 +223,13 @@ struct ultra_timing_vals {
to adjust again based on probes sent after that adjustment so a
sudden batch of drops doesn't destroy timing. Init to now */
struct timeval last_drop;
double cc_scale(const struct scan_performance_vars *perf);
void ack(const struct scan_performance_vars *perf, double scale = 1.0);
void drop(unsigned in_flight,
const struct scan_performance_vars *perf, const struct timeval *now);
void drop_group(unsigned in_flight,
const struct scan_performance_vars *perf, const struct timeval *now);
};
/* These are mainly initializers for ultra_timing_vals. */

View File

@@ -1179,15 +1179,6 @@ double HostOsScanStats::timingRatio() {
return (double) msec_taken / msec_ideal;
}
/* Scaling factor applied when growing the congestion window: the ratio of
   replies expected to replies received, clamped to perf.cc_scale_max so a
   few losses cannot inflate the window growth unboundedly. */
double HostOsScanStats::cc_scale() {
  assert(timing.num_replies_received > 0);
  const double ratio =
    (double) timing.num_replies_expected / timing.num_replies_received;
  return MIN(ratio, perf.cc_scale_max);
}
/******************************************************************************
* Implementation of class HostOsScan *
@@ -1237,45 +1228,15 @@ void HostOsScan::adjust_times(HostOsScanStats *hss, OFProbe *probe, struct timev
/* Adjust window */
if (probe->tryno > 0 || !rcvdtime) {
if (TIMEVAL_AFTER(probe->sent, hss->timing.last_drop)) {
hss->timing.cwnd = perf.low_cwnd;
hss->timing.ssthresh = (int) MAX(hss->numProbesActive() / perf.host_drop_ssthresh_divisor, 2);
hss->timing.last_drop = now;
}
if (TIMEVAL_AFTER(probe->sent, stats->timing.last_drop)) {
stats->timing.cwnd = MAX(perf.low_cwnd, stats->timing.cwnd / perf.group_drop_cwnd_divisor);
stats->timing.ssthresh = (int) MAX(stats->num_probes_active / perf.group_drop_ssthresh_divisor, 2);
stats->timing.last_drop = now;
}
if (TIMEVAL_AFTER(probe->sent, hss->timing.last_drop))
hss->timing.drop(hss->numProbesActive(), &perf, &now);
if (TIMEVAL_AFTER(probe->sent, stats->timing.last_drop))
stats->timing.drop_group(stats->num_probes_active, &perf, &now);
} else {
/* Good news -- got a response to first try. Increase window as
appropriate. */
stats->timing.num_replies_received++;
hss->timing.num_replies_received++;
if (stats->timing.cwnd < stats->timing.ssthresh) {
/* In slow start mode */
stats->timing.cwnd += perf.slow_incr * stats->cc_scale();
if (stats->timing.cwnd > stats->timing.ssthresh)
stats->timing.cwnd = stats->timing.ssthresh;
} else {
/* Congestion avoidance mode */
stats->timing.cwnd += perf.ca_incr / stats->timing.cwnd * stats->cc_scale();
}
if (stats->timing.cwnd > perf.max_cwnd)
stats->timing.cwnd = perf.max_cwnd;
if (hss->timing.cwnd < hss->timing.ssthresh) {
/* In slow start mode */
hss->timing.cwnd += perf.slow_incr * hss->cc_scale();
if (hss->timing.cwnd > hss->timing.ssthresh)
hss->timing.cwnd = hss->timing.ssthresh;
} else {
/* Congestion avoidance mode */
hss->timing.cwnd += perf.ca_incr / hss->timing.cwnd * hss->cc_scale();
}
if (hss->timing.cwnd > perf.max_cwnd)
hss->timing.cwnd = perf.max_cwnd;
stats->timing.ack(&perf);
hss->timing.ack(&perf);
}
}
@@ -2204,16 +2165,6 @@ bool ScanStats::sendOK() {
}
/* Scaling factor for congestion-window increments: expected/received reply
   ratio, capped at perf.cc_scale_max. Requires at least one reply received. */
double ScanStats::cc_scale() {
  assert(timing.num_replies_received > 0);
  const double ratio =
    (double) timing.num_replies_expected / timing.num_replies_received;
  return MIN(ratio, perf.cc_scale_max);
}
/******************************************************************************
* Implementation of class HostOsScan *
******************************************************************************/

View File

@@ -332,10 +332,6 @@ public:
/* The last time waitForResponses finished (initialized to GSS creation time */
int probes_sent; /* Number of probes sent in total. This DOES include pings and retransmissions */
/* Returns the scaling factor to use when incrementing the congestion
window. */
double cc_scale();
/* The most recently received probe response time -- initialized to scan
start time. */
struct timeval lastrcvd;
@@ -558,9 +554,6 @@ public:
bool tryno_mayincrease;
int ports_finished; /* The number of ports of this host that have been determined */
int numprobes_sent; /* Number of port probes (not counting pings, but counting retransmits) sent to this host */
/* Returns the scaling factor to use when incrementing the congestion
window. */
double cc_scale();
/* Boost the scan delay for this host, usually because too many packet
drops were detected. */
void boostScanDelay();
@@ -1060,18 +1053,6 @@ bool GroupScanStats::sendOK(struct timeval *when) {
return false;
}
/* Scaling factor used when the group congestion window is incremented.
   Computed as num_replies_expected / num_replies_received, but never more
   than cc_scale_max. At least one reply must have been received. */
double GroupScanStats::cc_scale() {
  assert(timing.num_replies_received > 0);
  const double ratio =
    (double) timing.num_replies_expected / timing.num_replies_received;
  return MIN(ratio, USI->perf.cc_scale_max);
}
/* Return true if pingprobe is an appropriate ping probe for the currently
running scan. Because ping probes persist between host discovery and port
scanning stages, it's possible to have a ping probe that is not relevant for
@@ -2210,45 +2191,15 @@ static void ultrascan_adjust_timing(UltraScanInfo *USI, HostScanStats *hss,
if (o.debugging > 1)
log_write(LOG_PLAIN, "Ultrascan DROPPED %sprobe packet to %s detected\n", probe->isPing()? "PING " : "", hss->target->targetipstr());
// Drops often come in big batches, but we only want one decrease per batch.
if (TIMEVAL_AFTER(probe->sent, hss->timing.last_drop)) {
hss->timing.cwnd = USI->perf.low_cwnd;
hss->timing.ssthresh = (int) MAX(hss->num_probes_active / USI->perf.host_drop_ssthresh_divisor, 2);
hss->timing.last_drop = USI->now;
}
if (TIMEVAL_AFTER(probe->sent, USI->gstats->timing.last_drop)) {
USI->gstats->timing.cwnd = MAX(USI->perf.low_cwnd, USI->gstats->timing.cwnd / USI->perf.group_drop_cwnd_divisor);
USI->gstats->timing.ssthresh = (int) MAX(USI->gstats->num_probes_active / USI->perf.group_drop_ssthresh_divisor, 2);
USI->gstats->timing.last_drop = USI->now;
}
if (TIMEVAL_AFTER(probe->sent, hss->timing.last_drop))
hss->timing.drop(hss->num_probes_active, &USI->perf, &USI->now);
if (TIMEVAL_AFTER(probe->sent, USI->gstats->timing.last_drop))
USI->gstats->timing.drop_group(USI->gstats->num_probes_active, &USI->perf, &USI->now);
} else if (rcvdtime != NULL) {
/* Good news -- got a response to first try. Increase window as
appropriate. */
USI->gstats->timing.num_replies_received++;
hss->timing.num_replies_received++;
if (USI->gstats->timing.cwnd < USI->gstats->timing.ssthresh) {
/* In slow start mode */
USI->gstats->timing.cwnd += ping_magnifier * USI->perf.slow_incr * USI->gstats->cc_scale();
if (USI->gstats->timing.cwnd > USI->gstats->timing.ssthresh)
USI->gstats->timing.cwnd = USI->gstats->timing.ssthresh;
} else {
/* Congestion avoidance mode */
USI->gstats->timing.cwnd += ping_magnifier * USI->perf.ca_incr / USI->gstats->timing.cwnd * USI->gstats->cc_scale();
}
if (USI->gstats->timing.cwnd > USI->perf.max_cwnd)
USI->gstats->timing.cwnd = USI->perf.max_cwnd;
if (hss->timing.cwnd < hss->timing.ssthresh) {
/* In slow start mode */
hss->timing.cwnd += ping_magnifier * hss->cc_scale();
if (hss->timing.cwnd > hss->timing.ssthresh)
hss->timing.cwnd = hss->timing.ssthresh;
} else {
/* Congestion avoidance mode */
hss->timing.cwnd += ping_magnifier * USI->perf.ca_incr / hss->timing.cwnd * hss->cc_scale();
}
if (hss->timing.cwnd > USI->perf.max_cwnd)
hss->timing.cwnd = USI->perf.max_cwnd;
USI->gstats->timing.ack(&USI->perf, ping_magnifier);
hss->timing.ack(&USI->perf, ping_magnifier);
}
/* If !probe->isPing() and rcvdtime == NULL, do nothing. */
@@ -2675,18 +2626,6 @@ static bool ultrascan_port_pspec_update(UltraScanInfo *USI,
return oldstate != newstate;
}
/* Scaling factor used when this host's congestion window is incremented.
   Computed as num_replies_expected / num_replies_received, but never more
   than cc_scale_max. At least one reply must have been received. */
double HostScanStats::cc_scale() {
  assert(timing.num_replies_received > 0);
  const double ratio =
    (double) timing.num_replies_expected / timing.num_replies_received;
  return MIN(ratio, USI->perf.cc_scale_max);
}
/* Boost the scan delay for this host, usually because too many packet
drops were detected. */
void HostScanStats::boostScanDelay() {

View File

@@ -229,6 +229,64 @@ void enforce_scan_delay(struct timeval *tv) {
return;
}
/* Factor by which congestion-window increments are scaled: the ratio of
   expected to received replies, clamped to perf->cc_scale_max. Asserts that
   at least one reply has been counted. */
double ultra_timing_vals::cc_scale(const struct scan_performance_vars *perf) {
  assert(num_replies_received > 0);
  const double ratio =
    (double) num_replies_expected / num_replies_received;
  return MIN(ratio, perf->cc_scale_max);
}
/* Account for a received reply: grow the congestion window following the
   TCP congestion-control model (RFC 5681). `scale` magnifies the increment
   (e.g. for ping probes). */
void ultra_timing_vals::ack(const struct scan_performance_vars *perf, double scale) {
  num_replies_received++;
  const double cc = cc_scale(perf);
  if (cwnd < ssthresh) {
    /* Slow start: grow quickly per reply, but never overshoot ssthresh
       ("a TCP increments cwnd by at most SMSS bytes for each ACK"). */
    cwnd += perf->slow_incr * cc * scale;
    if (cwnd > ssthresh)
      cwnd = ssthresh;
  } else {
    /* Congestion avoidance: approximately one full increment per
       round-trip, via the classic cwnd += SMSS*SMSS/cwnd approximation. */
    cwnd += perf->ca_incr / cwnd * cc * scale;
  }
  /* Hard upper bound on the window regardless of mode. */
  if (cwnd > perf->max_cwnd)
    cwnd = perf->max_cwnd;
}
/* React to a drop detected by timeout, per the RFC 5681 timeout case:
   cwnd collapses to the loss window (low_cwnd) and ssthresh is reset from
   the number of probes currently outstanding (never below 2). */
void ultra_timing_vals::drop(unsigned in_flight,
  const struct scan_performance_vars *perf, const struct timeval *now) {
  last_drop = *now;
  ssthresh = (int) MAX(in_flight / perf->host_drop_ssthresh_divisor, 2);
  cwnd = perf->low_cwnd;
}
/* React to a detected drop for group-level congestion control. Gentler than
   drop(): cwnd is divided rather than collapsed, with low_cwnd as a floor. */
void ultra_timing_vals::drop_group(unsigned in_flight,
  const struct scan_performance_vars *perf, const struct timeval *now) {
  last_drop = *now;
  ssthresh = (int) MAX(in_flight / perf->group_drop_ssthresh_divisor, 2);
  cwnd = MAX(perf->low_cwnd, cwnd / perf->group_drop_cwnd_divisor);
}
/* Do initialization after the global NmapOps table has been filled in. */
void scan_performance_vars::init() {
/* TODO: I should revisit these values for tuning. They should probably