Mirror of https://github.com/nmap/nmap.git (synced 2025-12-06 04:31:29 +00:00)
Centralize initialization of scan_performance_vars.
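Before this change, osscan2.cc and scan_engine.cc each carried their own static init_perf_values() helper with slightly different defaults. The commit moves that work into scan_performance_vars::init() (defined in timing.cc) and has ultra_scan_performance_vars::init() chain to it. The following is a compressed, self-contained sketch of the resulting shape, not a copy of the nmap sources: the struct names match the diff, but "options"/"opts", the trimmed member set, and the literal values are illustrative stand-ins for the real NmapOps table "o".

// Sketch only: one init() on the base struct, extended by the ultra-scan
// struct, instead of two per-file static init_perf_values() helpers.
#include <cstdio>

struct options { int min_parallelism = 0, max_parallelism = 0, timing_level = 3,
                     max_retransmissions = 10; };
static options opts;   // stand-in for the global NmapOps table "o"

struct scan_performance_vars {
  int low_cwnd, max_cwnd;
  double group_drop_ssthresh_divisor;

  /* Do initialization after the global options table has been filled in. */
  void init() {
    low_cwnd = opts.min_parallelism > 1 ? opts.min_parallelism : 1;
    max_cwnd = opts.max_parallelism ? opts.max_parallelism : 300;
    group_drop_ssthresh_divisor = (opts.timing_level <= 3) ? 1.5 : 1.25;
  }
};

struct ultra_scan_performance_vars : public scan_performance_vars {
  int tryno_cap;

  void init() {
    scan_performance_vars::init();          // shared defaults first
    tryno_cap = opts.max_retransmissions;   // then the ultra-scan extras
  }
};

int main() {
  ultra_scan_performance_vars perf;
  perf.init();   // callers now do this instead of init_perf_values(&perf)
  std::printf("max_cwnd=%d tryno_cap=%d\n", perf.max_cwnd, perf.tryno_cap);
  return 0;
}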
timing.h
@@ -245,6 +245,9 @@ struct scan_performance_vars {
                                          any drop occurs */
   double host_drop_ssthresh_divisor; /* used to drop the host ssthresh when
                                          any drop occurs */
+
+  /* Do initialization after the global NmapOps table has been filled in. */
+  void init();
 };
 
 struct timeout_info {
osscan2.cc (22 changed lines)
@@ -311,25 +311,6 @@ int get_ipid_sequence(int numSamples, int *ipids, int islocalhost) {
 }
 
 
-/* This is the function for tuning the major values that affect
-   scan performance */
-static void init_perf_values() {
-  memset(&perf, 0, sizeof(perf));
-  /* TODO: I should revisit these values for tuning. They should probably
-     at least be affected by -T. */
-  perf.low_cwnd = MAX(o.min_parallelism, 1);
-  perf.max_cwnd = o.max_parallelism? o.max_parallelism : 300;
-  perf.group_initial_cwnd = box(o.min_parallelism, perf.max_cwnd, 10);
-  perf.host_initial_cwnd = perf.group_initial_cwnd;
-  perf.slow_incr = 1;
-  perf.ca_incr = 1;
-  perf.initial_ssthresh = 50;
-  perf.group_drop_cwnd_divisor = 2.0;
-  perf.group_drop_ssthresh_divisor = (o.timing_level < 4)? 2.0 : 1.5;
-  perf.host_drop_ssthresh_divisor = (o.timing_level < 4)? 2.0 : 1.5;
-}
-
-
 /* Start the timeout clocks of any targets that aren't already timedout */
 static void startTimeOutClocks(OsScanInfo *OSI) {
   list<HostOsScanInfo *>::iterator hostI;
@@ -3550,8 +3531,7 @@ int OSScan::os_scan_ipv4(vector<Target *> &Targets) {
     return OP_FAILURE;
   }
 
-  /* Init the necessary objects to perform the detection */
-  init_perf_values();
+  perf.init();
 
   OsScanInfo OSI(Targets);
   if (OSI.numIncompleteHosts() == 0) {
scan_engine.cc
@@ -126,6 +126,14 @@ struct ultra_scan_performance_vars : public scan_performance_vars {
                         in this many usecs */
   int pingtime;
   int tryno_cap; /* The maximum trynumber (starts at zero) allowed */
+
+  void init() {
+    scan_performance_vars::init();
+    ping_magnifier = 3;
+    pingtime = 1250000;
+    tryno_cap = o.getMaxRetransmissions();
+  }
 };
 
 static const char *pspectype2ascii(int type) {
@@ -1429,40 +1437,6 @@ double UltraScanInfo::getCompletionFraction() {
   return total / gstats->numtargets;
 }
 
-/* This is the function for tuning the major values that affect
-   scan performance */
-static void init_perf_values(struct ultra_scan_performance_vars *perf) {
-  memset(perf, 0, sizeof(*perf));
-  /* TODO: I should revisit these values for tuning. They should probably
-     at least be affected by -T. */
-  perf->low_cwnd = MAX(o.min_parallelism, 1);
-  perf->max_cwnd = o.max_parallelism? o.max_parallelism : 300;
-  perf->group_initial_cwnd = box(o.min_parallelism, perf->max_cwnd, 10);
-  perf->host_initial_cwnd = perf->group_initial_cwnd;
-  perf->slow_incr = 1;
-  /* The congestion window grows faster with more aggressive timing. */
-  if (o.timing_level < 4)
-    perf->ca_incr = 1;
-  else
-    perf->ca_incr = 2;
-  perf->cc_scale_max = 50;
-  perf->initial_ssthresh = 75;
-  perf->ping_magnifier = 3;
-  perf->pingtime = 1250000;
-  perf->group_drop_cwnd_divisor = 2.0;
-  /* Change the amount that ssthresh drops based on the timing level. */
-  double ssthresh_divisor;
-  if (o.timing_level <= 3)
-    ssthresh_divisor = (3.0 / 2.0);
-  else if (o.timing_level <= 4)
-    ssthresh_divisor = (4.0 / 3.0);
-  else
-    ssthresh_divisor = (5.0 / 4.0);
-  perf->group_drop_ssthresh_divisor = ssthresh_divisor;
-  perf->host_drop_ssthresh_divisor = ssthresh_divisor;
-  perf->tryno_cap = o.getMaxRetransmissions();
-}
-
 /* Initialize the state for ports that don't receive a response in all the
    targets. */
 static void set_default_port_state(vector<Target *> &targets, stype scantype) {
@@ -1580,7 +1554,7 @@ void UltraScanInfo::Init(vector<Target *> &Targets, struct scan_lists *pts, styp
 
   set_default_port_state(Targets, scantype);
 
-  init_perf_values(&perf);
+  perf.init();
 
   /* Keep a completed host around for a standard TCP MSL (2 min) */
   completedHostLifetime = 120000;
timing.cc (29 changed lines)
@@ -229,6 +229,35 @@ void enforce_scan_delay(struct timeval *tv) {
   return;
 }
 
+/* Do initialization after the global NmapOps table has been filled in. */
+void scan_performance_vars::init() {
+  /* TODO: I should revisit these values for tuning. They should probably
+     at least be affected by -T. */
+  low_cwnd = MAX(o.min_parallelism, 1);
+  max_cwnd = o.max_parallelism? o.max_parallelism : 300;
+  group_initial_cwnd = box(o.min_parallelism, max_cwnd, 10);
+  host_initial_cwnd = group_initial_cwnd;
+  slow_incr = 1;
+  /* The congestion window grows faster with more aggressive timing. */
+  if (o.timing_level < 4)
+    ca_incr = 1;
+  else
+    ca_incr = 2;
+  cc_scale_max = 50;
+  initial_ssthresh = 75;
+  group_drop_cwnd_divisor = 2.0;
+  /* Change the amount that ssthresh drops based on the timing level. */
+  double ssthresh_divisor;
+  if (o.timing_level <= 3)
+    ssthresh_divisor = (3.0 / 2.0);
+  else if (o.timing_level <= 4)
+    ssthresh_divisor = (4.0 / 3.0);
+  else
+    ssthresh_divisor = (5.0 / 4.0);
+  group_drop_ssthresh_divisor = ssthresh_divisor;
+  host_drop_ssthresh_divisor = ssthresh_divisor;
+}
+
 /* current_rate_history defines how far back (in seconds) we look when
    calculating the current rate. */
 RateMeter::RateMeter(double current_rate_history) {
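A side effect of the centralization is a behavior change for OS detection: the old osscan2.cc copy used initial_ssthresh = 50 and a flat drop divisor of 2.0 (1.5 at -T4 and above), while both scan engines now share the values set in timing.cc above (initial_ssthresh = 75 and the fractional divisors). To make the timing-level effect concrete, here is a standalone restatement of just the -T-dependent branches from that hunk; perf_defaults and defaults_for are illustrative names, not nmap code, and timing_level stands in for o.timing_level.

// Sketch only: reproduces the ca_incr and ssthresh-divisor selection above.
#include <cstdio>

struct perf_defaults {
  int ca_incr;                         // congestion-window growth step
  double group_drop_ssthresh_divisor;  // ssthresh shrink factor on a drop
};

static perf_defaults defaults_for(int timing_level) {
  perf_defaults d;
  /* The congestion window grows faster with more aggressive timing. */
  d.ca_incr = (timing_level < 4) ? 1 : 2;
  /* ssthresh drops by a smaller factor at higher timing levels. */
  if (timing_level <= 3)
    d.group_drop_ssthresh_divisor = 3.0 / 2.0;
  else if (timing_level <= 4)
    d.group_drop_ssthresh_divisor = 4.0 / 3.0;
  else
    d.group_drop_ssthresh_divisor = 5.0 / 4.0;
  return d;
}

int main() {
  for (int t = 0; t <= 5; t++) {
    perf_defaults d = defaults_for(t);
    std::printf("-T%d: ca_incr=%d, drop ssthresh divisor=%.2f\n",
                t, d.ca_incr, d.group_drop_ssthresh_divisor);
  }
  return 0;
}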