mirror of
https://github.com/nmap/nmap.git
synced 2025-12-06 04:31:29 +00:00
Merge from /nmap-exp/luis/nmap-os6.
svn merge --ignore-ancestry svn://svn.insecure.org/nmap@26621 svn://svn.insecure.org/nmap-exp/luis/nmap-os6 This is the IPv6 OS detection branch. "nmap -6 -O" works now, though at this point it only prints fingerprints and not OS guesses, because we need to collect more submissions.
This commit is contained in:
31
liblinear/COPYRIGHT
Normal file
31
liblinear/COPYRIGHT
Normal file
@@ -0,0 +1,31 @@
|
||||
|
||||
Copyright (c) 2007-2011 The LIBLINEAR Project.
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions
|
||||
are met:
|
||||
|
||||
1. Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
|
||||
2. Redistributions in binary form must reproduce the above copyright
|
||||
notice, this list of conditions and the following disclaimer in the
|
||||
documentation and/or other materials provided with the distribution.
|
||||
|
||||
3. Neither name of copyright holders nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
|
||||
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
|
||||
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
|
||||
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
|
||||
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
|
||||
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
36
liblinear/Makefile
Normal file
36
liblinear/Makefile
Normal file
@@ -0,0 +1,36 @@
|
||||
CXX ?= g++
|
||||
CC ?= gcc
|
||||
CFLAGS = -Wall -Wconversion -O3 -fPIC
|
||||
LIBS = blas/blas.a
|
||||
SHVER = 1
|
||||
AR = ar
|
||||
RANLIB = ranlib
|
||||
#LIBS = -lblas
|
||||
|
||||
all: train predict
|
||||
|
||||
lib: linear.o tron.o blas/blas.a
|
||||
$(CXX) -shared -dynamiclib linear.o tron.o blas/blas.a -o liblinear.so.$(SHVER)
|
||||
|
||||
liblinear.a: linear.o tron.o blas/blas.a
|
||||
$(AR) rcv liblinear.a linear.o tron.o blas/*.o
|
||||
$(RANLIB) liblinear.a
|
||||
|
||||
train: tron.o linear.o train.c blas/blas.a
|
||||
$(CXX) $(CFLAGS) -o train train.c tron.o linear.o $(LIBS)
|
||||
|
||||
predict: tron.o linear.o predict.c blas/blas.a
|
||||
$(CXX) $(CFLAGS) -o predict predict.c tron.o linear.o $(LIBS)
|
||||
|
||||
tron.o: tron.cpp tron.h
|
||||
$(CXX) $(CFLAGS) -c -o tron.o tron.cpp
|
||||
|
||||
linear.o: linear.cpp linear.h
|
||||
$(CXX) $(CFLAGS) -c -o linear.o linear.cpp
|
||||
|
||||
blas/blas.a:
|
||||
cd blas; make OPTFLAGS='$(CFLAGS)' CC='$(CC)';
|
||||
|
||||
clean:
|
||||
cd blas; make clean
|
||||
rm -f *~ tron.o linear.o train predict liblinear.so.$(SHVER) liblinear.a
|
||||
30
liblinear/Makefile.win
Normal file
30
liblinear/Makefile.win
Normal file
@@ -0,0 +1,30 @@
|
||||
#You must ensure nmake.exe, cl.exe, link.exe are in system path.
|
||||
#VCVARS32.bat
|
||||
#Under dosbox prompt
|
||||
#nmake -f Makefile.win
|
||||
|
||||
##########################################
|
||||
CXXC = cl.exe
|
||||
CFLAGS = -nologo -O2 -EHsc -I. -D __WIN32__ -D _CRT_SECURE_NO_DEPRECATE
|
||||
TARGET = windows
|
||||
|
||||
all: $(TARGET)\train.exe $(TARGET)\predict.exe
|
||||
|
||||
$(TARGET)\train.exe: tron.obj linear.obj train.c blas\*.c
|
||||
$(CXX) $(CFLAGS) -Fe$(TARGET)\train.exe tron.obj linear.obj train.c blas\*.c
|
||||
|
||||
$(TARGET)\predict.exe: tron.obj linear.obj predict.c blas\*.c
|
||||
$(CXX) $(CFLAGS) -Fe$(TARGET)\predict.exe tron.obj linear.obj predict.c blas\*.c
|
||||
|
||||
linear.obj: linear.cpp linear.h
|
||||
$(CXX) $(CFLAGS) -c linear.cpp
|
||||
|
||||
tron.obj: tron.cpp tron.h
|
||||
$(CXX) $(CFLAGS) -c tron.cpp
|
||||
|
||||
lib: linear.cpp linear.h linear.def tron.obj
|
||||
$(CXX) $(CFLAGS) -LD linear.cpp tron.obj blas\*.c -Fe$(TARGET)\liblinear -link -DEF:linear.def
|
||||
|
||||
clean:
|
||||
-erase /Q *.obj $(TARGET)\.
|
||||
|
||||
485
liblinear/README
Normal file
485
liblinear/README
Normal file
@@ -0,0 +1,485 @@
|
||||
LIBLINEAR is a simple package for solving large-scale regularized
|
||||
linear classification. It currently supports L2-regularized logistic
|
||||
regression/L2-loss support vector classification/L1-loss support vector
|
||||
classification, and L1-regularized L2-loss support vector classification/
|
||||
logistic regression. This document explains the usage of LIBLINEAR.
|
||||
|
||||
To get started, please read the ``Quick Start'' section first.
|
||||
For developers, please check the ``Library Usage'' section to learn
|
||||
how to integrate LIBLINEAR in your software.
|
||||
|
||||
Table of Contents
|
||||
=================
|
||||
|
||||
- When to use LIBLINEAR but not LIBSVM
|
||||
- Quick Start
|
||||
- Installation
|
||||
- `train' Usage
|
||||
- `predict' Usage
|
||||
- Examples
|
||||
- Library Usage
|
||||
- Building Windows Binaries
|
||||
- Additional Information
|
||||
- MATLAB/OCTAVE interface
|
||||
- PYTHON interface
|
||||
|
||||
When to use LIBLINEAR but not LIBSVM
|
||||
====================================
|
||||
|
||||
There are some large data for which with/without nonlinear mappings
|
||||
gives similar performances. Without using kernels, one can
|
||||
efficiently train a much larger set via a linear classifier. These
|
||||
data usually have a large number of features. Document classification
|
||||
is an example.
|
||||
|
||||
Warning: While generally liblinear is very fast, its default solver
|
||||
may be slow under certain situations (e.g., data not scaled or C is
|
||||
large). See Appendix B of our SVM guide about how to handle such
|
||||
cases.
|
||||
http://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf
|
||||
|
||||
Warning: If you are a beginner and your data sets are not large, you
|
||||
should consider LIBSVM first.
|
||||
|
||||
LIBSVM page:
|
||||
http://www.csie.ntu.edu.tw/~cjlin/libsvm
|
||||
|
||||
|
||||
Quick Start
|
||||
===========
|
||||
|
||||
See the section ``Installation'' for installing LIBLINEAR.
|
||||
|
||||
After installation, there are programs `train' and `predict' for
|
||||
training and testing, respectively.
|
||||
|
||||
About the data format, please check the README file of LIBSVM. Note
|
||||
that feature index must start from 1 (but not 0).
|
||||
|
||||
A sample classification data included in this package is `heart_scale'.
|
||||
|
||||
Type `train heart_scale', and the program will read the training
|
||||
data and output the model file `heart_scale.model'. If you have a test
|
||||
set called heart_scale.t, then type `predict heart_scale.t
|
||||
heart_scale.model output' to see the prediction accuracy. The `output'
|
||||
file contains the predicted class labels.
|
||||
|
||||
For more information about `train' and `predict', see the sections
|
||||
`train' Usage and `predict' Usage.
|
||||
|
||||
To obtain good performances, sometimes one needs to scale the
|
||||
data. Please check the program `svm-scale' of LIBSVM. For large and
|
||||
sparse data, use `-l 0' to keep the sparsity.
|
||||
|
||||
Installation
|
||||
============
|
||||
|
||||
On Unix systems, type `make' to build the `train' and `predict'
|
||||
programs. Run them without arguments to show the usages.
|
||||
|
||||
On other systems, consult `Makefile' to build them (e.g., see
|
||||
'Building Windows binaries' in this file) or use the pre-built
|
||||
binaries (Windows binaries are in the directory `windows').
|
||||
|
||||
This software uses some level-1 BLAS subroutines. The needed functions are
|
||||
included in this package. If a BLAS library is available on your
|
||||
machine, you may use it by modifying the Makefile: Unmark the following line
|
||||
|
||||
#LIBS ?= -lblas
|
||||
|
||||
and mark
|
||||
|
||||
LIBS ?= blas/blas.a
|
||||
|
||||
`train' Usage
|
||||
=============
|
||||
|
||||
Usage: train [options] training_set_file [model_file]
|
||||
options:
|
||||
-s type : set type of solver (default 1)
|
||||
0 -- L2-regularized logistic regression (primal)
|
||||
1 -- L2-regularized L2-loss support vector classification (dual)
|
||||
2 -- L2-regularized L2-loss support vector classification (primal)
|
||||
3 -- L2-regularized L1-loss support vector classification (dual)
|
||||
4 -- multi-class support vector classification by Crammer and Singer
|
||||
5 -- L1-regularized L2-loss support vector classification
|
||||
6 -- L1-regularized logistic regression
|
||||
7 -- L2-regularized logistic regression (dual)
|
||||
-c cost : set the parameter C (default 1)
|
||||
-e epsilon : set tolerance of termination criterion
|
||||
-s 0 and 2
|
||||
|f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,
|
||||
where f is the primal function and pos/neg are # of
|
||||
positive/negative data (default 0.01)
|
||||
-s 1, 3, 4 and 7
|
||||
Dual maximal violation <= eps; similar to libsvm (default 0.1)
|
||||
-s 5 and 6
|
||||
|f'(w)|_inf <= eps*min(pos,neg)/l*|f'(w0)|_inf,
|
||||
where f is the primal function (default 0.01)
|
||||
-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)
|
||||
-wi weight: weights adjust the parameter C of different classes (see README for details)
|
||||
-v n: n-fold cross validation mode
|
||||
-q : quiet mode (no outputs)
|
||||
|
||||
Option -v randomly splits the data into n parts and calculates cross
|
||||
validation accuracy on them.
|
||||
|
||||
Formulations:
|
||||
|
||||
For L2-regularized logistic regression (-s 0), we solve
|
||||
|
||||
min_w w^Tw/2 + C \sum log(1 + exp(-y_i w^Tx_i))
|
||||
|
||||
For L2-regularized L2-loss SVC dual (-s 1), we solve
|
||||
|
||||
min_alpha 0.5(alpha^T (Q + I/2/C) alpha) - e^T alpha
|
||||
s.t. 0 <= alpha_i,
|
||||
|
||||
For L2-regularized L2-loss SVC (-s 2), we solve
|
||||
|
||||
min_w w^Tw/2 + C \sum max(0, 1- y_i w^Tx_i)^2
|
||||
|
||||
For L2-regularized L1-loss SVC dual (-s 3), we solve
|
||||
|
||||
min_alpha 0.5(alpha^T Q alpha) - e^T alpha
|
||||
s.t. 0 <= alpha_i <= C,
|
||||
|
||||
For L1-regularized L2-loss SVC (-s 5), we solve
|
||||
|
||||
min_w \sum |w_j| + C \sum max(0, 1- y_i w^Tx_i)^2
|
||||
|
||||
For L1-regularized logistic regression (-s 6), we solve
|
||||
|
||||
min_w \sum |w_j| + C \sum log(1 + exp(-y_i w^Tx_i))
|
||||
|
||||
where
|
||||
|
||||
Q is a matrix with Q_ij = y_i y_j x_i^T x_j.
|
||||
|
||||
For L2-regularized logistic regression (-s 7), we solve
|
||||
|
||||
min_alpha 0.5(alpha^T Q alpha) + \sum alpha_i*log(alpha_i) + \sum (C-alpha_i)*log(C-alpha_i) - a constant
|
||||
s.t. 0 <= alpha_i <= C,
|
||||
|
||||
If bias >= 0, w becomes [w; w_{n+1}] and x becomes [x; bias].
|
||||
|
||||
The primal-dual relationship implies that -s 1 and -s 2 give the same
|
||||
model, and -s 0 and -s 7 give the same.
|
||||
|
||||
We implement 1-vs-the rest multi-class strategy. In training i
|
||||
vs. non_i, their C parameters are (weight from -wi)*C and C,
|
||||
respectively. If there are only two classes, we train only one
|
||||
model. Thus weight1*C vs. weight2*C is used. See examples below.
|
||||
|
||||
We also implement multi-class SVM by Crammer and Singer (-s 4):
|
||||
|
||||
min_{w_m, \xi_i} 0.5 \sum_m ||w_m||^2 + C \sum_i \xi_i
|
||||
s.t. w^T_{y_i} x_i - w^T_m x_i >= \e^m_i - \xi_i \forall m,i
|
||||
|
||||
where e^m_i = 0 if y_i = m,
|
||||
e^m_i = 1 if y_i != m,
|
||||
|
||||
Here we solve the dual problem:
|
||||
|
||||
min_{\alpha} 0.5 \sum_m ||w_m(\alpha)||^2 + \sum_i \sum_m e^m_i alpha^m_i
|
||||
s.t. \alpha^m_i <= C^m_i \forall m,i , \sum_m \alpha^m_i=0 \forall i
|
||||
|
||||
where w_m(\alpha) = \sum_i \alpha^m_i x_i,
|
||||
and C^m_i = C if m = y_i,
|
||||
C^m_i = 0 if m != y_i.
|
||||
|
||||
`predict' Usage
|
||||
===============
|
||||
|
||||
Usage: predict [options] test_file model_file output_file
|
||||
options:
|
||||
-b probability_estimates: whether to predict probability estimates, 0 or 1 (default 0)
|
||||
|
||||
Examples
|
||||
========
|
||||
|
||||
> train data_file
|
||||
|
||||
Train linear SVM with L2-loss function.
|
||||
|
||||
> train -s 0 data_file
|
||||
|
||||
Train a logistic regression model.
|
||||
|
||||
> train -v 5 -e 0.001 data_file
|
||||
|
||||
Do five-fold cross-validation using L2-loss svm.
|
||||
Use a smaller stopping tolerance 0.001 than the default
|
||||
0.1 if you want more accurate solutions.
|
||||
|
||||
> train -c 10 -w1 2 -w2 5 -w3 2 four_class_data_file
|
||||
|
||||
Train four classifiers:
|
||||
positive negative Cp Cn
|
||||
class 1 class 2,3,4. 20 10
|
||||
class 2 class 1,3,4. 50 10
|
||||
class 3 class 1,2,4. 20 10
|
||||
class 4 class 1,2,3. 10 10
|
||||
|
||||
> train -c 10 -w3 1 -w2 5 two_class_data_file
|
||||
|
||||
If there are only two classes, we train ONE model.
|
||||
The C values for the two classes are 10 and 50.
|
||||
|
||||
> predict -b 1 test_file data_file.model output_file
|
||||
|
||||
Output probability estimates (for logistic regression only).
|
||||
|
||||
Library Usage
|
||||
=============
|
||||
|
||||
- Function: model* train(const struct problem *prob,
|
||||
const struct parameter *param);
|
||||
|
||||
This function constructs and returns a linear classification model
|
||||
according to the given training data and parameters.
|
||||
|
||||
struct problem describes the problem:
|
||||
|
||||
struct problem
|
||||
{
|
||||
int l, n;
|
||||
int *y;
|
||||
struct feature_node **x;
|
||||
double bias;
|
||||
};
|
||||
|
||||
where `l' is the number of training data. If bias >= 0, we assume
|
||||
that one additional feature is added to the end of each data
|
||||
instance. `n' is the number of feature (including the bias feature
|
||||
if bias >= 0). `y' is an array containing the target values. And
|
||||
`x' is an array of pointers,
|
||||
each of which points to a sparse representation (array of feature_node) of one
|
||||
training vector.
|
||||
|
||||
For example, if we have the following training data:
|
||||
|
||||
LABEL ATTR1 ATTR2 ATTR3 ATTR4 ATTR5
|
||||
----- ----- ----- ----- ----- -----
|
||||
1 0 0.1 0.2 0 0
|
||||
2 0 0.1 0.3 -1.2 0
|
||||
1 0.4 0 0 0 0
|
||||
2 0 0.1 0 1.4 0.5
|
||||
3 -0.1 -0.2 0.1 1.1 0.1
|
||||
|
||||
and bias = 1, then the components of problem are:
|
||||
|
||||
l = 5
|
||||
n = 6
|
||||
|
||||
y -> 1 2 1 2 3
|
||||
|
||||
x -> [ ] -> (2,0.1) (3,0.2) (6,1) (-1,?)
|
||||
[ ] -> (2,0.1) (3,0.3) (4,-1.2) (6,1) (-1,?)
|
||||
[ ] -> (1,0.4) (6,1) (-1,?)
|
||||
[ ] -> (2,0.1) (4,1.4) (5,0.5) (6,1) (-1,?)
|
||||
[ ] -> (1,-0.1) (2,-0.2) (3,0.1) (4,1.1) (5,0.1) (6,1) (-1,?)
|
||||
|
||||
struct parameter describes the parameters of a linear classification model:
|
||||
|
||||
struct parameter
|
||||
{
|
||||
int solver_type;
|
||||
|
||||
/* these are for training only */
|
||||
double eps; /* stopping criteria */
|
||||
double C;
|
||||
int nr_weight;
|
||||
int *weight_label;
|
||||
double* weight;
|
||||
};
|
||||
|
||||
solver_type can be one of L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL.
|
||||
|
||||
L2R_LR L2-regularized logistic regression (primal)
|
||||
L2R_L2LOSS_SVC_DUAL L2-regularized L2-loss support vector classification (dual)
|
||||
L2R_L2LOSS_SVC L2-regularized L2-loss support vector classification (primal)
|
||||
L2R_L1LOSS_SVC_DUAL L2-regularized L1-loss support vector classification (dual)
|
||||
MCSVM_CS multi-class support vector classification by Crammer and Singer
|
||||
L1R_L2LOSS_SVC L1-regularized L2-loss support vector classification
|
||||
L1R_LR L1-regularized logistic regression
|
||||
L2R_LR_DUAL L2-regularized logistic regression (dual)
|
||||
|
||||
C is the cost of constraints violation.
|
||||
eps is the stopping criterion.
|
||||
|
||||
nr_weight, weight_label, and weight are used to change the penalty
|
||||
for some classes (If the weight for a class is not changed, it is
|
||||
set to 1). This is useful for training classifier using unbalanced
|
||||
input data or with asymmetric misclassification cost.
|
||||
|
||||
nr_weight is the number of elements in the array weight_label and
|
||||
weight. Each weight[i] corresponds to weight_label[i], meaning that
|
||||
the penalty of class weight_label[i] is scaled by a factor of weight[i].
|
||||
|
||||
If you do not want to change penalty for any of the classes,
|
||||
just set nr_weight to 0.
|
||||
|
||||
*NOTE* To avoid wrong parameters, check_parameter() should be
|
||||
called before train().
|
||||
|
||||
struct model stores the model obtained from the training procedure:
|
||||
|
||||
struct model
|
||||
{
|
||||
struct parameter param;
|
||||
int nr_class; /* number of classes */
|
||||
int nr_feature;
|
||||
double *w;
|
||||
int *label; /* label of each class */
|
||||
double bias;
|
||||
};
|
||||
|
||||
param describes the parameters used to obtain the model.
|
||||
|
||||
nr_class and nr_feature are the number of classes and features, respectively.
|
||||
|
||||
The nr_feature*nr_class array w gives feature weights. We use one
|
||||
against the rest for multi-class classification, so each feature
|
||||
index corresponds to nr_class weight values. Weights are
|
||||
organized in the following way
|
||||
|
||||
+------------------+------------------+------------+
|
||||
| nr_class weights | nr_class weights | ...
|
||||
| for 1st feature | for 2nd feature |
|
||||
+------------------+------------------+------------+
|
||||
|
||||
If bias >= 0, x becomes [x; bias]. The number of features is
|
||||
increased by one, so w is a (nr_feature+1)*nr_class array. The
|
||||
value of bias is stored in the variable bias.
|
||||
|
||||
The array label stores class labels.
|
||||
|
||||
- Function: void cross_validation(const problem *prob, const parameter *param, int nr_fold, int *target);
|
||||
|
||||
This function conducts cross validation. Data are separated to
|
||||
nr_fold folds. Under given parameters, sequentially each fold is
|
||||
validated using the model from training the remaining. Predicted
|
||||
labels in the validation process are stored in the array called
|
||||
target.
|
||||
|
||||
The format of prob is same as that for train().
|
||||
|
||||
- Function: int predict(const model *model_, const feature_node *x);
|
||||
|
||||
This functions classifies a test vector using the given
|
||||
model. The predicted label is returned.
|
||||
|
||||
- Function: int predict_values(const struct model *model_,
|
||||
const struct feature_node *x, double* dec_values);
|
||||
|
||||
This function gives nr_w decision values in the array
|
||||
dec_values. nr_w is 1 if there are two classes except multi-class
|
||||
svm by Crammer and Singer (-s 4), and is the number of classes otherwise.
|
||||
|
||||
We implement one-vs-the rest multi-class strategy (-s 0,1,2,3) and
|
||||
multi-class svm by Crammer and Singer (-s 4) for multi-class SVM.
|
||||
The class with the highest decision value is returned.
|
||||
|
||||
- Function: int predict_probability(const struct model *model_,
|
||||
const struct feature_node *x, double* prob_estimates);
|
||||
|
||||
This function gives nr_class probability estimates in the array
|
||||
prob_estimates. nr_class can be obtained from the function
|
||||
get_nr_class. The class with the highest probability is
|
||||
returned. Currently, we support only the probability outputs of
|
||||
logistic regression.
|
||||
|
||||
- Function: int get_nr_feature(const model *model_);
|
||||
|
||||
The function gives the number of attributes of the model.
|
||||
|
||||
- Function: int get_nr_class(const model *model_);
|
||||
|
||||
The function gives the number of classes of the model.
|
||||
|
||||
- Function: void get_labels(const model *model_, int* label);
|
||||
|
||||
This function outputs the name of labels into an array called label.
|
||||
|
||||
- Function: const char *check_parameter(const struct problem *prob,
|
||||
const struct parameter *param);
|
||||
|
||||
This function checks whether the parameters are within the feasible
|
||||
range of the problem. This function should be called before calling
|
||||
train() and cross_validation(). It returns NULL if the
|
||||
parameters are feasible, otherwise an error message is returned.
|
||||
|
||||
- Function: int save_model(const char *model_file_name,
|
||||
const struct model *model_);
|
||||
|
||||
This function saves a model to a file; returns 0 on success, or -1
|
||||
if an error occurs.
|
||||
|
||||
- Function: struct model *load_model(const char *model_file_name);
|
||||
|
||||
This function returns a pointer to the model read from the file,
|
||||
or a null pointer if the model could not be loaded.
|
||||
|
||||
- Function: void free_model_content(struct model *model_ptr);
|
||||
|
||||
This function frees the memory used by the entries in a model structure.
|
||||
|
||||
- Function: void free_and_destroy_model(struct model **model_ptr_ptr);
|
||||
|
||||
This function frees the memory used by a model and destroys the model
|
||||
structure.
|
||||
|
||||
- Function: void destroy_param(struct parameter *param);
|
||||
|
||||
This function frees the memory used by a parameter set.
|
||||
|
||||
- Function: void set_print_string_function(void (*print_func)(const char *));
|
||||
|
||||
Users can specify their output format by a function. Use
|
||||
set_print_string_function(NULL);
|
||||
for default printing to stdout.
|
||||
|
||||
Building Windows Binaries
|
||||
=========================
|
||||
|
||||
Windows binaries are in the directory `windows'. To build them via
|
||||
Visual C++, use the following steps:
|
||||
|
||||
1. Open a dos command box and change to liblinear directory. If
|
||||
environment variables of VC++ have not been set, type
|
||||
|
||||
"C:\Program Files\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat"
|
||||
|
||||
You may have to modify the above command according which version of
|
||||
VC++ or where it is installed.
|
||||
|
||||
2. Type
|
||||
|
||||
nmake -f Makefile.win clean all
|
||||
|
||||
|
||||
MATLAB/OCTAVE Interface
|
||||
=======================
|
||||
|
||||
Please check the file README in the directory `matlab'.
|
||||
|
||||
PYTHON Interface
|
||||
================
|
||||
|
||||
Please check the file README in the directory `python'.
|
||||
|
||||
Additional Information
|
||||
======================
|
||||
|
||||
If you find LIBLINEAR helpful, please cite it as
|
||||
|
||||
R.-E. Fan, K.-W. Chang, C.-J. Hsieh, X.-R. Wang, and C.-J. Lin.
|
||||
LIBLINEAR: A Library for Large Linear Classification, Journal of
|
||||
Machine Learning Research 9(2008), 1871-1874. Software available at
|
||||
http://www.csie.ntu.edu.tw/~cjlin/liblinear
|
||||
|
||||
For any questions and comments, please send your email to
|
||||
cjlin@csie.ntu.edu.tw
|
||||
|
||||
|
||||
22
liblinear/blas/Makefile
Normal file
22
liblinear/blas/Makefile
Normal file
@@ -0,0 +1,22 @@
|
||||
AR = ar rcv
|
||||
RANLIB = ranlib
|
||||
|
||||
HEADERS = blas.h blas.h blasp.h
|
||||
FILES = dnrm2.o daxpy.o ddot.o dscal.o
|
||||
|
||||
CFLAGS = $(OPTFLAGS)
|
||||
FFLAGS = $(OPTFLAGS)
|
||||
|
||||
blas: $(FILES) $(HEADERS)
|
||||
$(AR) blas.a $(FILES)
|
||||
$(RANLIB) blas.a
|
||||
|
||||
clean:
|
||||
- rm -f *.o
|
||||
- rm -f *.a
|
||||
- rm -f *~
|
||||
|
||||
.c.o:
|
||||
$(CC) $(CFLAGS) -c $*.c
|
||||
|
||||
|
||||
25
liblinear/blas/blas.h
Normal file
25
liblinear/blas/blas.h
Normal file
@@ -0,0 +1,25 @@
|
||||
/* blas.h -- C header file for BLAS Ver 1.0 */
|
||||
/* Jesse Bennett March 23, 2000 */
|
||||
|
||||
/** barf [ba:rf] 2. "He suggested using FORTRAN, and everybody barfed."
|
||||
|
||||
- From The Shogakukan DICTIONARY OF NEW ENGLISH (Second edition) */
|
||||
|
||||
#ifndef BLAS_INCLUDE
|
||||
#define BLAS_INCLUDE
|
||||
|
||||
/* Data types specific to BLAS implementation */
|
||||
typedef struct { float r, i; } fcomplex;
|
||||
typedef struct { double r, i; } dcomplex;
|
||||
typedef int blasbool;
|
||||
|
||||
#include "blasp.h" /* Prototypes for all BLAS functions */
|
||||
|
||||
#define FALSE 0
|
||||
#define TRUE 1
|
||||
|
||||
/* Macro functions */
|
||||
#define MIN(a,b) ((a) <= (b) ? (a) : (b))
|
||||
#define MAX(a,b) ((a) >= (b) ? (a) : (b))
|
||||
|
||||
#endif
|
||||
430
liblinear/blas/blasp.h
Normal file
430
liblinear/blas/blasp.h
Normal file
@@ -0,0 +1,430 @@
|
||||
/* blasp.h -- C prototypes for BLAS Ver 1.0 */
|
||||
/* Jesse Bennett March 23, 2000 */
|
||||
|
||||
/* Functions listed in alphabetical order */
|
||||
|
||||
#ifdef F2C_COMPAT
|
||||
|
||||
void cdotc_(fcomplex *dotval, int *n, fcomplex *cx, int *incx,
|
||||
fcomplex *cy, int *incy);
|
||||
|
||||
void cdotu_(fcomplex *dotval, int *n, fcomplex *cx, int *incx,
|
||||
fcomplex *cy, int *incy);
|
||||
|
||||
double sasum_(int *n, float *sx, int *incx);
|
||||
|
||||
double scasum_(int *n, fcomplex *cx, int *incx);
|
||||
|
||||
double scnrm2_(int *n, fcomplex *x, int *incx);
|
||||
|
||||
double sdot_(int *n, float *sx, int *incx, float *sy, int *incy);
|
||||
|
||||
double snrm2_(int *n, float *x, int *incx);
|
||||
|
||||
void zdotc_(dcomplex *dotval, int *n, dcomplex *cx, int *incx,
|
||||
dcomplex *cy, int *incy);
|
||||
|
||||
void zdotu_(dcomplex *dotval, int *n, dcomplex *cx, int *incx,
|
||||
dcomplex *cy, int *incy);
|
||||
|
||||
#else
|
||||
|
||||
fcomplex cdotc_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
|
||||
|
||||
fcomplex cdotu_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
|
||||
|
||||
float sasum_(int *n, float *sx, int *incx);
|
||||
|
||||
float scasum_(int *n, fcomplex *cx, int *incx);
|
||||
|
||||
float scnrm2_(int *n, fcomplex *x, int *incx);
|
||||
|
||||
float sdot_(int *n, float *sx, int *incx, float *sy, int *incy);
|
||||
|
||||
float snrm2_(int *n, float *x, int *incx);
|
||||
|
||||
dcomplex zdotc_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
|
||||
|
||||
dcomplex zdotu_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
|
||||
|
||||
#endif
|
||||
|
||||
/* Remaining functions listed in alphabetical order */
|
||||
|
||||
int caxpy_(int *n, fcomplex *ca, fcomplex *cx, int *incx, fcomplex *cy,
|
||||
int *incy);
|
||||
|
||||
int ccopy_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
|
||||
|
||||
int cgbmv_(char *trans, int *m, int *n, int *kl, int *ku,
|
||||
fcomplex *alpha, fcomplex *a, int *lda, fcomplex *x, int *incx,
|
||||
fcomplex *beta, fcomplex *y, int *incy);
|
||||
|
||||
int cgemm_(char *transa, char *transb, int *m, int *n, int *k,
|
||||
fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b, int *ldb,
|
||||
fcomplex *beta, fcomplex *c, int *ldc);
|
||||
|
||||
int cgemv_(char *trans, int *m, int *n, fcomplex *alpha, fcomplex *a,
|
||||
int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y,
|
||||
int *incy);
|
||||
|
||||
int cgerc_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx,
|
||||
fcomplex *y, int *incy, fcomplex *a, int *lda);
|
||||
|
||||
int cgeru_(int *m, int *n, fcomplex *alpha, fcomplex *x, int *incx,
|
||||
fcomplex *y, int *incy, fcomplex *a, int *lda);
|
||||
|
||||
int chbmv_(char *uplo, int *n, int *k, fcomplex *alpha, fcomplex *a,
|
||||
int *lda, fcomplex *x, int *incx, fcomplex *beta, fcomplex *y,
|
||||
int *incy);
|
||||
|
||||
int chemm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha,
|
||||
fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta,
|
||||
fcomplex *c, int *ldc);
|
||||
|
||||
int chemv_(char *uplo, int *n, fcomplex *alpha, fcomplex *a, int *lda,
|
||||
fcomplex *x, int *incx, fcomplex *beta, fcomplex *y, int *incy);
|
||||
|
||||
int cher_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx,
|
||||
fcomplex *a, int *lda);
|
||||
|
||||
int cher2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx,
|
||||
fcomplex *y, int *incy, fcomplex *a, int *lda);
|
||||
|
||||
int cher2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha,
|
||||
fcomplex *a, int *lda, fcomplex *b, int *ldb, float *beta,
|
||||
fcomplex *c, int *ldc);
|
||||
|
||||
int cherk_(char *uplo, char *trans, int *n, int *k, float *alpha,
|
||||
fcomplex *a, int *lda, float *beta, fcomplex *c, int *ldc);
|
||||
|
||||
int chpmv_(char *uplo, int *n, fcomplex *alpha, fcomplex *ap, fcomplex *x,
|
||||
int *incx, fcomplex *beta, fcomplex *y, int *incy);
|
||||
|
||||
int chpr_(char *uplo, int *n, float *alpha, fcomplex *x, int *incx,
|
||||
fcomplex *ap);
|
||||
|
||||
int chpr2_(char *uplo, int *n, fcomplex *alpha, fcomplex *x, int *incx,
|
||||
fcomplex *y, int *incy, fcomplex *ap);
|
||||
|
||||
int crotg_(fcomplex *ca, fcomplex *cb, float *c, fcomplex *s);
|
||||
|
||||
int cscal_(int *n, fcomplex *ca, fcomplex *cx, int *incx);
|
||||
|
||||
int csscal_(int *n, float *sa, fcomplex *cx, int *incx);
|
||||
|
||||
int cswap_(int *n, fcomplex *cx, int *incx, fcomplex *cy, int *incy);
|
||||
|
||||
int csymm_(char *side, char *uplo, int *m, int *n, fcomplex *alpha,
|
||||
fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta,
|
||||
fcomplex *c, int *ldc);
|
||||
|
||||
int csyr2k_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha,
|
||||
fcomplex *a, int *lda, fcomplex *b, int *ldb, fcomplex *beta,
|
||||
fcomplex *c, int *ldc);
|
||||
|
||||
int csyrk_(char *uplo, char *trans, int *n, int *k, fcomplex *alpha,
|
||||
fcomplex *a, int *lda, fcomplex *beta, fcomplex *c, int *ldc);
|
||||
|
||||
int ctbmv_(char *uplo, char *trans, char *diag, int *n, int *k,
|
||||
fcomplex *a, int *lda, fcomplex *x, int *incx);
|
||||
|
||||
int ctbsv_(char *uplo, char *trans, char *diag, int *n, int *k,
|
||||
fcomplex *a, int *lda, fcomplex *x, int *incx);
|
||||
|
||||
int ctpmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap,
|
||||
fcomplex *x, int *incx);
|
||||
|
||||
int ctpsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *ap,
|
||||
fcomplex *x, int *incx);
|
||||
|
||||
int ctrmm_(char *side, char *uplo, char *transa, char *diag, int *m,
|
||||
int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b,
|
||||
int *ldb);
|
||||
|
||||
int ctrmv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a,
|
||||
int *lda, fcomplex *x, int *incx);
|
||||
|
||||
int ctrsm_(char *side, char *uplo, char *transa, char *diag, int *m,
|
||||
int *n, fcomplex *alpha, fcomplex *a, int *lda, fcomplex *b,
|
||||
int *ldb);
|
||||
|
||||
int ctrsv_(char *uplo, char *trans, char *diag, int *n, fcomplex *a,
|
||||
int *lda, fcomplex *x, int *incx);
|
||||
|
||||
int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy,
|
||||
int *incy);
|
||||
|
||||
int dcopy_(int *n, double *sx, int *incx, double *sy, int *incy);
|
||||
|
||||
int dgbmv_(char *trans, int *m, int *n, int *kl, int *ku,
|
||||
double *alpha, double *a, int *lda, double *x, int *incx,
|
||||
double *beta, double *y, int *incy);
|
||||
|
||||
int dgemm_(char *transa, char *transb, int *m, int *n, int *k,
|
||||
double *alpha, double *a, int *lda, double *b, int *ldb,
|
||||
double *beta, double *c, int *ldc);
|
||||
|
||||
int dgemv_(char *trans, int *m, int *n, double *alpha, double *a,
|
||||
int *lda, double *x, int *incx, double *beta, double *y,
|
||||
int *incy);
|
||||
|
||||
int dger_(int *m, int *n, double *alpha, double *x, int *incx,
|
||||
double *y, int *incy, double *a, int *lda);
|
||||
|
||||
int drot_(int *n, double *sx, int *incx, double *sy, int *incy,
|
||||
double *c, double *s);
|
||||
|
||||
int drotg_(double *sa, double *sb, double *c, double *s);
|
||||
|
||||
int dsbmv_(char *uplo, int *n, int *k, double *alpha, double *a,
|
||||
int *lda, double *x, int *incx, double *beta, double *y,
|
||||
int *incy);
|
||||
|
||||
int dscal_(int *n, double *sa, double *sx, int *incx);
|
||||
|
||||
int dspmv_(char *uplo, int *n, double *alpha, double *ap, double *x,
|
||||
int *incx, double *beta, double *y, int *incy);
|
||||
|
||||
int dspr_(char *uplo, int *n, double *alpha, double *x, int *incx,
|
||||
double *ap);
|
||||
|
||||
int dspr2_(char *uplo, int *n, double *alpha, double *x, int *incx,
|
||||
double *y, int *incy, double *ap);
|
||||
|
||||
int dswap_(int *n, double *sx, int *incx, double *sy, int *incy);
|
||||
|
||||
int dsymm_(char *side, char *uplo, int *m, int *n, double *alpha,
|
||||
double *a, int *lda, double *b, int *ldb, double *beta,
|
||||
double *c, int *ldc);
|
||||
|
||||
int dsymv_(char *uplo, int *n, double *alpha, double *a, int *lda,
|
||||
double *x, int *incx, double *beta, double *y, int *incy);
|
||||
|
||||
int dsyr_(char *uplo, int *n, double *alpha, double *x, int *incx,
|
||||
double *a, int *lda);
|
||||
|
||||
int dsyr2_(char *uplo, int *n, double *alpha, double *x, int *incx,
|
||||
double *y, int *incy, double *a, int *lda);
|
||||
|
||||
int dsyr2k_(char *uplo, char *trans, int *n, int *k, double *alpha,
|
||||
double *a, int *lda, double *b, int *ldb, double *beta,
|
||||
double *c, int *ldc);
|
||||
|
||||
int dsyrk_(char *uplo, char *trans, int *n, int *k, double *alpha,
|
||||
double *a, int *lda, double *beta, double *c, int *ldc);
|
||||
|
||||
int dtbmv_(char *uplo, char *trans, char *diag, int *n, int *k,
|
||||
double *a, int *lda, double *x, int *incx);
|
||||
|
||||
int dtbsv_(char *uplo, char *trans, char *diag, int *n, int *k,
|
||||
double *a, int *lda, double *x, int *incx);
|
||||
|
||||
int dtpmv_(char *uplo, char *trans, char *diag, int *n, double *ap,
|
||||
double *x, int *incx);
|
||||
|
||||
int dtpsv_(char *uplo, char *trans, char *diag, int *n, double *ap,
|
||||
double *x, int *incx);
|
||||
|
||||
int dtrmm_(char *side, char *uplo, char *transa, char *diag, int *m,
|
||||
int *n, double *alpha, double *a, int *lda, double *b,
|
||||
int *ldb);
|
||||
|
||||
int dtrmv_(char *uplo, char *trans, char *diag, int *n, double *a,
|
||||
int *lda, double *x, int *incx);
|
||||
|
||||
int dtrsm_(char *side, char *uplo, char *transa, char *diag, int *m,
|
||||
int *n, double *alpha, double *a, int *lda, double *b,
|
||||
int *ldb);
|
||||
|
||||
int dtrsv_(char *uplo, char *trans, char *diag, int *n, double *a,
|
||||
int *lda, double *x, int *incx);
|
||||
|
||||
|
||||
int saxpy_(int *n, float *sa, float *sx, int *incx, float *sy, int *incy);
|
||||
|
||||
int scopy_(int *n, float *sx, int *incx, float *sy, int *incy);
|
||||
|
||||
int sgbmv_(char *trans, int *m, int *n, int *kl, int *ku,
|
||||
float *alpha, float *a, int *lda, float *x, int *incx,
|
||||
float *beta, float *y, int *incy);
|
||||
|
||||
int sgemm_(char *transa, char *transb, int *m, int *n, int *k,
|
||||
float *alpha, float *a, int *lda, float *b, int *ldb,
|
||||
float *beta, float *c, int *ldc);
|
||||
|
||||
int sgemv_(char *trans, int *m, int *n, float *alpha, float *a,
|
||||
int *lda, float *x, int *incx, float *beta, float *y,
|
||||
int *incy);
|
||||
|
||||
int sger_(int *m, int *n, float *alpha, float *x, int *incx,
|
||||
float *y, int *incy, float *a, int *lda);
|
||||
|
||||
int srot_(int *n, float *sx, int *incx, float *sy, int *incy,
|
||||
float *c, float *s);
|
||||
|
||||
int srotg_(float *sa, float *sb, float *c, float *s);
|
||||
|
||||
int ssbmv_(char *uplo, int *n, int *k, float *alpha, float *a,
|
||||
int *lda, float *x, int *incx, float *beta, float *y,
|
||||
int *incy);
|
||||
|
||||
int sscal_(int *n, float *sa, float *sx, int *incx);
|
||||
|
||||
int sspmv_(char *uplo, int *n, float *alpha, float *ap, float *x,
|
||||
int *incx, float *beta, float *y, int *incy);
|
||||
|
||||
int sspr_(char *uplo, int *n, float *alpha, float *x, int *incx,
|
||||
float *ap);
|
||||
|
||||
int sspr2_(char *uplo, int *n, float *alpha, float *x, int *incx,
|
||||
float *y, int *incy, float *ap);
|
||||
|
||||
int sswap_(int *n, float *sx, int *incx, float *sy, int *incy);
|
||||
|
||||
int ssymm_(char *side, char *uplo, int *m, int *n, float *alpha,
|
||||
float *a, int *lda, float *b, int *ldb, float *beta,
|
||||
float *c, int *ldc);
|
||||
|
||||
int ssymv_(char *uplo, int *n, float *alpha, float *a, int *lda,
|
||||
float *x, int *incx, float *beta, float *y, int *incy);
|
||||
|
||||
int ssyr_(char *uplo, int *n, float *alpha, float *x, int *incx,
|
||||
float *a, int *lda);
|
||||
|
||||
int ssyr2_(char *uplo, int *n, float *alpha, float *x, int *incx,
|
||||
float *y, int *incy, float *a, int *lda);
|
||||
|
||||
int ssyr2k_(char *uplo, char *trans, int *n, int *k, float *alpha,
|
||||
float *a, int *lda, float *b, int *ldb, float *beta,
|
||||
float *c, int *ldc);
|
||||
|
||||
int ssyrk_(char *uplo, char *trans, int *n, int *k, float *alpha,
|
||||
float *a, int *lda, float *beta, float *c, int *ldc);
|
||||
|
||||
int stbmv_(char *uplo, char *trans, char *diag, int *n, int *k,
|
||||
float *a, int *lda, float *x, int *incx);
|
||||
|
||||
int stbsv_(char *uplo, char *trans, char *diag, int *n, int *k,
|
||||
float *a, int *lda, float *x, int *incx);
|
||||
|
||||
int stpmv_(char *uplo, char *trans, char *diag, int *n, float *ap,
|
||||
float *x, int *incx);
|
||||
|
||||
int stpsv_(char *uplo, char *trans, char *diag, int *n, float *ap,
|
||||
float *x, int *incx);
|
||||
|
||||
int strmm_(char *side, char *uplo, char *transa, char *diag, int *m,
|
||||
int *n, float *alpha, float *a, int *lda, float *b,
|
||||
int *ldb);
|
||||
|
||||
int strmv_(char *uplo, char *trans, char *diag, int *n, float *a,
|
||||
int *lda, float *x, int *incx);
|
||||
|
||||
int strsm_(char *side, char *uplo, char *transa, char *diag, int *m,
|
||||
int *n, float *alpha, float *a, int *lda, float *b,
|
||||
int *ldb);
|
||||
|
||||
int strsv_(char *uplo, char *trans, char *diag, int *n, float *a,
|
||||
int *lda, float *x, int *incx);
|
||||
|
||||
int zaxpy_(int *n, dcomplex *ca, dcomplex *cx, int *incx, dcomplex *cy,
|
||||
int *incy);
|
||||
|
||||
int zcopy_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
|
||||
|
||||
int zdscal_(int *n, double *sa, dcomplex *cx, int *incx);
|
||||
|
||||
int zgbmv_(char *trans, int *m, int *n, int *kl, int *ku,
|
||||
dcomplex *alpha, dcomplex *a, int *lda, dcomplex *x, int *incx,
|
||||
dcomplex *beta, dcomplex *y, int *incy);
|
||||
|
||||
int zgemm_(char *transa, char *transb, int *m, int *n, int *k,
|
||||
dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b, int *ldb,
|
||||
dcomplex *beta, dcomplex *c, int *ldc);
|
||||
|
||||
int zgemv_(char *trans, int *m, int *n, dcomplex *alpha, dcomplex *a,
|
||||
int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y,
|
||||
int *incy);
|
||||
|
||||
int zgerc_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx,
|
||||
dcomplex *y, int *incy, dcomplex *a, int *lda);
|
||||
|
||||
int zgeru_(int *m, int *n, dcomplex *alpha, dcomplex *x, int *incx,
|
||||
dcomplex *y, int *incy, dcomplex *a, int *lda);
|
||||
|
||||
int zhbmv_(char *uplo, int *n, int *k, dcomplex *alpha, dcomplex *a,
|
||||
int *lda, dcomplex *x, int *incx, dcomplex *beta, dcomplex *y,
|
||||
int *incy);
|
||||
|
||||
int zhemm_(char *side, char *uplo, int *m, int *n, dcomplex *alpha,
|
||||
dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta,
|
||||
dcomplex *c, int *ldc);
|
||||
|
||||
int zhemv_(char *uplo, int *n, dcomplex *alpha, dcomplex *a, int *lda,
|
||||
dcomplex *x, int *incx, dcomplex *beta, dcomplex *y, int *incy);
|
||||
|
||||
int zher_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx,
|
||||
dcomplex *a, int *lda);
|
||||
|
||||
int zher2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx,
|
||||
dcomplex *y, int *incy, dcomplex *a, int *lda);
|
||||
|
||||
int zher2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha,
|
||||
dcomplex *a, int *lda, dcomplex *b, int *ldb, double *beta,
|
||||
dcomplex *c, int *ldc);
|
||||
|
||||
int zherk_(char *uplo, char *trans, int *n, int *k, double *alpha,
|
||||
dcomplex *a, int *lda, double *beta, dcomplex *c, int *ldc);
|
||||
|
||||
int zhpmv_(char *uplo, int *n, dcomplex *alpha, dcomplex *ap, dcomplex *x,
|
||||
int *incx, dcomplex *beta, dcomplex *y, int *incy);
|
||||
|
||||
int zhpr_(char *uplo, int *n, double *alpha, dcomplex *x, int *incx,
|
||||
dcomplex *ap);
|
||||
|
||||
int zhpr2_(char *uplo, int *n, dcomplex *alpha, dcomplex *x, int *incx,
|
||||
dcomplex *y, int *incy, dcomplex *ap);
|
||||
|
||||
int zrotg_(dcomplex *ca, dcomplex *cb, double *c, dcomplex *s);
|
||||
|
||||
int zscal_(int *n, dcomplex *ca, dcomplex *cx, int *incx);
|
||||
|
||||
int zswap_(int *n, dcomplex *cx, int *incx, dcomplex *cy, int *incy);
|
||||
|
||||
int zsymm_(char *side, char *uplo, int *m, int *n, dcomplex *alpha,
|
||||
dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta,
|
||||
dcomplex *c, int *ldc);
|
||||
|
||||
int zsyr2k_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha,
|
||||
dcomplex *a, int *lda, dcomplex *b, int *ldb, dcomplex *beta,
|
||||
dcomplex *c, int *ldc);
|
||||
|
||||
int zsyrk_(char *uplo, char *trans, int *n, int *k, dcomplex *alpha,
|
||||
dcomplex *a, int *lda, dcomplex *beta, dcomplex *c, int *ldc);
|
||||
|
||||
int ztbmv_(char *uplo, char *trans, char *diag, int *n, int *k,
|
||||
dcomplex *a, int *lda, dcomplex *x, int *incx);
|
||||
|
||||
int ztbsv_(char *uplo, char *trans, char *diag, int *n, int *k,
|
||||
dcomplex *a, int *lda, dcomplex *x, int *incx);
|
||||
|
||||
int ztpmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap,
|
||||
dcomplex *x, int *incx);
|
||||
|
||||
int ztpsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *ap,
|
||||
dcomplex *x, int *incx);
|
||||
|
||||
int ztrmm_(char *side, char *uplo, char *transa, char *diag, int *m,
|
||||
int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b,
|
||||
int *ldb);
|
||||
|
||||
int ztrmv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a,
|
||||
int *lda, dcomplex *x, int *incx);
|
||||
|
||||
int ztrsm_(char *side, char *uplo, char *transa, char *diag, int *m,
|
||||
int *n, dcomplex *alpha, dcomplex *a, int *lda, dcomplex *b,
|
||||
int *ldb);
|
||||
|
||||
int ztrsv_(char *uplo, char *trans, char *diag, int *n, dcomplex *a,
|
||||
int *lda, dcomplex *x, int *incx);
|
||||
49
liblinear/blas/daxpy.c
Normal file
49
liblinear/blas/daxpy.c
Normal file
@@ -0,0 +1,49 @@
|
||||
#include "blas.h"
|
||||
|
||||
int daxpy_(int *n, double *sa, double *sx, int *incx, double *sy,
|
||||
int *incy)
|
||||
{
|
||||
long int i, m, ix, iy, nn, iincx, iincy;
|
||||
register double ssa;
|
||||
|
||||
/* constant times a vector plus a vector.
|
||||
uses unrolled loop for increments equal to one.
|
||||
jack dongarra, linpack, 3/11/78.
|
||||
modified 12/3/93, array(1) declarations changed to array(*) */
|
||||
|
||||
/* Dereference inputs */
|
||||
nn = *n;
|
||||
ssa = *sa;
|
||||
iincx = *incx;
|
||||
iincy = *incy;
|
||||
|
||||
if( nn > 0 && ssa != 0.0 )
|
||||
{
|
||||
if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
|
||||
{
|
||||
m = nn-3;
|
||||
for (i = 0; i < m; i += 4)
|
||||
{
|
||||
sy[i] += ssa * sx[i];
|
||||
sy[i+1] += ssa * sx[i+1];
|
||||
sy[i+2] += ssa * sx[i+2];
|
||||
sy[i+3] += ssa * sx[i+3];
|
||||
}
|
||||
for ( ; i < nn; ++i) /* clean-up loop */
|
||||
sy[i] += ssa * sx[i];
|
||||
}
|
||||
else /* code for unequal increments or equal increments not equal to 1 */
|
||||
{
|
||||
ix = iincx >= 0 ? 0 : (1 - nn) * iincx;
|
||||
iy = iincy >= 0 ? 0 : (1 - nn) * iincy;
|
||||
for (i = 0; i < nn; i++)
|
||||
{
|
||||
sy[iy] += ssa * sx[ix];
|
||||
ix += iincx;
|
||||
iy += iincy;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
} /* daxpy_ */
|
||||
50
liblinear/blas/ddot.c
Normal file
50
liblinear/blas/ddot.c
Normal file
@@ -0,0 +1,50 @@
|
||||
#include "blas.h"
|
||||
|
||||
double ddot_(int *n, double *sx, int *incx, double *sy, int *incy)
|
||||
{
|
||||
long int i, m, nn, iincx, iincy;
|
||||
double stemp;
|
||||
long int ix, iy;
|
||||
|
||||
/* forms the dot product of two vectors.
|
||||
uses unrolled loops for increments equal to one.
|
||||
jack dongarra, linpack, 3/11/78.
|
||||
modified 12/3/93, array(1) declarations changed to array(*) */
|
||||
|
||||
/* Dereference inputs */
|
||||
nn = *n;
|
||||
iincx = *incx;
|
||||
iincy = *incy;
|
||||
|
||||
stemp = 0.0;
|
||||
if (nn > 0)
|
||||
{
|
||||
if (iincx == 1 && iincy == 1) /* code for both increments equal to 1 */
|
||||
{
|
||||
m = nn-4;
|
||||
for (i = 0; i < m; i += 5)
|
||||
stemp += sx[i] * sy[i] + sx[i+1] * sy[i+1] + sx[i+2] * sy[i+2] +
|
||||
sx[i+3] * sy[i+3] + sx[i+4] * sy[i+4];
|
||||
|
||||
for ( ; i < nn; i++) /* clean-up loop */
|
||||
stemp += sx[i] * sy[i];
|
||||
}
|
||||
else /* code for unequal increments or equal increments not equal to 1 */
|
||||
{
|
||||
ix = 0;
|
||||
iy = 0;
|
||||
if (iincx < 0)
|
||||
ix = (1 - nn) * iincx;
|
||||
if (iincy < 0)
|
||||
iy = (1 - nn) * iincy;
|
||||
for (i = 0; i < nn; i++)
|
||||
{
|
||||
stemp += sx[ix] * sy[iy];
|
||||
ix += iincx;
|
||||
iy += iincy;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return stemp;
|
||||
} /* ddot_ */
|
||||
62
liblinear/blas/dnrm2.c
Normal file
62
liblinear/blas/dnrm2.c
Normal file
@@ -0,0 +1,62 @@
|
||||
#include <math.h> /* Needed for fabs() and sqrt() */
|
||||
#include "blas.h"
|
||||
|
||||
double dnrm2_(int *n, double *x, int *incx)
|
||||
{
|
||||
long int ix, nn, iincx;
|
||||
double norm, scale, absxi, ssq, temp;
|
||||
|
||||
/* DNRM2 returns the euclidean norm of a vector via the function
|
||||
name, so that
|
||||
|
||||
DNRM2 := sqrt( x'*x )
|
||||
|
||||
-- This version written on 25-October-1982.
|
||||
Modified on 14-October-1993 to inline the call to SLASSQ.
|
||||
Sven Hammarling, Nag Ltd. */
|
||||
|
||||
/* Dereference inputs */
|
||||
nn = *n;
|
||||
iincx = *incx;
|
||||
|
||||
if( nn > 0 && iincx > 0 )
|
||||
{
|
||||
if (nn == 1)
|
||||
{
|
||||
norm = fabs(x[0]);
|
||||
}
|
||||
else
|
||||
{
|
||||
scale = 0.0;
|
||||
ssq = 1.0;
|
||||
|
||||
/* The following loop is equivalent to this call to the LAPACK
|
||||
auxiliary routine: CALL SLASSQ( N, X, INCX, SCALE, SSQ ) */
|
||||
|
||||
for (ix=(nn-1)*iincx; ix>=0; ix-=iincx)
|
||||
{
|
||||
if (x[ix] != 0.0)
|
||||
{
|
||||
absxi = fabs(x[ix]);
|
||||
if (scale < absxi)
|
||||
{
|
||||
temp = scale / absxi;
|
||||
ssq = ssq * (temp * temp) + 1.0;
|
||||
scale = absxi;
|
||||
}
|
||||
else
|
||||
{
|
||||
temp = absxi / scale;
|
||||
ssq += temp * temp;
|
||||
}
|
||||
}
|
||||
}
|
||||
norm = scale * sqrt(ssq);
|
||||
}
|
||||
}
|
||||
else
|
||||
norm = 0.0;
|
||||
|
||||
return norm;
|
||||
|
||||
} /* dnrm2_ */
|
||||
44
liblinear/blas/dscal.c
Normal file
44
liblinear/blas/dscal.c
Normal file
@@ -0,0 +1,44 @@
|
||||
#include "blas.h"
|
||||
|
||||
int dscal_(int *n, double *sa, double *sx, int *incx)
|
||||
{
|
||||
long int i, m, nincx, nn, iincx;
|
||||
double ssa;
|
||||
|
||||
/* scales a vector by a constant.
|
||||
uses unrolled loops for increment equal to 1.
|
||||
jack dongarra, linpack, 3/11/78.
|
||||
modified 3/93 to return if incx .le. 0.
|
||||
modified 12/3/93, array(1) declarations changed to array(*) */
|
||||
|
||||
/* Dereference inputs */
|
||||
nn = *n;
|
||||
iincx = *incx;
|
||||
ssa = *sa;
|
||||
|
||||
if (nn > 0 && iincx > 0)
|
||||
{
|
||||
if (iincx == 1) /* code for increment equal to 1 */
|
||||
{
|
||||
m = nn-4;
|
||||
for (i = 0; i < m; i += 5)
|
||||
{
|
||||
sx[i] = ssa * sx[i];
|
||||
sx[i+1] = ssa * sx[i+1];
|
||||
sx[i+2] = ssa * sx[i+2];
|
||||
sx[i+3] = ssa * sx[i+3];
|
||||
sx[i+4] = ssa * sx[i+4];
|
||||
}
|
||||
for ( ; i < nn; ++i) /* clean-up loop */
|
||||
sx[i] = ssa * sx[i];
|
||||
}
|
||||
else /* code for increment not equal to 1 */
|
||||
{
|
||||
nincx = nn * iincx;
|
||||
for (i = 0; i < nincx; i += iincx)
|
||||
sx[i] = ssa * sx[i];
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
} /* dscal_ */
|
||||
95
liblinear/liblinear.vcxproj
Executable file
95
liblinear/liblinear.vcxproj
Executable file
@@ -0,0 +1,95 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
|
||||
<ItemGroup Label="ProjectConfigurations">
|
||||
<ProjectConfiguration Include="Debug|Win32">
|
||||
<Configuration>Debug</Configuration>
|
||||
<Platform>Win32</Platform>
|
||||
</ProjectConfiguration>
|
||||
<ProjectConfiguration Include="Release|Win32">
|
||||
<Configuration>Release</Configuration>
|
||||
<Platform>Win32</Platform>
|
||||
</ProjectConfiguration>
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClCompile Include="blas\daxpy.c" />
|
||||
<ClCompile Include="blas\ddot.c" />
|
||||
<ClCompile Include="blas\dnrm2.c" />
|
||||
<ClCompile Include="blas\dscal.c" />
|
||||
<ClCompile Include="linear.cpp" />
|
||||
<ClCompile Include="tron.cpp" />
|
||||
</ItemGroup>
|
||||
<ItemGroup>
|
||||
<ClInclude Include="blas\blas.h" />
|
||||
<ClInclude Include="blas\blasp.h" />
|
||||
<ClInclude Include="linear.h" />
|
||||
<ClInclude Include="tron.h" />
|
||||
</ItemGroup>
|
||||
<PropertyGroup Label="Globals">
|
||||
<ProjectGuid>{A7BE3D76-F20C-40C5-8986-DE4028B3B57D}</ProjectGuid>
|
||||
<Keyword>Win32Proj</Keyword>
|
||||
<RootNamespace>liblinear</RootNamespace>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<UseDebugLibraries>true</UseDebugLibraries>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
|
||||
<ConfigurationType>StaticLibrary</ConfigurationType>
|
||||
<UseDebugLibraries>false</UseDebugLibraries>
|
||||
<WholeProgramOptimization>true</WholeProgramOptimization>
|
||||
<CharacterSet>Unicode</CharacterSet>
|
||||
</PropertyGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
|
||||
<ImportGroup Label="ExtensionSettings">
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
|
||||
</ImportGroup>
|
||||
<PropertyGroup Label="UserMacros" />
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<OutDir>.</OutDir>
|
||||
</PropertyGroup>
|
||||
<PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<OutDir>.</OutDir>
|
||||
</PropertyGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
|
||||
<ClCompile>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<Optimization>Disabled</Optimization>
|
||||
<PreprocessorDefinitions>WIN32;_DEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
|
||||
<ClCompile>
|
||||
<WarningLevel>Level3</WarningLevel>
|
||||
<PrecompiledHeader>
|
||||
</PrecompiledHeader>
|
||||
<Optimization>MaxSpeed</Optimization>
|
||||
<FunctionLevelLinking>true</FunctionLevelLinking>
|
||||
<IntrinsicFunctions>true</IntrinsicFunctions>
|
||||
<PreprocessorDefinitions>WIN32;NDEBUG;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
|
||||
<RuntimeLibrary>MultiThreaded</RuntimeLibrary>
|
||||
</ClCompile>
|
||||
<Link>
|
||||
<SubSystem>Windows</SubSystem>
|
||||
<GenerateDebugInformation>true</GenerateDebugInformation>
|
||||
<EnableCOMDATFolding>true</EnableCOMDATFolding>
|
||||
<OptimizeReferences>true</OptimizeReferences>
|
||||
</Link>
|
||||
</ItemDefinitionGroup>
|
||||
<Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
|
||||
<ImportGroup Label="ExtensionTargets">
|
||||
</ImportGroup>
|
||||
</Project>
|
||||
2382
liblinear/linear.cpp
Normal file
2382
liblinear/linear.cpp
Normal file
File diff suppressed because it is too large
Load Diff
18
liblinear/linear.def
Normal file
18
liblinear/linear.def
Normal file
@@ -0,0 +1,18 @@
|
||||
LIBRARY liblinear
|
||||
EXPORTS
|
||||
train @1
|
||||
cross_validation @2
|
||||
save_model @3
|
||||
load_model @4
|
||||
get_nr_feature @5
|
||||
get_nr_class @6
|
||||
get_labels @7
|
||||
predict_values @8
|
||||
predict @9
|
||||
predict_probability @10
|
||||
free_and_destroy_model @11
|
||||
free_model_content @12
|
||||
destroy_param @13
|
||||
check_parameter @14
|
||||
check_probability_model @15
|
||||
set_print_string_function @16
|
||||
73
liblinear/linear.h
Normal file
73
liblinear/linear.h
Normal file
@@ -0,0 +1,73 @@
|
||||
#ifndef _LIBLINEAR_H
|
||||
#define _LIBLINEAR_H
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
struct feature_node
|
||||
{
|
||||
int index;
|
||||
double value;
|
||||
};
|
||||
|
||||
struct problem
|
||||
{
|
||||
int l, n;
|
||||
int *y;
|
||||
struct feature_node **x;
|
||||
double bias; /* < 0 if no bias term */
|
||||
};
|
||||
|
||||
enum { L2R_LR, L2R_L2LOSS_SVC_DUAL, L2R_L2LOSS_SVC, L2R_L1LOSS_SVC_DUAL, MCSVM_CS, L1R_L2LOSS_SVC, L1R_LR, L2R_LR_DUAL }; /* solver_type */
|
||||
|
||||
struct parameter
|
||||
{
|
||||
int solver_type;
|
||||
|
||||
/* these are for training only */
|
||||
double eps; /* stopping criteria */
|
||||
double C;
|
||||
int nr_weight;
|
||||
int *weight_label;
|
||||
double* weight;
|
||||
};
|
||||
|
||||
struct model
|
||||
{
|
||||
struct parameter param;
|
||||
int nr_class; /* number of classes */
|
||||
int nr_feature;
|
||||
double *w;
|
||||
int *label; /* label of each class */
|
||||
double bias;
|
||||
};
|
||||
|
||||
struct model* train(const struct problem *prob, const struct parameter *param);
|
||||
void cross_validation(const struct problem *prob, const struct parameter *param, int nr_fold, int *target);
|
||||
|
||||
int predict_values(const struct model *model_, const struct feature_node *x, double* dec_values);
|
||||
int predict(const struct model *model_, const struct feature_node *x);
|
||||
int predict_probability(const struct model *model_, const struct feature_node *x, double* prob_estimates);
|
||||
|
||||
int save_model(const char *model_file_name, const struct model *model_);
|
||||
struct model *load_model(const char *model_file_name);
|
||||
|
||||
int get_nr_feature(const struct model *model_);
|
||||
int get_nr_class(const struct model *model_);
|
||||
void get_labels(const struct model *model_, int* label);
|
||||
|
||||
void free_model_content(struct model *model_ptr);
|
||||
void free_and_destroy_model(struct model **model_ptr_ptr);
|
||||
void destroy_param(struct parameter *param);
|
||||
|
||||
const char *check_parameter(const struct problem *prob, const struct parameter *param);
|
||||
int check_probability_model(const struct model *model);
|
||||
void set_print_string_function(void (*print_func) (const char*));
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif /* _LIBLINEAR_H */
|
||||
|
||||
218
liblinear/predict.c
Normal file
218
liblinear/predict.c
Normal file
@@ -0,0 +1,218 @@
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "linear.h"

struct feature_node *x;
int max_nr_attr = 64;

struct model* model_;
int flag_predict_probability=0;

void exit_input_error(int line_num)
{
	fprintf(stderr,"Wrong input format at line %d\n", line_num);
	exit(1);
}

static char *line = NULL;
static int max_line_len;

static char* readline(FILE *input)
{
	int len;

	if(fgets(line,max_line_len,input) == NULL)
		return NULL;

	while(strrchr(line,'\n') == NULL)
	{
		max_line_len *= 2;
		line = (char *) realloc(line,max_line_len);
		len = (int) strlen(line);
		if(fgets(line+len,max_line_len-len,input) == NULL)
			break;
	}
	return line;
}

void do_predict(FILE *input, FILE *output, struct model* model_)
{
	int correct = 0;
	int total = 0;

	int nr_class=get_nr_class(model_);
	double *prob_estimates=NULL;
	int j, n;
	int nr_feature=get_nr_feature(model_);
	if(model_->bias>=0)
		n=nr_feature+1;
	else
		n=nr_feature;

	if(flag_predict_probability)
	{
		int *labels;

		if(!check_probability_model(model_))
		{
			fprintf(stderr, "probability output is only supported for logistic regression\n");
			exit(1);
		}

		labels=(int *) malloc(nr_class*sizeof(int));
		get_labels(model_,labels);
		prob_estimates = (double *) malloc(nr_class*sizeof(double));
		fprintf(output,"labels");
		for(j=0;j<nr_class;j++)
			fprintf(output," %d",labels[j]);
		fprintf(output,"\n");
		free(labels);
	}

	max_line_len = 1024;
	line = (char *)malloc(max_line_len*sizeof(char));
	while(readline(input) != NULL)
	{
		int i = 0;
		int target_label, predict_label;
		char *idx, *val, *label, *endptr;
		int inst_max_index = 0; // strtol gives 0 if wrong format

		label = strtok(line," \t\n");
		if(label == NULL) // empty line
			exit_input_error(total+1);

		target_label = (int) strtol(label,&endptr,10);
		if(endptr == label || *endptr != '\0')
			exit_input_error(total+1);

		while(1)
		{
			if(i>=max_nr_attr-2)	// need one more for index = -1
			{
				max_nr_attr *= 2;
				x = (struct feature_node *) realloc(x,max_nr_attr*sizeof(struct feature_node));
			}

			idx = strtok(NULL,":");
			val = strtok(NULL," \t");

			if(val == NULL)
				break;
			errno = 0;
			x[i].index = (int) strtol(idx,&endptr,10);
			if(endptr == idx || errno != 0 || *endptr != '\0' || x[i].index <= inst_max_index)
				exit_input_error(total+1);
			else
				inst_max_index = x[i].index;

			errno = 0;
			x[i].value = strtod(val,&endptr);
			if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
				exit_input_error(total+1);

			// feature indices larger than those in training are not used
			if(x[i].index <= nr_feature)
				++i;
		}

		if(model_->bias>=0)
		{
			x[i].index = n;
			x[i].value = model_->bias;
			i++;
		}
		x[i].index = -1;

		if(flag_predict_probability)
		{
			int j;
			predict_label = predict_probability(model_,x,prob_estimates);
			fprintf(output,"%d",predict_label);
			for(j=0;j<model_->nr_class;j++)
				fprintf(output," %g",prob_estimates[j]);
			fprintf(output,"\n");
		}
		else
		{
			predict_label = predict(model_,x);
			fprintf(output,"%d\n",predict_label);
		}

		if(predict_label == target_label)
			++correct;
		++total;
	}
	printf("Accuracy = %g%% (%d/%d)\n",(double) correct/total*100,correct,total);
	if(flag_predict_probability)
		free(prob_estimates);
}

void exit_with_help()
{
	printf(
	"Usage: predict [options] test_file model_file output_file\n"
	"options:\n"
	"-b probability_estimates: whether to output probability estimates, 0 or 1 (default 0)\n"
	);
	exit(1);
}

int main(int argc, char **argv)
{
	FILE *input, *output;
	int i;

	// parse options
	for(i=1;i<argc;i++)
	{
		if(argv[i][0] != '-') break;
		++i;
		switch(argv[i-1][1])
		{
			case 'b':
				flag_predict_probability = atoi(argv[i]);
				break;

			default:
				fprintf(stderr,"unknown option: -%c\n", argv[i-1][1]);
				exit_with_help();
				break;
		}
	}
	if(i>=argc)
		exit_with_help();

	input = fopen(argv[i],"r");
	if(input == NULL)
	{
		fprintf(stderr,"can't open input file %s\n",argv[i]);
		exit(1);
	}

	output = fopen(argv[i+2],"w");
	if(output == NULL)
	{
		fprintf(stderr,"can't open output file %s\n",argv[i+2]);
		exit(1);
	}

	if((model_=load_model(argv[i+1]))==0)
	{
		fprintf(stderr,"can't open model file %s\n",argv[i+1]);
		exit(1);
	}

	x = (struct feature_node *) malloc(max_nr_attr*sizeof(struct feature_node));
	do_predict(input, output, model_);
	free_and_destroy_model(&model_);
	free(line);
	free(x);
	fclose(input);
	fclose(output);
	return 0;
}
340
liblinear/train.c
Normal file
340
liblinear/train.c
Normal file
@@ -0,0 +1,340 @@
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <errno.h>
#include "linear.h"
#define Malloc(type,n) (type *)malloc((n)*sizeof(type))
#define INF HUGE_VAL

void print_null(const char *s) {}

void exit_with_help()
{
	printf(
	"Usage: train [options] training_set_file [model_file]\n"
	"options:\n"
	"-s type : set type of solver (default 1)\n"
	"	0 -- L2-regularized logistic regression (primal)\n"
	"	1 -- L2-regularized L2-loss support vector classification (dual)\n"
	"	2 -- L2-regularized L2-loss support vector classification (primal)\n"
	"	3 -- L2-regularized L1-loss support vector classification (dual)\n"
	"	4 -- multi-class support vector classification by Crammer and Singer\n"
	"	5 -- L1-regularized L2-loss support vector classification\n"
	"	6 -- L1-regularized logistic regression\n"
	"	7 -- L2-regularized logistic regression (dual)\n"
	"-c cost : set the parameter C (default 1)\n"
	"-e epsilon : set tolerance of termination criterion\n"
	"	-s 0 and 2\n"
	"		|f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,\n"
	"		where f is the primal function and pos/neg are # of\n"
	"		positive/negative data (default 0.01)\n"
	"	-s 1, 3, 4 and 7\n"
	"		Dual maximal violation <= eps; similar to libsvm (default 0.1)\n"
	"	-s 5 and 6\n"
	"		|f'(w)|_1 <= eps*min(pos,neg)/l*|f'(w0)|_1,\n"
	"		where f is the primal function (default 0.01)\n"
	"-B bias : if bias >= 0, instance x becomes [x; bias]; if < 0, no bias term added (default -1)\n"
	"-wi weight: weights adjust the parameter C of different classes (see README for details)\n"
	"-v n: n-fold cross validation mode\n"
	"-q : quiet mode (no outputs)\n"
	);
	exit(1);
}

void exit_input_error(int line_num)
{
	fprintf(stderr,"Wrong input format at line %d\n", line_num);
	exit(1);
}

static char *line = NULL;
static int max_line_len;

static char* readline(FILE *input)
{
	int len;

	if(fgets(line,max_line_len,input) == NULL)
		return NULL;

	while(strrchr(line,'\n') == NULL)
	{
		max_line_len *= 2;
		line = (char *) realloc(line,max_line_len);
		len = (int) strlen(line);
		if(fgets(line+len,max_line_len-len,input) == NULL)
			break;
	}
	return line;
}

void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name);
void read_problem(const char *filename);
void do_cross_validation();

struct feature_node *x_space;
struct parameter param;
struct problem prob;
struct model* model_;
int flag_cross_validation;
int nr_fold;
double bias;

int main(int argc, char **argv)
{
	char input_file_name[1024];
	char model_file_name[1024];
	const char *error_msg;

	parse_command_line(argc, argv, input_file_name, model_file_name);
	read_problem(input_file_name);
	error_msg = check_parameter(&prob,&param);

	if(error_msg)
	{
		fprintf(stderr,"Error: %s\n",error_msg);
		exit(1);
	}

	if(flag_cross_validation)
	{
		do_cross_validation();
	}
	else
	{
		model_=train(&prob, &param);
		if(save_model(model_file_name, model_))
		{
			fprintf(stderr,"can't save model to file %s\n",model_file_name);
			exit(1);
		}
		free_and_destroy_model(&model_);
	}
	destroy_param(&param);
	free(prob.y);
	free(prob.x);
	free(x_space);
	free(line);

	return 0;
}

void do_cross_validation()
{
	int i;
	int total_correct = 0;
	int *target = Malloc(int, prob.l);

	cross_validation(&prob,&param,nr_fold,target);

	for(i=0;i<prob.l;i++)
		if(target[i] == prob.y[i])
			++total_correct;
	printf("Cross Validation Accuracy = %g%%\n",100.0*total_correct/prob.l);

	free(target);
}

void parse_command_line(int argc, char **argv, char *input_file_name, char *model_file_name)
{
	int i;
	void (*print_func)(const char*) = NULL;	// default printing to stdout

	// default values
	param.solver_type = L2R_L2LOSS_SVC_DUAL;
	param.C = 1;
	param.eps = INF; // see setting below
	param.nr_weight = 0;
	param.weight_label = NULL;
	param.weight = NULL;
	flag_cross_validation = 0;
	bias = -1;

	// parse options
	for(i=1;i<argc;i++)
	{
		if(argv[i][0] != '-') break;
		if(++i>=argc)
			exit_with_help();
		switch(argv[i-1][1])
		{
			case 's':
				param.solver_type = atoi(argv[i]);
				break;

			case 'c':
				param.C = atof(argv[i]);
				break;

			case 'e':
				param.eps = atof(argv[i]);
				break;

			case 'B':
				bias = atof(argv[i]);
				break;

			case 'w':
				++param.nr_weight;
				param.weight_label = (int *) realloc(param.weight_label,sizeof(int)*param.nr_weight);
				param.weight = (double *) realloc(param.weight,sizeof(double)*param.nr_weight);
				param.weight_label[param.nr_weight-1] = atoi(&argv[i-1][2]);
				param.weight[param.nr_weight-1] = atof(argv[i]);
				break;

			case 'v':
				flag_cross_validation = 1;
				nr_fold = atoi(argv[i]);
				if(nr_fold < 2)
				{
					fprintf(stderr,"n-fold cross validation: n must >= 2\n");
					exit_with_help();
				}
				break;

			case 'q':
				print_func = &print_null;
				i--;
				break;

			default:
				fprintf(stderr,"unknown option: -%c\n", argv[i-1][1]);
				exit_with_help();
				break;
		}
	}

	set_print_string_function(print_func);

	// determine filenames
	if(i>=argc)
		exit_with_help();

	strcpy(input_file_name, argv[i]);

	if(i<argc-1)
		strcpy(model_file_name,argv[i+1]);
	else
	{
		char *p = strrchr(argv[i],'/');
		if(p==NULL)
			p = argv[i];
		else
			++p;
		sprintf(model_file_name,"%s.model",p);
	}

	if(param.eps == INF)
	{
		if(param.solver_type == L2R_LR || param.solver_type == L2R_L2LOSS_SVC)
			param.eps = 0.01;
		else if(param.solver_type == L2R_L2LOSS_SVC_DUAL || param.solver_type == L2R_L1LOSS_SVC_DUAL || param.solver_type == MCSVM_CS || param.solver_type == L2R_LR_DUAL)
			param.eps = 0.1;
		else if(param.solver_type == L1R_L2LOSS_SVC || param.solver_type == L1R_LR)
			param.eps = 0.01;
	}
}

// read in a problem (in libsvm format)
void read_problem(const char *filename)
{
	int max_index, inst_max_index, i;
	long int elements, j;
	FILE *fp = fopen(filename,"r");
	char *endptr;
	char *idx, *val, *label;

	if(fp == NULL)
	{
		fprintf(stderr,"can't open input file %s\n",filename);
		exit(1);
	}

	prob.l = 0;
	elements = 0;
	max_line_len = 1024;
	line = Malloc(char,max_line_len);
	while(readline(fp)!=NULL)
	{
		char *p = strtok(line," \t"); // label

		// features
		while(1)
		{
			p = strtok(NULL," \t");
			if(p == NULL || *p == '\n') // check '\n' as ' ' may be after the last feature
				break;
			elements++;
		}
		elements++; // for bias term
		prob.l++;
	}
	rewind(fp);

	prob.bias=bias;

	prob.y = Malloc(int,prob.l);
	prob.x = Malloc(struct feature_node *,prob.l);
	x_space = Malloc(struct feature_node,elements+prob.l);

	max_index = 0;
	j=0;
	for(i=0;i<prob.l;i++)
	{
		inst_max_index = 0; // strtol gives 0 if wrong format
		readline(fp);
		prob.x[i] = &x_space[j];
		label = strtok(line," \t\n");
		if(label == NULL) // empty line
			exit_input_error(i+1);

		prob.y[i] = (int) strtol(label,&endptr,10);
		if(endptr == label || *endptr != '\0')
			exit_input_error(i+1);

		while(1)
		{
			idx = strtok(NULL,":");
			val = strtok(NULL," \t");

			if(val == NULL)
				break;

			errno = 0;
			x_space[j].index = (int) strtol(idx,&endptr,10);
			if(endptr == idx || errno != 0 || *endptr != '\0' || x_space[j].index <= inst_max_index)
				exit_input_error(i+1);
			else
				inst_max_index = x_space[j].index;

			errno = 0;
			x_space[j].value = strtod(val,&endptr);
			if(endptr == val || errno != 0 || (*endptr != '\0' && !isspace(*endptr)))
				exit_input_error(i+1);

			++j;
		}

		if(inst_max_index > max_index)
			max_index = inst_max_index;

		if(prob.bias >= 0)
			x_space[j++].value = prob.bias;

		x_space[j++].index = -1;
	}

	if(prob.bias >= 0)
	{
		prob.n=max_index+1;
		for(i=1;i<prob.l;i++)
			(prob.x[i]-2)->index = prob.n;
		x_space[j-2].index = prob.n;
	}
	else
		prob.n=max_index;

	fclose(fp);
}
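read_problem above parses training data in LIBSVM format: each line is an integer label followed by index:value pairs with strictly increasing feature indices. As a made-up illustration (file and model names are hypothetical, not part of this commit), a three-instance training file could look like:

+1 1:0.5 3:1.2
-1 2:0.8 3:-0.3
+1 1:1.1 4:2.0

Per the usage messages above, it would be trained with something like "./train -s 1 -c 1 train_data" (which writes train_data.model by default) and applied with "./predict test_data train_data.model output".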
235
liblinear/tron.cpp
Normal file
235
liblinear/tron.cpp
Normal file
@@ -0,0 +1,235 @@
#include <math.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include "tron.h"

#ifndef min
template <class T> static inline T min(T x,T y) { return (x<y)?x:y; }
#endif

#ifndef max
template <class T> static inline T max(T x,T y) { return (x>y)?x:y; }
#endif

#ifdef __cplusplus
extern "C" {
#endif

extern double dnrm2_(int *, double *, int *);
extern double ddot_(int *, double *, int *, double *, int *);
extern int daxpy_(int *, double *, double *, int *, double *, int *);
extern int dscal_(int *, double *, double *, int *);

#ifdef __cplusplus
}
#endif

static void default_print(const char *buf)
{
	fputs(buf,stdout);
	fflush(stdout);
}

void TRON::info(const char *fmt,...)
{
	char buf[BUFSIZ];
	va_list ap;
	va_start(ap,fmt);
	vsprintf(buf,fmt,ap);
	va_end(ap);
	(*tron_print_string)(buf);
}

TRON::TRON(const function *fun_obj, double eps, int max_iter)
{
	this->fun_obj=const_cast<function *>(fun_obj);
	this->eps=eps;
	this->max_iter=max_iter;
	tron_print_string = default_print;
}

TRON::~TRON()
{
}

void TRON::tron(double *w)
{
	// Parameters for updating the iterates.
	double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75;

	// Parameters for updating the trust region size delta.
	double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4;

	int n = fun_obj->get_nr_variable();
	int i, cg_iter;
	double delta, snorm, one=1.0;
	double alpha, f, fnew, prered, actred, gs;
	int search = 1, iter = 1, inc = 1;
	double *s = new double[n];
	double *r = new double[n];
	double *w_new = new double[n];
	double *g = new double[n];

	for (i=0; i<n; i++)
		w[i] = 0;

	f = fun_obj->fun(w);
	fun_obj->grad(w, g);
	delta = dnrm2_(&n, g, &inc);
	double gnorm1 = delta;
	double gnorm = gnorm1;

	if (gnorm <= eps*gnorm1)
		search = 0;

	iter = 1;

	while (iter <= max_iter && search)
	{
		cg_iter = trcg(delta, g, s, r);

		memcpy(w_new, w, sizeof(double)*n);
		daxpy_(&n, &one, s, &inc, w_new, &inc);

		gs = ddot_(&n, g, &inc, s, &inc);
		prered = -0.5*(gs-ddot_(&n, s, &inc, r, &inc));
		fnew = fun_obj->fun(w_new);

		// Compute the actual reduction.
		actred = f - fnew;

		// On the first iteration, adjust the initial step bound.
		snorm = dnrm2_(&n, s, &inc);
		if (iter == 1)
			delta = min(delta, snorm);

		// Compute prediction alpha*snorm of the step.
		if (fnew - f - gs <= 0)
			alpha = sigma3;
		else
			alpha = max(sigma1, -0.5*(gs/(fnew - f - gs)));

		// Update the trust region bound according to the ratio of actual to predicted reduction.
		if (actred < eta0*prered)
			delta = min(max(alpha, sigma1)*snorm, sigma2*delta);
		else if (actred < eta1*prered)
			delta = max(sigma1*delta, min(alpha*snorm, sigma2*delta));
		else if (actred < eta2*prered)
			delta = max(sigma1*delta, min(alpha*snorm, sigma3*delta));
		else
			delta = max(delta, min(alpha*snorm, sigma3*delta));

		info("iter %2d act %5.3e pre %5.3e delta %5.3e f %5.3e |g| %5.3e CG %3d\n", iter, actred, prered, delta, f, gnorm, cg_iter);

		if (actred > eta0*prered)
		{
			iter++;
			memcpy(w, w_new, sizeof(double)*n);
			f = fnew;
			fun_obj->grad(w, g);

			gnorm = dnrm2_(&n, g, &inc);
			if (gnorm <= eps*gnorm1)
				break;
		}
		if (f < -1.0e+32)
		{
			info("warning: f < -1.0e+32\n");
			break;
		}
		if (fabs(actred) <= 0 && prered <= 0)
		{
			info("warning: actred and prered <= 0\n");
			break;
		}
		if (fabs(actred) <= 1.0e-12*fabs(f) &&
		    fabs(prered) <= 1.0e-12*fabs(f))
		{
			info("warning: actred and prered too small\n");
			break;
		}
	}

	delete[] g;
	delete[] r;
	delete[] w_new;
	delete[] s;
}

int TRON::trcg(double delta, double *g, double *s, double *r)
{
	int i, inc = 1;
	int n = fun_obj->get_nr_variable();
	double one = 1;
	double *d = new double[n];
	double *Hd = new double[n];
	double rTr, rnewTrnew, alpha, beta, cgtol;

	for (i=0; i<n; i++)
	{
		s[i] = 0;
		r[i] = -g[i];
		d[i] = r[i];
	}
	cgtol = 0.1*dnrm2_(&n, g, &inc);

	int cg_iter = 0;
	rTr = ddot_(&n, r, &inc, r, &inc);
	while (1)
	{
		if (dnrm2_(&n, r, &inc) <= cgtol)
			break;
		cg_iter++;
		fun_obj->Hv(d, Hd);

		alpha = rTr/ddot_(&n, d, &inc, Hd, &inc);
		daxpy_(&n, &alpha, d, &inc, s, &inc);
		if (dnrm2_(&n, s, &inc) > delta)
		{
			info("cg reaches trust region boundary\n");
			alpha = -alpha;
			daxpy_(&n, &alpha, d, &inc, s, &inc);

			double std = ddot_(&n, s, &inc, d, &inc);
			double sts = ddot_(&n, s, &inc, s, &inc);
			double dtd = ddot_(&n, d, &inc, d, &inc);
			double dsq = delta*delta;
			double rad = sqrt(std*std + dtd*(dsq-sts));
			if (std >= 0)
				alpha = (dsq - sts)/(std + rad);
			else
				alpha = (rad - std)/dtd;
			daxpy_(&n, &alpha, d, &inc, s, &inc);
			alpha = -alpha;
			daxpy_(&n, &alpha, Hd, &inc, r, &inc);
			break;
		}
		alpha = -alpha;
		daxpy_(&n, &alpha, Hd, &inc, r, &inc);
		rnewTrnew = ddot_(&n, r, &inc, r, &inc);
		beta = rnewTrnew/rTr;
		dscal_(&n, &beta, d, &inc);
		daxpy_(&n, &one, r, &inc, d, &inc);
		rTr = rnewTrnew;
	}

	delete[] d;
	delete[] Hd;

	return(cg_iter);
}

double TRON::norm_inf(int n, double *x)
{
	double dmax = fabs(x[0]);
	for (int i=1; i<n; i++)
		if (fabs(x[i]) >= dmax)
			dmax = fabs(x[i]);
	return(dmax);
}

void TRON::set_print_string(void (*print_string) (const char *buf))
{
	tron_print_string = print_string;
}
34
liblinear/tron.h
Normal file
34
liblinear/tron.h
Normal file
@@ -0,0 +1,34 @@
#ifndef _TRON_H
#define _TRON_H

class function
{
public:
	virtual double fun(double *w) = 0 ;
	virtual void grad(double *w, double *g) = 0 ;
	virtual void Hv(double *s, double *Hs) = 0 ;

	virtual int get_nr_variable(void) = 0 ;
	virtual ~function(void){}
};

class TRON
{
public:
	TRON(const function *fun_obj, double eps = 0.1, int max_iter = 1000);
	~TRON();

	void tron(double *w);
	void set_print_string(void (*i_print) (const char *buf));

private:
	int trcg(double delta, double *g, double *s, double *r);
	double norm_inf(int n, double *x);

	double eps;
	int max_iter;
	function *fun_obj;
	void info(const char *fmt,...);
	void (*tron_print_string)(const char *buf);
};
#endif
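For orientation only, here is a minimal sketch (not part of this commit) of how the interface above is meant to be used: an objective implements the function interface (fun, grad, Hv, get_nr_variable) and is handed to TRON, which minimizes it starting from w = 0. The quadratic below is an invented toy objective; building it also needs the BLAS routines (dnrm2_, ddot_, daxpy_, dscal_) that tron.cpp references.

// Hypothetical example: minimize f(w) = 0.5*||w - b||^2 with TRON.
// The gradient is w - b, the Hessian is the identity, and the minimizer is w = b.
#include <stdio.h>
#include "tron.h"

class quadratic: public function
{
public:
	quadratic(const double *b, int n): b(b), n(n) {}

	double fun(double *w)
	{
		double f = 0;
		for (int i = 0; i < n; i++)
			f += 0.5 * (w[i] - b[i]) * (w[i] - b[i]);
		return f;
	}
	void grad(double *w, double *g)
	{
		for (int i = 0; i < n; i++)
			g[i] = w[i] - b[i];
	}
	void Hv(double *s, double *Hs)
	{
		for (int i = 0; i < n; i++)
			Hs[i] = s[i];   // identity Hessian
	}
	int get_nr_variable(void) { return n; }

private:
	const double *b;
	int n;
};

int main()
{
	double b[] = { 1.0, -2.0, 3.0 };
	double w[3];

	quadratic obj(b, 3);
	TRON solver(&obj, 0.1, 1000);   // eps and max_iter match the declared defaults
	solver.tron(w);                 // TRON zeroes w internally before optimizing

	printf("solution: %g %g %g\n", w[0], w[1], w[2]);
	return 0;
}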