Provided by: vowpal-wabbit_7.3-1ubuntu1_amd64

NAME

       vw - Vowpal Wabbit -- fast online learning tool

DESCRIPTION

   VW options:
       -h [ --help ]
              Print this help message. For a tutorial, see http://hunch.net/~vw/.

       --active_learning
              active learning mode

       --active_simulation
              active learning simulation mode

       --active_mellowness arg
              active learning mellowness parameter c_0. Default: 8.

       --binary
              report loss as binary classification with labels -1 and 1
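
              For example, a minimal sketch of training a binary classifier
              with logistic loss (file names are illustrative):

                     vw -d train.dat --binary --loss_function logistic -f model.vw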

       --autolink arg
              create a link function with polynomial degree <arg>

       --sgd  use regular stochastic gradient descent update.

       --adaptive
              use adaptive, individual learning rates.

       --invariant
              use safe/importance aware updates.

       --normalized
              use per feature normalized updates

       --exact_adaptive_norm
              use current default invariant normalized adaptive update rule
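
              For example, to use the plain SGD update rather than the
              adaptive, invariant, and normalized updates (data file name
              illustrative):

                     vw --sgd -d train.dat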

       -a [ --audit ]
              print weights of features

       -b [ --bit_precision ] arg
              number of bits in the feature table

       --bfgs use bfgs optimization

       -c [ --cache ]
              Use a cache.  The default is <data>.cache

       --cache_file arg
              The location(s) of cache_file.
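
              For example, running multiple passes, which requires a cache
              (file names are illustrative):

                     vw -d train.dat --cache_file train.cache --passes 10 -f model.vw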

       --compressed
              use gzip format whenever possible. If a cache file is being created, this option
              creates a compressed cache file. A mixture of raw-text and compressed inputs is
              supported with autodetection.

       --no_stdin
              do not default to reading from stdin

       --conjugate_gradient
              use conjugate gradient based optimization

       --csoaa arg
              Use one-against-all multiclass learning with <k> costs
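
              For example, cost-sensitive learning over 3 classes, where each
              data line carries class:cost pairs (a sketch; the file name is
              illustrative):

                     vw --csoaa 3 -d costs.dat

              with data lines such as:

                     1:0.0 2:1.0 3:1.0 | feature_a feature_b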

       --wap arg
              Use weighted all-pairs multiclass learning with <k> costs

       --csoaa_ldf arg
              Use one-against-all multiclass learning with label-dependent features.  Specify
              singleline or multiline.

       --wap_ldf arg
              Use weighted all-pairs multiclass learning with label-dependent features.  Specify
              singleline or multiline.

       --cb arg
              Use contextual bandit learning with <k> costs
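
              For example, contextual bandit learning over 4 actions, where a
              data line labels the observed action as action:cost:probability
              (a sketch; the file name is illustrative):

                     vw --cb 4 -d cb.dat

              with data lines such as:

                     2:1.0:0.25 | feature_a feature_b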

       --l1 arg
              l_1 lambda

       --l2 arg
              l_2 lambda

       -d [ --data ] arg
              Example set: the input file of examples

       --daemon
              persistent daemon mode on port 26542

       --num_children arg
              number of children for persistent daemon mode

       --pid_file arg
              Write pid file in persistent daemon mode
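
              For example, serving a previously trained model (file name
              illustrative) as a daemon with 8 children:

                     vw --daemon --port 26542 -i model.vw --num_children 8

              Examples written to the TCP port in the usual input format are
              answered with predictions.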

       --decay_learning_rate arg
              Set the decay factor for learning_rate between passes

       --input_feature_regularizer arg
              Per feature regularization input file

       -f [ --final_regressor ] arg
              Final regressor

       --readable_model arg
              Output human-readable final regressor
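
              For example, training a model, then continuing from it on new
              data (file names illustrative; --save_resume on the first run
              preserves extra state for exact resumption):

                     vw -d day1.dat -f model.vw --readable_model model.txt
                     vw -d day2.dat -i model.vw -f model2.vw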

       --hash arg
              how to hash the features. Available options: strings, all

       --hessian_on
              use second derivative in line search

       --version
              Version information

       --ignore arg
              ignore namespaces beginning with character <arg>

       --keep arg
              keep namespaces beginning with character <arg>

       -k [ --kill_cache ]
              do not reuse existing cache: create a new one always

       --initial_weight arg
              Set all weights to an initial value of <arg>.

       -i [ --initial_regressor ] arg
              Initial regressor(s)

       --initial_pass_length arg
              initial number of examples per pass

       --initial_t arg
              initial t value

       --lda arg
              Run LDA with <k> topics
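
              For example, fitting 10 topics to a file of bag-of-words
              documents (file name illustrative):

                     vw --lda 10 -d documents.dat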

       --span_server arg
              Location of server for setting up spanning tree

       --min_prediction arg
              Smallest prediction to output

       --max_prediction arg
              Largest prediction to output

       --mem arg
              memory (history size) used by bfgs

       --nn arg
              Use sigmoidal feedforward network with <k> hidden units

       --noconstant
              Don't add a constant feature

       --noop do no learning

       --oaa arg
              Use one-against-all multiclass learning with <k> labels
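
              For example, one-against-all over 4 classes, labeled 1 through 4
              in the data (file names illustrative):

                     vw --oaa 4 -d multiclass.dat -f model.vw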

       --ect arg
              Use error correcting tournament with <k> labels

       --output_feature_regularizer_binary arg
              Per feature regularization output file

       --output_feature_regularizer_text arg
              Per feature regularization output file, in text

       --port arg
              port to listen on

       --power_t arg
              t power value

       -l [ --learning_rate ] arg
              Set the learning rate

       --passes arg
              Number of training passes

       --termination arg
              Termination threshold

       -p [ --predictions ] arg
              File to output predictions to

       -q [ --quadratic ] arg
              Create and use quadratic features

       --cubic arg
              Create and use cubic features
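
              For example, crossing all features in namespaces beginning with
              a against all features in namespaces beginning with b, plus a
              three-way cross (namespace letters are illustrative):

                     vw -q ab --cubic abc -d train.dat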

       --quiet
              Don't output diagnostics

       --rank arg
              rank for matrix factorization.

       --random_weights arg
              make initial weights random

       --random_seed arg
              seed random number generator

       -r [ --raw_predictions ] arg
              File to output unnormalized predictions to

       --ring_size arg
              size of example ring

       --examples arg
              number of examples to parse

       --save_per_pass
              Save the model after every pass over data

       --save_resume
              save extra state so learning can be resumed later with new data

       --sendto arg
              send examples to <host>

       --searn arg
              use searn, argument=maximum action id

       --searnimp arg
              use searn, argument=maximum action id or 0 for LDF

       -t [ --testonly ]
              Ignore label information and just test
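
              For example, evaluating a saved model on held-out data (file
              names illustrative):

                     vw -t -i model.vw -d test.dat -p predictions.txt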

       --loss_function arg (=squared)
              Specify the loss function to be used; squared is used by default. Currently
              available are squared, classic, hinge, logistic, and quantile.

       --quantile_tau arg (=0.5)
              Parameter tau associated with quantile loss. Defaults to 0.5.
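
              For example, estimating the conditional 25th percentile (file
              name illustrative):

                     vw --loss_function quantile --quantile_tau 0.25 -d train.dat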

       --unique_id arg
              unique id used for cluster parallel jobs

       --total arg
              total number of nodes used in cluster parallel job

       --node arg
              node number in cluster parallel job
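
              For example, node 0 of a 4-node cluster-parallel job, with all
              nodes sharing the same spanning-tree server and job id (host and
              file names illustrative):

                     vw -d part0.dat --span_server st.example.com --total 4 --node 0 --unique_id 1234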

       --sort_features
              turn this on to disregard the order in which features have been defined. This will
              lead to smaller cache sizes.

       --ngram arg
              Generate N-grams

       --skips arg
              Generate skips in N-grams. Used in conjunction with --ngram, this can generate
              generalized n-skip-k-grams.
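
              For example, generating bigrams that allow up to one skipped
              token between their words (file name illustrative):

                     vw --ngram 2 --skips 1 -d text.dat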