// Copyright (C) 2016  Davis E. King ([email protected])
// License: Boost Software License   See LICENSE.txt for the full license.
#undef DLIB_DNn_UTILITIES_ABSTRACT_H_
#ifdef DLIB_DNn_UTILITIES_ABSTRACT_H_

#include "core_abstract.h"
#include "../geometry/vector_abstract.h"

namespace dlib
{

// ----------------------------------------------------------------------------------------

    double log1pexp(
        double x
    );
    /*!
        ensures
            - returns log(1+exp(x))
              (except computes it using a numerically accurate method)
        NOTE: For technical reasons, it is defined in misc.h.
    !*/
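
    // A minimal usage sketch (illustrative, not part of the original header; it
    // assumes including <dlib/dnn.h> pulls in the definition).  The naive expression
    // log(1+exp(x)) overflows for large x, while log1pexp() stays accurate:
    /*
        #include <dlib/dnn.h>
        #include <cmath>
        #include <iostream>

        int main()
        {
            std::cout << std::log(1+std::exp(1000.0)) << "\n";  // inf: exp(1000) overflows
            std::cout << dlib::log1pexp(1000.0) << "\n";        // 1000: computed accurately
            std::cout << dlib::log1pexp(0.0) << "\n";           // log(2) = 0.6931...
        }
    */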

// ----------------------------------------------------------------------------------------

    void randomize_parameters (
        tensor& params,
        unsigned long num_inputs_and_outputs,
        dlib::rand& rnd
    );
    /*!
        ensures
            - This function assigns random values into params based on the given random
              number generator.  In particular, it uses the parameter initialization
              method of formula 16 from the paper "Understanding the difficulty of
              training deep feedforward neural networks" by Xavier Glorot and Yoshua
              Bengio.
            - It is assumed that the total number of inputs and outputs from the layer
              is num_inputs_and_outputs.  That is, you should set num_inputs_and_outputs
              to the sum of the dimensionalities of the vectors going into and out of
              the layer that uses params as its parameters.
    !*/
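
    // An illustrative sketch (not from the original docs; assumes <dlib/dnn.h> and
    // using namespace dlib).  Formula 16 of the cited paper samples uniformly from
    // [-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out))]:
    /*
        resizable_tensor params;
        params.set_size(1, 1, 256, 128);  // weights of a hypothetical 256-in, 128-out layer
        dlib::rand rnd;
        randomize_parameters(params, 256+128, rnd);
    */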

// ----------------------------------------------------------------------------------------

    template <typename net_type>
    void net_to_xml (
        const net_type& net,
        std::ostream& out
    );
    /*!
        requires
            - net_type is an object of type add_layer, add_loss_layer, add_skip_layer,
              or add_tag_layer.
            - All layers in the net must provide to_xml() functions.
        ensures
            - Prints the given neural network object as an XML document to the given
              output stream.
    !*/

    template <typename net_type>
    void net_to_xml (
        const net_type& net,
        const std::string& filename
    );
    /*!
        requires
            - net_type is an object of type add_layer, add_loss_layer, add_skip_layer,
              or add_tag_layer.
            - All layers in the net must provide to_xml() functions.
        ensures
            - This function is just like the above net_to_xml(), except it writes to a
              file rather than an ostream.
    !*/
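
    // A short usage sketch (the network definition is hypothetical; assumes
    // <dlib/dnn.h>, <iostream>, and using namespace dlib):
    /*
        using net_type = loss_multiclass_log<fc<10, relu<fc<50, input<matrix<float>>>>>>;
        net_type net;
        net_to_xml(net, "net.xml");   // write the network structure to a file
        net_to_xml(net, std::cout);   // or to any std::ostream
    */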

// ----------------------------------------------------------------------------------------

    template <typename net_type>
    dpoint input_tensor_to_output_tensor(
        const net_type& net,
        dpoint p
    );
    /*!
        requires
            - net_type is an object of type add_layer, add_skip_layer, or add_tag_layer.
            - All layers in the net must provide map_input_to_output() functions.
        ensures
            - Given a dpoint (i.e. a row,column coordinate) in the input tensor given to
              net, this function returns the corresponding dpoint in the output tensor
              net.get_output().  This kind of mapping is useful when working with fully
              convolutional networks as you will often want to know what parts of the
              output feature maps correspond to what parts of the input.
            - If the network contains skip layers then any layers skipped over by the
              skip layer are ignored for the purpose of computing this coordinate
              mapping.  That is, if you walk the network from the output layer to the
              input layer, where each time you encounter a skip layer you jump to the
              layer indicated by the skip layer, you will visit exactly the layers in
              the network involved in the input_tensor_to_output_tensor() calculation.
              This behavior is useful since it allows you to compute some auxiliary DNN
              as a separate branch of computation that is separate from the main
              network's job of running some kind of fully convolutional network over an
              image.  For instance, you might want to have a branch in your network that
              computes some global image level summarization/feature.
    !*/
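
    // An illustrative sketch (the convolutional stack is hypothetical; assumes
    // <dlib/dnn.h> and using namespace dlib).  With two stride-2 convolutions the
    // output feature map is about 4x smaller than the input, so input coordinates map
    // to scaled-down output coordinates:
    /*
        using fcn = con<1,3,3,1,1, relu<con<8,5,5,2,2, relu<con<8,5,5,2,2,
                    input<matrix<float>>>>>>>;
        fcn net;
        dpoint p_out = input_tensor_to_output_tensor(net, dpoint(64,64));
        // p_out is the location in net.get_output() corresponding to input pixel (64,64).
    */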

// ----------------------------------------------------------------------------------------

    template <typename net_type>
    dpoint output_tensor_to_input_tensor(
        const net_type& net,
        dpoint p
    );
    /*!
        requires
            - net_type is an object of type add_layer, add_skip_layer, or add_tag_layer.
            - All layers in the net must provide map_output_to_input() functions.
        ensures
            - This function provides the reverse mapping of input_tensor_to_output_tensor().
              That is, given a dpoint in net.get_output(), what is the corresponding dpoint
              in the input tensor?
    !*/
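
    // Continuing the sketch above, the reverse direction: map a location in the output
    // feature map back to the input image pixel it corresponds to.
    /*
        dpoint p_in = output_tensor_to_input_tensor(net, p_out);
        // p_in lands at (or very near) the original input coordinate (64,64).
    */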

// ----------------------------------------------------------------------------------------

    template <typename net_type>
    inline size_t count_parameters(
        const net_type& net
    );
    /*!
        requires
            - net_type is an object of type add_layer, add_loss_layer, add_skip_layer,
              or add_tag_layer.
        ensures
            - Returns the number of allocated parameters in the network.  E.g. if the
              network has not been trained then, since nothing has been allocated yet,
              it will return 0.
    !*/
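
    // An illustrative sketch (hypothetical network; assumes <dlib/dnn.h>, <iostream>,
    // and using namespace dlib).  Parameters are allocated lazily, so the count is 0
    // until the network has seen a sample:
    /*
        using net_type = loss_multiclass_log<fc<10, input<matrix<float,0,1>>>>;
        net_type net;
        std::cout << count_parameters(net) << "\n";  // 0, nothing allocated yet

        matrix<float,0,1> samp(5);
        samp = 0;
        net(samp);                                   // a forward pass allocates parameters
        std::cout << count_parameters(net) << "\n";  // 60 = 5*10 weights + 10 biases
    */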

// ----------------------------------------------------------------------------------------

    template <typename net_type>
    void set_all_learning_rate_multipliers(
        net_type& net,
        double learning_rate_multiplier
    );
    /*!
        requires
            - net_type is an object of type add_layer, add_loss_layer, add_skip_layer,
              or add_tag_layer.
            - learning_rate_multiplier >= 0
        ensures
            - Sets all learning_rate_multipliers and bias_learning_rate_multipliers in
              net to learning_rate_multiplier.
    !*/
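
    // A common use, sketched (net is any network object as described above): freeze a
    // pretrained network during fine-tuning so the trainer leaves its weights untouched.
    /*
        set_all_learning_rate_multipliers(net, 0);
    */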

// ----------------------------------------------------------------------------------------

    template <size_t begin, size_t end, typename net_type>
    void set_learning_rate_multipliers_range(
        net_type& net,
        double learning_rate_multiplier
    );
    /*!
        requires
            - net_type is an object of type add_layer, add_loss_layer, add_skip_layer,
              or add_tag_layer.
            - learning_rate_multiplier >= 0
            - begin <= end <= net_type::num_layers
        ensures
            - Loops over the layers in the range [begin,end) in net and calls
              set_learning_rate_multiplier on them with the value of
              learning_rate_multiplier.
    !*/
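
    // Continuing the fine-tuning sketch (layer indices count from the output side, so
    // layer 0 is the final layer):
    /*
        set_all_learning_rate_multipliers(net, 0);           // freeze everything
        set_learning_rate_multipliers_range<0,2>(net, 1);    // then unfreeze layers 0 and 1
    */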

// ----------------------------------------------------------------------------------------

}

#endif // DLIB_DNn_UTILITIES_ABSTRACT_H_