|
<html><head><title>dlib C++ Library - utilities_abstract.h</title></head><body bgcolor='white'><pre> |
|
<font color='#009900'>// Copyright (C) 2016  Davis E. King (davis@dlib.net)
|
</font><font color='#009900'>// License: Boost Software License See LICENSE.txt for the full license. |
|
</font><font color='#0000FF'>#undef</font> DLIB_DNn_UTILITIES_ABSTRACT_H_ |
|
<font color='#0000FF'>#ifdef</font> DLIB_DNn_UTILITIES_ABSTRACT_H_ |
|
|
|
<font color='#0000FF'>#include</font> "<a style='text-decoration:none' href='core_abstract.h.html'>core_abstract.h</a>" |
|
<font color='#0000FF'>#include</font> "<a style='text-decoration:none' href='../geometry/vector_abstract.h.html'>../geometry/vector_abstract.h</a>" |
|
|
|
<font color='#0000FF'>namespace</font> dlib |
|
<b>{</b> |
|
|
|
<font color='#009900'>// ---------------------------------------------------------------------------------------- |
|
</font> |
|
<font color='#0000FF'><u>double</u></font> <b><a name='log1pexp'></a>log1pexp</b><font face='Lucida Console'>(</font> |
|
<font color='#0000FF'><u>double</u></font> x |
|
<font face='Lucida Console'>)</font>; |
|
<font color='#009900'>/*! |
|
ensures |
|
- returns log(1+exp(x)) |
|
              (but computes it using a numerically accurate method)
|
|
|
NOTE: For technical reasons, it is defined in misc.h. |
|
!*/</font> |
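
    <font color='#009900'>/*
        Example sketch (not part of the formal spec above):  log1pexp() avoids the
        overflow a naive log(1+exp(x)) hits for large x.  The inputs below are
        arbitrary illustrations.

            #include <dlib/dnn.h>
            #include <iostream>

            int main()
            {
                std::cout << dlib::log1pexp(-1000.0) << std::endl;  // ~0
                std::cout << dlib::log1pexp(1000.0)  << std::endl;  // ~1000; a naive exp(1000) would overflow
            }
    */</font>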
|
|
|
<font color='#009900'>// ---------------------------------------------------------------------------------------- |
|
</font> |
|
<font color='#0000FF'><u>void</u></font> <b><a name='randomize_parameters'></a>randomize_parameters</b> <font face='Lucida Console'>(</font> |
|
tensor<font color='#5555FF'>&</font> params, |
|
<font color='#0000FF'><u>unsigned</u></font> <font color='#0000FF'><u>long</u></font> num_inputs_and_outputs, |
|
dlib::rand<font color='#5555FF'>&</font> rnd |
|
<font face='Lucida Console'>)</font>; |
|
<font color='#009900'>/*! |
|
ensures |
|
- This function assigns random values into params based on the given random |
|
number generator. In particular, it uses the parameter initialization method |
|
of formula 16 from the paper "Understanding the difficulty of training deep |
|
feedforward neural networks" by Xavier Glorot and Yoshua Bengio. |
|
- It is assumed that the total number of inputs and outputs from the layer is |
|
num_inputs_and_outputs. That is, you should set num_inputs_and_outputs to |
|
the sum of the dimensionalities of the vectors going into and out of the |
|
layer that uses params as its parameters. |
|
!*/</font> |
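
    <font color='#009900'>/*
        Example sketch (illustrative only):  Glorot-initializing the weight tensor of
        a hypothetical fully connected layer with 100 inputs and 50 outputs, so
        num_inputs_and_outputs is 100+50.

            #include <dlib/dnn.h>

            dlib::resizable_tensor params(50, 100);   // a 50x100 weight matrix
            dlib::rand rnd;
            dlib::randomize_parameters(params, 100+50, rnd);
    */</font>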
|
|
|
<font color='#009900'>// ---------------------------------------------------------------------------------------- |
|
</font> |
|
<font color='#0000FF'>template</font> <font color='#5555FF'><</font><font color='#0000FF'>typename</font> net_type<font color='#5555FF'>></font> |
|
<font color='#0000FF'><u>void</u></font> <b><a name='net_to_xml'></a>net_to_xml</b> <font face='Lucida Console'>(</font> |
|
<font color='#0000FF'>const</font> net_type<font color='#5555FF'>&</font> net, |
|
std::ostream<font color='#5555FF'>&</font> out |
|
<font face='Lucida Console'>)</font>; |
|
<font color='#009900'>/*! |
|
requires |
|
- net_type is an object of type add_layer, add_loss_layer, add_skip_layer, or |
|
add_tag_layer. |
|
- All layers in the net must provide to_xml() functions. |
|
ensures |
|
- Prints the given neural network object as an XML document to the given output |
|
stream. |
|
!*/</font> |
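
    <font color='#009900'>/*
        Example sketch:  printing the structure of a small, purely illustrative
        network (no loss layer, so it is an add_layer object) as XML to std::cout.

            #include <dlib/dnn.h>
            #include <iostream>

            using net_type = dlib::fc<10,
                             dlib::relu<dlib::fc<32,
                             dlib::input<dlib::matrix<float>>>>>;
            net_type net;
            dlib::net_to_xml(net, std::cout);
    */</font>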
|
|
|
<font color='#0000FF'>template</font> <font color='#5555FF'><</font><font color='#0000FF'>typename</font> net_type<font color='#5555FF'>></font> |
|
<font color='#0000FF'><u>void</u></font> <b><a name='net_to_xml'></a>net_to_xml</b> <font face='Lucida Console'>(</font> |
|
<font color='#0000FF'>const</font> net_type<font color='#5555FF'>&</font> net, |
|
<font color='#0000FF'>const</font> std::string<font color='#5555FF'>&</font> filename |
|
<font face='Lucida Console'>)</font>; |
|
<font color='#009900'>/*! |
|
requires |
|
- net_type is an object of type add_layer, add_loss_layer, add_skip_layer, or |
|
add_tag_layer. |
|
- All layers in the net must provide to_xml() functions. |
|
ensures |
|
- This function is just like the above net_to_xml(), except it writes to a file |
|
rather than an ostream. |
|
!*/</font> |
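
    <font color='#009900'>/*
        Example sketch:  same as the ostream example above, but writing straight to a
        file (the filename is illustrative).

            dlib::net_to_xml(net, "net.xml");
    */</font>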
|
|
|
<font color='#009900'>// ---------------------------------------------------------------------------------------- |
|
</font> |
|
<font color='#0000FF'>template</font> <font color='#5555FF'><</font><font color='#0000FF'>typename</font> net_type<font color='#5555FF'>></font> |
|
dpoint <b><a name='input_tensor_to_output_tensor'></a>input_tensor_to_output_tensor</b><font face='Lucida Console'>(</font> |
|
<font color='#0000FF'>const</font> net_type<font color='#5555FF'>&</font> net, |
|
dpoint p |
|
<font face='Lucida Console'>)</font>; |
|
<font color='#009900'>/*! |
|
requires |
|
- net_type is an object of type add_layer, add_skip_layer, or add_tag_layer. |
|
- All layers in the net must provide map_input_to_output() functions. |
|
ensures |
|
- Given a dpoint (i.e. a row,column coordinate) in the input tensor given to |
|
net, this function returns the corresponding dpoint in the output tensor |
|
net.get_output(). This kind of mapping is useful when working with fully |
|
convolutional networks as you will often want to know what parts of the |
|
output feature maps correspond to what parts of the input. |
|
- If the network contains skip layers then any layers skipped over by the skip |
|
layer are ignored for the purpose of computing this coordinate mapping. That |
|
is, if you walk the network from the output layer to the input layer, where |
|
each time you encounter a skip layer you jump to the layer indicated by the |
|
skip layer, you will visit exactly the layers in the network involved in the |
|
              input_tensor_to_output_tensor() calculation.  This behavior is useful
              since it allows you to compute some auxiliary DNN as a side branch,
              separate from the main network's job of running some kind of fully
              convolutional network over an image.  For instance, you might want a
              branch in your network that computes some global, image-level summary
              feature.
|
!*/</font> |
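
    <font color='#009900'>/*
        Example sketch:  for a toy downsampling net (illustrative layer sizes and
        strides), find where input coordinate (100,100) lands in the output tensor.

            #include <dlib/dnn.h>

            using fcn_type = dlib::relu<dlib::con<16,5,5,2,2,   // 5x5 conv with stride 2
                             dlib::input<dlib::matrix<float>>>>;
            fcn_type net;
            dlib::dpoint p_out = dlib::input_tensor_to_output_tensor(net, dlib::dpoint(100,100));
    */</font>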
|
|
|
<font color='#009900'>// ---------------------------------------------------------------------------------------- |
|
</font> |
|
<font color='#0000FF'>template</font> <font color='#5555FF'><</font><font color='#0000FF'>typename</font> net_type<font color='#5555FF'>></font> |
|
dpoint <b><a name='output_tensor_to_input_tensor'></a>output_tensor_to_input_tensor</b><font face='Lucida Console'>(</font> |
|
<font color='#0000FF'>const</font> net_type<font color='#5555FF'>&</font> net, |
|
dpoint p |
|
<font face='Lucida Console'>)</font>; |
|
<font color='#009900'>/*! |
|
requires |
|
- net_type is an object of type add_layer, add_skip_layer, or add_tag_layer. |
|
- All layers in the net must provide map_output_to_input() functions. |
|
ensures |
|
- This function provides the reverse mapping of input_tensor_to_output_tensor(). |
|
That is, given a dpoint in net.get_output(), what is the corresponding dpoint |
|
in the input tensor? |
|
!*/</font> |
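
    <font color='#009900'>/*
        Example sketch:  the reverse of the mapping above, e.g. finding which input
        coordinate the output tensor location (10,10) corresponds to (using the same
        illustrative fcn_type net as before).

            dlib::dpoint p_in = dlib::output_tensor_to_input_tensor(net, dlib::dpoint(10,10));
    */</font>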
|
|
|
<font color='#009900'>// ---------------------------------------------------------------------------------------- |
|
</font> |
|
<font color='#0000FF'>template</font> <font color='#5555FF'><</font><font color='#0000FF'>typename</font> net_type<font color='#5555FF'>></font> |
|
<font color='#0000FF'>inline</font> <font color='#0000FF'><u>size_t</u></font> <b><a name='count_parameters'></a>count_parameters</b><font face='Lucida Console'>(</font> |
|
<font color='#0000FF'>const</font> net_type<font color='#5555FF'>&</font> net |
|
<font face='Lucida Console'>)</font>; |
|
<font color='#009900'>/*! |
|
requires |
|
- net_type is an object of type add_layer, add_loss_layer, add_skip_layer, or |
|
add_tag_layer. |
|
ensures |
|
            - Returns the number of allocated parameters in the network.  E.g. if the
              network has not been trained then nothing has been allocated yet, so it
              will return 0.
|
!*/</font> |
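
    <font color='#009900'>/*
        Example sketch:  counting the parameters of a small illustrative net.  Since
        nothing here forces the parameters to be allocated, the printed count is
        likely 0.

            #include <dlib/dnn.h>
            #include <iostream>

            using net_type = dlib::fc<10, dlib::input<dlib::matrix<float>>>;
            net_type net;
            std::cout << dlib::count_parameters(net) << std::endl;
    */</font>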
|
|
|
<font color='#009900'>// ---------------------------------------------------------------------------------------- |
|
</font> |
|
<font color='#0000FF'>template</font><font color='#5555FF'><</font><font color='#0000FF'>typename</font> net_type<font color='#5555FF'>></font> |
|
<font color='#0000FF'><u>void</u></font> <b><a name='set_all_learning_rate_multipliers'></a>set_all_learning_rate_multipliers</b><font face='Lucida Console'>(</font> |
|
net_type<font color='#5555FF'>&</font> net, |
|
<font color='#0000FF'><u>double</u></font> learning_rate_multiplier |
|
<font face='Lucida Console'>)</font>; |
|
<font color='#009900'>/*! |
|
requires |
|
- net_type is an object of type add_layer, add_loss_layer, add_skip_layer, or |
|
add_tag_layer. |
|
- learning_rate_multiplier >= 0 |
|
ensures |
|
- Sets all learning_rate_multipliers and bias_learning_rate_multipliers in net |
|
to learning_rate_multiplier. |
|
!*/</font> |
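
    <font color='#009900'>/*
        Example sketch:  setting every multiplier to 0, which effectively freezes the
        whole network so a dnn_trainer leaves its parameters unchanged.

            dlib::set_all_learning_rate_multipliers(net, 0);
    */</font>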
|
|
|
<font color='#009900'>// ---------------------------------------------------------------------------------------- |
|
</font> |
|
<font color='#0000FF'>template</font> <font color='#5555FF'><</font><font color='#0000FF'><u>size_t</u></font> begin, <font color='#0000FF'><u>size_t</u></font> end, <font color='#0000FF'>typename</font> net_type<font color='#5555FF'>></font> |
|
<font color='#0000FF'><u>void</u></font> <b><a name='set_learning_rate_multipliers_range'></a>set_learning_rate_multipliers_range</b><font face='Lucida Console'>(</font> |
|
net_type<font color='#5555FF'>&</font> net, |
|
<font color='#0000FF'><u>double</u></font> learning_rate_multiplier |
|
<font face='Lucida Console'>)</font>; |
|
<font color='#009900'>/*! |
|
requires |
|
- net_type is an object of type add_layer, add_loss_layer, add_skip_layer, or |
|
add_tag_layer. |
|
- learning_rate_multiplier >= 0 |
|
- begin <= end <= net_type::num_layers |
|
ensures |
|
- Loops over the layers in the range [begin,end) in net and calls |
|
set_learning_rate_multiplier on them with the value of |
|
learning_rate_multiplier. |
|
!*/</font> |
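
    <font color='#009900'>/*
        Example sketch (fine-tuning):  freeze everything except the two layers closest
        to the loss by zeroing the multipliers of layers [2, num_layers).  The index 2
        is purely illustrative.

            dlib::set_learning_rate_multipliers_range<2, net_type::num_layers>(net, 0);
    */</font>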
|
|
|
<font color='#009900'>// ---------------------------------------------------------------------------------------- |
|
</font><b>}</b> |
|
|
|
<font color='#0000FF'>#endif</font> <font color='#009900'>// DLIB_DNn_UTILITIES_ABSTRACT_H_ |
|
</font> |
|
|
|
|
|
</pre></body></html> |