Generalize bilinear filler to N-D multilinear filler #3984

Closed

48 changes: 28 additions & 20 deletions include/caffe/filler.hpp
@@ -208,11 +208,11 @@ class MSRAFiller : public Filler<Dtype> {
};

/*!
-@brief Fills a Blob with coefficients for bilinear interpolation.
+@brief Fills a Blob with coefficients for multilinear interpolation.

A common use case is with the DeconvolutionLayer acting as upsampling.
-You can upsample a feature map with shape of (B, C, H, W) by any integer factor
-using the following proto.
+You can upsample a feature map with shape of (B, C, S_n,..., S_1) by any
+integer factor using the following proto.
\code
layer {
name: "upsample", type: "Deconvolution"
@@ -221,40 +221,48 @@ layer {
kernel_size: {{2 * factor - factor % 2}} stride: {{factor}}
num_output: {{C}} group: {{C}}
pad: {{ceil((factor - 1) / 2.)}}
weight_filler: { type: "bilinear" } bias_term: false
weight_filler: { type: "multilinear" } bias_term: false
}
param { lr_mult: 0 decay_mult: 0 }
}
\endcode
Please use this by replacing `{{}}` with your values. By specifying
`num_output: {{C}} group: {{C}}`, it behaves as
channel-wise convolution. The filter shape of this deconvolution layer will be
-(C, 1, K, K) where K is `kernel_size`, and this filler will set a (K, K)
-interpolation kernel for every channel of the filter identically. The resulting
-shape of the top feature map will be (B, C, factor * H, factor * W).
+(C, 1, K_n,..., K_1) where K_i is `kernel_size` in dimension i, and this filler
+will set a (K_n,..., K_1) interpolation kernel for every channel of the filter
+identically. The resulting shape of the top feature map will be
+(B, C, factor_n * S_n,..., factor_1 * S_1).
Note that the learning rate and the
-weight decay are set to 0 in order to keep coefficient values of bilinear
+weight decay are set to 0 in order to keep coefficient values of multilinear
interpolation unchanged during training. If you apply this to an image, this
operation is equivalent to the following call in Python with Scikit.Image.
\code{.py}
-out = skimage.transform.rescale(img, factor, mode='constant', cval=0)
+out = skimage.transform.rescale(img, (factor_y, factor_x), mode='constant', cval=0)
\endcode
*/
template <typename Dtype>
-class BilinearFiller : public Filler<Dtype> {
+class MultilinearFiller : public Filler<Dtype> {
public:
-explicit BilinearFiller(const FillerParameter& param)
+explicit MultilinearFiller(const FillerParameter& param)
: Filler<Dtype>(param) {}
virtual void Fill(Blob<Dtype>* blob) {
-CHECK_EQ(blob->num_axes(), 4) << "Blob must be 4 dim.";
-CHECK_EQ(blob->width(), blob->height()) << "Filter must be square";
+CHECK_GE(blob->num_axes(), 3) << "Blob must have at least 3 dimensions.";
Dtype* data = blob->mutable_cpu_data();
-int f = ceil(blob->width() / 2.);
-float c = (2 * f - 1 - f % 2) / (2. * f);
for (int i = 0; i < blob->count(); ++i) {
-float x = i % blob->width();
-float y = (i / blob->width()) % blob->height();
-data[i] = (1 - fabs(x / f - c)) * (1 - fabs(y / f - c));
+unsigned int stride = 1;
+Dtype weight = 1;
+for (int axis = 0; axis < blob->num_axes() - 2; ++axis) {
+unsigned int shape = blob->shape(axis + 2);
+unsigned int factor = std::ceil(shape / 2.0f);
+Dtype center = (2 * factor - 1 - factor % 2)
+/ static_cast<Dtype>(2 * factor);
+Dtype coordinate = ((i / stride) % shape)
+/ static_cast<Dtype>(factor);
+weight *= 1 - std::abs(coordinate - center);
+stride *= shape;
+}
+data[i] = weight;
}
CHECK_EQ(this->filler_param_.sparse(), -1)
<< "Sparsity not supported by this Filler.";
@@ -282,8 +290,8 @@ Filler<Dtype>* GetFiller(const FillerParameter& param) {
return new XavierFiller<Dtype>(param);
} else if (type == "msra") {
return new MSRAFiller<Dtype>(param);
} else if (type == "bilinear") {
return new BilinearFiller<Dtype>(param);
} else if (type == "multilinear") {
return new MultilinearFiller<Dtype>(param);
} else {
CHECK(false) << "Unknown filler name: " << param.type();
}
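For orientation (a minimal sketch under assumed names, not code from this PR): a layer reaches this dispatch through the `type` string of its `weight_filler`, so filling deconvolution weights by hand looks roughly like the following; `FillUpsamplingWeights` is a hypothetical helper.
\code{.cpp}
#include "caffe/filler.hpp"

// Hypothetical helper: fill upsampling weights the way a layer would,
// letting GetFiller dispatch on the FillerParameter type string.
template <typename Dtype>
void FillUpsamplingWeights(caffe::Blob<Dtype>* weights) {
  caffe::FillerParameter param;
  param.set_type("multilinear");  // "bilinear" before this PR
  caffe::Filler<Dtype>* filler = caffe::GetFiller<Dtype>(param);
  filler->Fill(weights);
  delete filler;
}
\endcode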
6 changes: 6 additions & 0 deletions include/caffe/util/upgrade_proto.hpp
@@ -65,6 +65,12 @@ bool NetNeedsInputUpgrade(const NetParameter& net_param);
// Perform all necessary transformations to upgrade input fields into layers.
void UpgradeNetInput(NetParameter* net_param);

+// Return true iff the Net contains any deprecated weight fillers.
+bool NetNeedsWeightFillerUpgrade(const NetParameter& net_param);
+
+// Perform all necessary transformations to upgrade deprecated weight fillers.
+void UpgradeNetWeightFiller(NetParameter* net_param);

// Return true iff the solver contains any old solver_type specified as enums
bool SolverNeedsTypeUpgrade(const SolverParameter& solver_param);

38 changes: 37 additions & 1 deletion src/caffe/util/upgrade_proto.cpp
@@ -14,7 +14,8 @@ namespace caffe {

bool NetNeedsUpgrade(const NetParameter& net_param) {
return NetNeedsV0ToV1Upgrade(net_param) || NetNeedsV1ToV2Upgrade(net_param)
-|| NetNeedsDataUpgrade(net_param) || NetNeedsInputUpgrade(net_param);
+|| NetNeedsDataUpgrade(net_param) || NetNeedsInputUpgrade(net_param)
+|| NetNeedsWeightFillerUpgrade(net_param);
}

bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
@@ -71,6 +72,14 @@ bool UpgradeNetAsNeeded(const string& param_file, NetParameter* param) {
LOG(WARNING) << "Note that future Caffe releases will only support "
<< "input layers and not input fields.";
}
+// NetParameter uses deprecated weight fillers; try to upgrade it.
+if (NetNeedsWeightFillerUpgrade(*param)) {
+LOG(INFO) << "Attempting to upgrade input file specified using deprecated "
+<< "weight fillers: " << param_file;
+UpgradeNetWeightFiller(param);
+LOG(INFO) << "Successfully upgraded file specified using deprecated "
+<< "weight fillers.";
+}
return success;
}

@@ -991,6 +1000,33 @@ void UpgradeNetInput(NetParameter* net_param) {
net_param->clear_input_dim();
}

+bool NetNeedsWeightFillerUpgrade(const NetParameter& net_param) {
+// Iterate over all layers and look for deprecated weight filler types.
+for (int i = 0; i < net_param.layer_size(); ++i) {
+if (net_param.layer(i).has_convolution_param() &&
+net_param.layer(i).convolution_param().has_weight_filler()) {
+const FillerParameter filler_param = net_param.layer(i).
+convolution_param().weight_filler();
+if (filler_param.type() == "bilinear") { return true; }
+}
+}
+return false;
+}
+
+void UpgradeNetWeightFiller(NetParameter* net_param) {
+// Iterate over all layers and change deprecated weight filler types.
+for (int i = 0; i < net_param->layer_size(); ++i) {
+if (net_param->layer(i).has_convolution_param() &&
+net_param->layer(i).convolution_param().has_weight_filler()) {
+FillerParameter* filler_param = net_param->mutable_layer(i)->
+mutable_convolution_param()->mutable_weight_filler();
+if (filler_param->type() == "bilinear") {
+filler_param->set_type("multilinear");
+}
+}
+}
+}
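A minimal sketch of the new upgrade path end to end (assumes this patch is applied; not itself part of the diff):
\code{.cpp}
#include "caffe/proto/caffe.pb.h"
#include "caffe/util/upgrade_proto.hpp"

int main() {
  caffe::NetParameter net;
  // A layer still using the deprecated filler type.
  caffe::FillerParameter* filler = net.add_layer()
      ->mutable_convolution_param()->mutable_weight_filler();
  filler->set_type("bilinear");
  if (caffe::NetNeedsWeightFillerUpgrade(net)) {  // true for this net
    caffe::UpgradeNetWeightFiller(&net);          // renames the type in place
  }
  // filler->type() now reads "multilinear".
  return 0;
}
\endcode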

// Return true iff the solver contains any old solver_type specified as enums
bool SolverNeedsTypeUpgrade(const SolverParameter& solver_param) {
if (solver_param.has_solver_type()) {