Skip to content

Commit

Permalink
refactor pass of conv_bias_fusion
Browse files Browse the repository at this point in the history
  • Loading branch information
zyfncg committed Nov 17, 2022
1 parent a2b55fe commit 39bd7ce
Show file tree
Hide file tree
Showing 15 changed files with 969 additions and 383 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/graph_helper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -713,7 +713,7 @@ static void GetGraphOpDesc(const std::vector<Node *> &nodes,
UpdateControlOpSkipEagerDeletionVars(*n, graph, graph_idx, n->Name());
}
ops->emplace_back(*n->Op());
VLOG(4) << n->ToString();
VLOG(5) << n->ToString();
}
// delete no OpDesc op
}
Expand Down
87 changes: 83 additions & 4 deletions paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.cc
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,40 @@ ConvBiasFusePass::ConvBiasFusePass() {
.IsStringIn({"NCHW", "NHWC", "AnyLayout"})
.End();

AddOpCompat(OpCompat("fused_conv2d"))
.AddInput("Input")
.IsTensor()
.End()
.AddInput("Filter")
.IsTensor()
.End()
.AddInput("Bias")
.IsTensor()
.IsOptional()
.End()
.AddOutput("Output")
.IsTensor()
.End()
.AddAttr("strides")
.IsType<std::vector<int>>()
.End()
.AddAttr("paddings")
.IsType<std::vector<int>>()
.End()
.AddAttr("padding_algorithm")
.IsOptional()
.IsStringIn({"EXPLICIT", "SAME", "VALID"})
.End()
.AddAttr("groups")
.IsNumGE(1)
.End()
.AddAttr("dilations")
.IsType<std::vector<int>>()
.End()
.AddAttr("data_format")
.IsStringIn({"NCHW", "NHWC", "AnyLayout"})
.End();

AddOpCompat(OpCompat("elementwise_add"))
.AddInput("X")
.IsTensor()
Expand Down Expand Up @@ -164,6 +198,40 @@ Conv3DBiasFusePass::Conv3DBiasFusePass() {
.IsStringIn({"NDHWC", "NCDHW"})
.End();

AddOpCompat(OpCompat("fused_conv3d"))
.AddInput("Input")
.IsTensor()
.End()
.AddInput("Filter")
.IsTensor()
.End()
.AddInput("Bias")
.IsTensor()
.IsOptional()
.End()
.AddOutput("Output")
.IsTensor()
.End()
.AddAttr("strides")
.IsType<std::vector<int>>()
.End()
.AddAttr("paddings")
.IsType<std::vector<int>>()
.End()
.AddAttr("padding_algorithm")
.IsOptional()
.IsStringIn({"EXPLICIT", "SAME", "VALID"})
.End()
.AddAttr("groups")
.IsNumGE(1)
.End()
.AddAttr("dilations")
.IsType<std::vector<int>>()
.End()
.AddAttr("data_format")
.IsStringIn({"NCHW", "NHWC", "AnyLayout"})
.End();

AddOpCompat(OpCompat("elementwise_add"))
.AddInput("X")
.IsTensor()
Expand Down Expand Up @@ -202,6 +270,16 @@ phi::DenseTensor tensor_apply_eltwise(const phi::DenseTensor& vec_a,
}

// Entry point of the pass: fuses conv + elementwise_add(bias) patterns.
// The first pass matches ops of type() and rewrites the fused result to
// fused_type(); the second pass then matches ops that are already of
// fused_type() and fuses a trailing elementwise_add into them as well.
void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
  FuseConvBias(graph, type(), fused_type());
  if (type() != fused_type()) {
    // NOTE(review): this second pass targets ops already of fused_type()
    // (e.g. fused conv ops produced before this pass ran, or by the pass
    // above) that still have an unfused bias add. Guarded so it is skipped
    // when type() == fused_type(), where it would duplicate the first call.
    // TODO: confirm the second pass is still required.
    FuseConvBias(graph, fused_type(), fused_type());
  }
}

void ConvBiasFusePass::FuseConvBias(ir::Graph* graph,
const std::string& conv_type,
const std::string& fused_conv) const {
PADDLE_ENFORCE_NOT_NULL(
graph, platform::errors::InvalidArgument("Graph cannot be nullptr."));
FusePassBase::Init(name_scope_, graph);
Expand All @@ -215,9 +293,9 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
gpd.mutable_pattern()
->NewNode(patterns::PDNodeName(name_scope_, "conv_input"))
->AsInput()
->assert_is_op_input(type(), "Input");
->assert_is_op_input(conv_type, "Input");
patterns::ConvBias conv_bias_pattern(gpd.mutable_pattern(), name_scope_);
conv_bias_pattern(conv_input, type());
conv_bias_pattern(conv_input, conv_type);
int found_conv_bias_count = 0;
auto handler = [&](const GraphPatternDetector::subgraph_t& subgraph,
Graph* g) {
Expand Down Expand Up @@ -248,7 +326,7 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
// check if fuse can be done and if MKL-DNN should be used
FuseOptions fuse_option = FindFuseOption(*conv, *eltwise);
if (fuse_option == DO_NOT_FUSE || fuse_option == FUSE_NATIVE) {
VLOG(3) << "do not perform " + type() + "+bias fuse";
VLOG(3) << "do not perform " + conv_type + "+bias fuse";
return;
}

Expand Down Expand Up @@ -293,7 +371,7 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
desc.SetInput("Filter", std::vector<std::string>({conv_weight->Name()}));
desc.SetInput("Bias", std::vector<std::string>({eltwise_bias->Name()}));
desc.SetOutput("Output", std::vector<std::string>({eltwise_out->Name()}));
desc.SetType(type());
desc.SetType(fused_conv);

for (auto& attr : conv->Op()->GetAttrMap()) {
desc.SetAttr(attr.first, attr.second);
Expand All @@ -316,6 +394,7 @@ void ConvBiasFusePass::ApplyImpl(ir::Graph* graph) const {
gpd(graph, handler);
AddStatis(found_conv_bias_count);
}

} // namespace ir
} // namespace framework
} // namespace paddle
Expand Down
8 changes: 8 additions & 0 deletions paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,24 +32,32 @@ class ConvBiasFusePass : public FusePassBase {
ConvBiasFusePass();
virtual ~ConvBiasFusePass() {}
virtual std::string type() const { return "conv2d"; }
virtual std::string fused_type() const { return "fused_conv2d"; }

protected:
void ApplyImpl(ir::Graph* graph) const override;
void FuseConvBias(ir::Graph* graph,
const std::string& conv_type,
const std::string& fused_conv) const;

const std::string name_scope_{"conv_bias_mkldnn_fuse"};
};

/*
 * Fuse conv2d_transpose and elementwise_add into a single
 * conv2d_transpose op with a Bias input.
 */
class Conv2DTransposeBiasFusePass : public ConvBiasFusePass {
 public:
  Conv2DTransposeBiasFusePass();
  // Pattern matches conv2d_transpose ops as the fusion target.
  std::string type() const override { return "conv2d_transpose"; }
  // fused_type() deliberately equals type(): the fused result keeps the
  // conv2d_transpose op type (no dedicated fused op), which also makes
  // ApplyImpl skip its second fuse pass for this subclass.
  std::string fused_type() const override { return "conv2d_transpose"; }
};

// Fuses conv3d + elementwise_add(bias) into a single fused_conv3d op.
class Conv3DBiasFusePass : public ConvBiasFusePass {
 public:
  Conv3DBiasFusePass();
  // Pattern matches conv3d ops as the fusion target.
  std::string type() const override { return "conv3d"; }
  // The fused result is emitted as the dedicated fused_conv3d op type.
  std::string fused_type() const override { return "fused_conv3d"; }
};
} // namespace ir
} // namespace framework
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -139,7 +139,8 @@ void MainTest(bool convWithExistingBias) {
int conv_bias_count = 0;

for (auto* node : graph->Nodes()) {
if (node->IsOp() && node->Op()->Type() == "conv2d") {
if (node->IsOp() && (node->Op()->Type() == "conv2d" ||
node->Op()->Type() == "fused_conv2d")) {
auto* op = node->Op();
ASSERT_TRUE(op->HasAttr("use_mkldnn"));
EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")));
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/ir/pass.h
Original file line number Diff line number Diff line change
Expand Up @@ -142,7 +142,7 @@ class Pass {
}
attrs_[attr_name] = attr;
attr_dels_[attr_name] = [attr, attr_name]() {
VLOG(3) << "deleting " << attr_name;
VLOG(8) << "deleting " << attr_name;
delete attr;
};
}
Expand Down
2 changes: 1 addition & 1 deletion paddle/fluid/framework/op_desc.cc
Original file line number Diff line number Diff line change
Expand Up @@ -979,7 +979,7 @@ struct SetAttrDescVisitor {
};

void OpDesc::Flush() {
VLOG(4) << "Flush "
VLOG(8) << "Flush "
<< " " << Type() << " " << need_update_;
if (need_update_) {
this->desc_.mutable_inputs()->Clear();
Expand Down
12 changes: 12 additions & 0 deletions paddle/fluid/operators/conv_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -270,6 +270,18 @@ void Conv2DOpMaker::Make() {
"H is the height of the filter, and W is the width of the filter. "
"If the groups attribute is greater than 1, C equals the number of "
"input image channels divided by the groups.");
AddInput("Bias",
"(Tensor) Bias to be added to each output of filter application."
"The format of output tensor is X (one-dimensional) of size equal"
"to the number of output channels. Only used with MKL-DNN.")
.AsDispensable()
.AsExtra();
AddInput("ResidualData",
"(Tensor) Tensor with residual data "
"to which convolution output will be added."
"Used with fuse_residual_connection fusion.")
.AsDispensable()
.AsExtra();
AddOutput("Output",
"(Tensor) The output tensor of convolution operator. "
"It has same data fromat and data type as the Input.");
Expand Down
21 changes: 11 additions & 10 deletions paddle/fluid/operators/fused/conv_fusion_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -32,16 +32,17 @@ namespace operators {
class Conv2DFusionOpMaker : public Conv2DOpMaker {
protected:
void Apply() override {
AddInput("Bias",
"(Tensor) Bias to be added to each output of filter application."
"The format of output tensor is X (one-dimensional) of size equal"
"to the number of output channels.")
.AsDispensable();
AddInput("ResidualData",
"(Tensor) Tensor with residual data "
"to which convolution output will be added."
"Used with fuse_residual_connection fusion.")
.AsDispensable();
// NOTE: The "Bias" and "ResidualData" inputs are no longer declared
// here. They are now added (AsDispensable + AsExtra) in the base
// Conv2DOpMaker::Make() in paddle/fluid/operators/conv_op.cc, so this
// fused op maker inherits them and re-declaring them would be a
// duplicate-input error.
AddAttr<std::string>(
"activation",
"The activation type can be 'identity', 'sigmoid', 'relu', 'relu6' "
Expand Down
Loading

0 comments on commit 39bd7ce

Please sign in to comment.