[Cherry-pick] Optimize dygraph performance part2 #42224

Merged
1 change: 1 addition & 0 deletions paddle/fluid/framework/custom_operator.cc
@@ -39,6 +39,7 @@ limitations under the License. */
#include "paddle/phi/api/all.h"
#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/core/compat/convert_utils.h"
#include "paddle/phi/core/tensor_utils.h"
#include "paddle/utils/any.h"

namespace paddle {
1 change: 1 addition & 0 deletions paddle/fluid/framework/new_executor/interpretercore.cc
@@ -22,6 +22,7 @@
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/platform/os_info.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/phi/core/kernel_context.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
@@ -19,6 +19,7 @@
#include "paddle/fluid/operators/controlflow/conditional_block_op_helper.h"
#include "paddle/fluid/operators/controlflow/recurrent_op_helper.h"
#include "paddle/fluid/operators/controlflow/while_op_helper.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"

#ifdef PADDLE_WITH_MKLDNN
48 changes: 43 additions & 5 deletions paddle/fluid/framework/operator.cc
@@ -35,6 +35,7 @@ limitations under the License. */
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/phi/ops/compat/signatures.h"

@@ -941,7 +942,7 @@ class RuntimeInferShapeContext : public InferShapeContext {
return ((op_with_kernel.kernel_type()) &&
(op_with_kernel.kernel_type()->data_layout_ ==
framework::DataLayout::kMKLDNN));
} catch (std::bad_cast exp) {
} catch (const std::bad_cast& exp) {
return false;
}
}
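A small but deliberate fix in this hunk: the handler now catches std::bad_cast by const reference instead of by value, which skips copying the exception object and avoids slicing if a type derived from std::bad_cast were ever thrown. A minimal, self-contained sketch of the pattern, using a hypothetical Base/Derived pair in place of the kernel-type check:

```cpp
#include <iostream>
#include <typeinfo>

struct Base { virtual ~Base() = default; };
struct Derived : Base { int payload = 42; };

// Catching by const reference avoids copying the exception object
// (and slicing, should a derived exception type be thrown).
bool IsDerived(const Base& b) {
  try {
    const auto& d = dynamic_cast<const Derived&>(b);  // throws std::bad_cast on failure
    return d.payload == 42;
  } catch (const std::bad_cast& e) {
    std::cerr << "not a Derived: " << e.what() << '\n';
    return false;
  }
}

int main() {
  Base base;
  Derived derived;
  std::cout << IsDerived(base) << ' ' << IsDerived(derived) << '\n';  // prints: 0 1
}
```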
@@ -1966,6 +1967,36 @@ Scope* OperatorWithKernel::PrepareData(
}

void OperatorWithKernel::ParseInputDataType(
const Variable* var, const std::string& name,
proto::VarType::Type* data_type) const {
if (var != nullptr) {
const Tensor* t = nullptr;
if (var->IsType<Tensor>()) {
t = &var->Get<Tensor>();
} else if (var->IsType<LoDTensor>()) {
t = &var->Get<LoDTensor>();
} else if (var->IsType<phi::SelectedRows>()) {
t = &(var->Get<phi::SelectedRows>().value());
} else if (var->IsType<LoDTensorArray>()) {
auto t_arr = &var->Get<LoDTensorArray>();
for (size_t j = 0; j < t_arr->size(); j++) {
if (t_arr->at(j).IsInitialized()) {
t = &(t_arr->at(j));
}
}
}
if (t != nullptr) {
PADDLE_ENFORCE_EQ(
t->IsInitialized(), true,
platform::errors::InvalidArgument("The %s Op's Input Variable `%s` "
"contains uninitialized Tensor.",
Type(), name));
*data_type = paddle::framework::TransToProtoVarType(t->dtype());
}
}
}

void OperatorWithKernel::ParseMultiInputDataType(
const std::vector<Variable*>& vars, const std::string& name,
proto::VarType::Type* data_type) const {
proto::VarType::Type default_data_type =
@@ -2016,9 +2047,12 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType(
proto::VarType::Type dafault_data_type =
static_cast<proto::VarType::Type>(-1);
proto::VarType::Type data_type = dafault_data_type;
for (auto& input : ctx.InNameList()) {
const std::vector<Variable*> vars = ctx.MultiInputVar(input);
ParseInputDataType(vars, input, &data_type);
for (auto* name : ctx.InNameList()) {
if (ctx.InputSize(*name) == 1UL) {
ParseInputDataType(ctx.InputVar(*name), *name, &data_type);
} else {
ParseMultiInputDataType(ctx.MultiInputVar(*name), *name, &data_type);
}
}
PADDLE_ENFORCE_NE(
data_type, dafault_data_type,
@@ -2032,7 +2066,11 @@ proto::VarType::Type OperatorWithKernel::IndicateVarDataType(
proto::VarType::Type dafault_data_type =
static_cast<proto::VarType::Type>(-1);
proto::VarType::Type data_type = dafault_data_type;
ParseInputDataType(ctx.MultiInputVar(name), name, &data_type);
if (ctx.InputSize(name) == 1UL) {
ParseInputDataType(ctx.InputVar(name), name, &data_type);
} else {
ParseMultiInputDataType(ctx.MultiInputVar(name), name, &data_type);
}
PADDLE_ENFORCE_NE(
data_type, dafault_data_type,
platform::errors::InvalidArgument(
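The IndicateDataType / IndicateVarDataType rework above splits input parsing into a single-variable path (ParseInputDataType) and a multi-variable path (ParseMultiInputDataType), so the common one-variable case no longer materializes a std::vector<Variable*> just to read a dtype. A rough stand-alone sketch of that dispatch, where ToyContext, ParseOne and ParseMany are hypothetical stand-ins for the real ExecutionContext and parsers:

```cpp
#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <utility>
#include <vector>

enum class DType { kUndefined, kFloat32, kInt64 };
struct Var { DType dtype = DType::kUndefined; };

// Toy context: MultiInputVar copies the pointer vector (as the fluid
// ExecutionContext does), while InputVar hands back a single pointer.
class ToyContext {
 public:
  explicit ToyContext(std::map<std::string, std::vector<Var*>> ins)
      : ins_(std::move(ins)) {}
  size_t InputSize(const std::string& name) const { return ins_.at(name).size(); }
  const Var* InputVar(const std::string& name) const { return ins_.at(name).front(); }
  std::vector<Var*> MultiInputVar(const std::string& name) const { return ins_.at(name); }

 private:
  std::map<std::string, std::vector<Var*>> ins_;
};

void ParseOne(const Var* var, DType* out) {
  if (var != nullptr && var->dtype != DType::kUndefined) *out = var->dtype;
}

void ParseMany(const std::vector<Var*>& vars, DType* out) {
  for (const Var* v : vars) ParseOne(v, out);
}

// Mirrors the new dispatch: single-variable inputs skip the vector copy.
DType IndicateVarDataType(const ToyContext& ctx, const std::string& name) {
  DType result = DType::kUndefined;
  if (ctx.InputSize(name) == 1UL) {
    ParseOne(ctx.InputVar(name), &result);
  } else {
    ParseMany(ctx.MultiInputVar(name), &result);
  }
  return result;
}

int main() {
  Var x;
  x.dtype = DType::kFloat32;
  ToyContext ctx({{"X", {&x}}});
  assert(IndicateVarDataType(ctx, "X") == DType::kFloat32);
}
```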
17 changes: 11 additions & 6 deletions paddle/fluid/framework/operator.h
@@ -43,7 +43,6 @@ limitations under the License. */
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/phi/core/compat/arg_map_context.h"
#include "paddle/phi/core/compat/op_utils.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/kernel_factory.h"

namespace paddle {
@@ -55,6 +54,10 @@ class Variable;
} // namespace framework
} // namespace paddle

namespace phi {
class KernelContext;
}

DECLARE_int32(inner_op_parallelism);

namespace paddle {
@@ -330,12 +333,12 @@ class ExecutionContext {
return it->second;
}

virtual std::vector<std::string> InNameList() const {
std::vector<std::string> vec_temp;
virtual paddle::SmallVector<const std::string*> InNameList() const {
paddle::SmallVector<const std::string*> vec_temp;
vec_temp.reserve(ctx_.inputs.size());

for (auto& input : ctx_.inputs) {
vec_temp.push_back(input.first);
vec_temp.push_back(&input.first);
}

return vec_temp;
@@ -677,9 +680,11 @@ class OperatorWithKernel : public OperatorBase {
// By default all input data must be same.
proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
// used for IndicateDataType
void ParseInputDataType(const std::vector<Variable*>& vars,
const std::string& name,
void ParseInputDataType(const Variable* vars, const std::string& name,
proto::VarType::Type* data_type) const;
void ParseMultiInputDataType(const std::vector<Variable*>& vars,
const std::string& name,
proto::VarType::Type* data_type) const;
// used for IndicateOrPromoteVarDataTypes
Tensor* GetTensorFormInputSafely(const ExecutionContext& ctx,
const std::string& name) const;
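In this header the notable change is InNameList returning paddle::SmallVector<const std::string*> instead of std::vector<std::string>: callers get pointers into the keys of ctx_.inputs (which stay valid for the lifetime of the map) rather than a fresh copy of every name, and a small-buffer vector usually avoids the heap allocation as well. A rough sketch of the two shapes, with std::vector<const std::string*> standing in for paddle::SmallVector:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in for the operator's input map: name -> variable slots.
using InputMap = std::map<std::string, std::vector<int>>;

// Old shape: copies every key string into the returned vector.
std::vector<std::string> InNameListCopy(const InputMap& inputs) {
  std::vector<std::string> names;
  names.reserve(inputs.size());
  for (const auto& kv : inputs) names.push_back(kv.first);  // string copy
  return names;
}

// New shape: collects pointers to keys that already live in the map.
// (paddle::SmallVector additionally keeps a few elements inline; a plain
// std::vector is used here as an approximation.)
std::vector<const std::string*> InNameListPtr(const InputMap& inputs) {
  std::vector<const std::string*> names;
  names.reserve(inputs.size());
  for (const auto& kv : inputs) names.push_back(&kv.first);  // no copy
  return names;
}

int main() {
  InputMap inputs{{"X", {0}}, {"Scale", {1, 2}}};
  // Callers now dereference, exactly as the IndicateDataType loop does.
  for (const std::string* name : InNameListPtr(inputs)) {
    std::cout << *name << " -> " << inputs.at(*name).size() << " variable(s)\n";
  }
  (void)InNameListCopy(inputs);  // kept only to contrast with the old shape
}
```

The same header also drops the paddle/phi/core/kernel_context.h include in favour of a forward declaration of phi::KernelContext, which is why the .cc files earlier in this diff now include that header directly.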
18 changes: 13 additions & 5 deletions paddle/fluid/imperative/execution_context.h
@@ -117,12 +117,12 @@ class DygraphExecutionContext : public framework::ExecutionContext {
return it->second;
}

std::vector<std::string> InNameList() const override {
std::vector<std::string> vec_temp;
paddle::SmallVector<const std::string*> InNameList() const override {
paddle::SmallVector<const std::string*> vec_temp;
vec_temp.reserve(var_map_in_.size());

for (auto& v : var_map_in_) {
vec_temp.push_back(v.first);
vec_temp.push_back(&v.first);
}

return vec_temp;
@@ -144,11 +144,19 @@
}

size_t InputSize(const std::string& name) const override {
return InputNames(name).size();
auto it = var_map_in_.find(name);
PADDLE_ENFORCE_NE(
it, var_map_in_.end(),
platform::errors::NotFound("Can not find [%s] in Input", name));
return it->second.size();
}

size_t OutputSize(const std::string& name) const override {
return OutputNames(name).size();
auto it = var_map_out_.find(name);
PADDLE_ENFORCE_NE(
it, var_map_out_.end(),
platform::errors::NotFound("Can not find [%s] in Output", name));
return it->second.size();
}

const Variable* InputVar(const std::string& name) const override {
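InputSize and OutputSize above used to call InputNames(name)/OutputNames(name), which builds an intermediate vector of names before taking its size; the new versions do a single map lookup and report a NotFound error if the key is missing. A condensed sketch of that lookup over a plain std::map, with an exception standing in for PADDLE_ENFORCE_NE:

```cpp
#include <cassert>
#include <cstddef>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>

struct Var {};

// name -> variables bound to that input slot.
using VarMap = std::map<std::string, std::vector<Var*>>;

// One lookup, no intermediate name vector; a missing key fails loudly
// instead of silently yielding size zero.
size_t InputSize(const VarMap& var_map_in, const std::string& name) {
  auto it = var_map_in.find(name);
  if (it == var_map_in.end()) {
    throw std::out_of_range("Can not find [" + name + "] in Input");
  }
  return it->second.size();
}

int main() {
  Var a, b;
  VarMap ins{{"X", {&a, &b}}};
  assert(InputSize(ins, "X") == 2);
  try {
    InputSize(ins, "Y");
  } catch (const std::out_of_range&) {
    // expected: "Y" was never registered as an input
  }
}
```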
1 change: 1 addition & 0 deletions paddle/fluid/imperative/prepared_operator.h
@@ -31,6 +31,7 @@

#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_context.h"
#include "paddle/phi/core/selected_rows.h"

DECLARE_bool(use_mkldnn);
2 changes: 1 addition & 1 deletion paddle/fluid/operators/transpose_op.cc
@@ -90,7 +90,7 @@ class TransposeOp : public framework::OperatorWithKernel {
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
framework::LibraryType library_{framework::LibraryType::kPlain};
std::string data_format = ctx.Attr<std::string>("data_format");
auto &data_format = ctx.Attr<std::string>("data_format");
framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
#ifdef PADDLE_WITH_MKLDNN
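The transpose_op tweak binds the "data_format" attribute by reference rather than copying it into a local std::string; because Attr<std::string>() returns a reference to the stored attribute, auto& deduces const std::string& and no allocation is made. A tiny illustration with a hypothetical AttrStore in place of the ExecutionContext:

```cpp
#include <cassert>
#include <map>
#include <string>
#include <utility>

// Hypothetical attribute store standing in for ExecutionContext::Attr.
class AttrStore {
 public:
  void Set(const std::string& key, std::string value) { attrs_[key] = std::move(value); }
  const std::string& Attr(const std::string& key) const { return attrs_.at(key); }

 private:
  std::map<std::string, std::string> attrs_;
};

int main() {
  AttrStore ctx;
  ctx.Set("data_format", "NCHW");

  std::string copied = ctx.Attr("data_format");   // old form: copies the string
  const auto& aliased = ctx.Attr("data_format");  // new form: just a reference

  assert(copied == aliased);
  assert(&aliased == &ctx.Attr("data_format"));   // same underlying storage
}
```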
50 changes: 50 additions & 0 deletions paddle/phi/core/attribute.h
@@ -0,0 +1,50 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>
#include <vector>

#include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/layout.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/utils/variant.h"

namespace phi {

class Place;

// NOTE: Add needed type in the future
using Attribute = paddle::variant<bool,
int,
int64_t,
float,
double,
std::string,
std::vector<bool>,
std::vector<int>,
std::vector<int64_t>,
std::vector<float>,
std::vector<double>,
std::vector<std::string>,
Scalar,
std::vector<Scalar>,
IntArray,
DataType,
DataLayout,
Place>;

} // namespace phi
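The new phi::Attribute alias is a closed variant over every attribute type the phi layer accepts, and the infermeta change later in this diff uses it to replace the paddle::any-based attribute storage so reads can be type-checked in one place. A self-contained sketch of the idea using std::variant in place of paddle::variant (whose interface it mirrors), with a trimmed-down ToyAttribute that omits Scalar, IntArray, DataType, DataLayout and Place:

```cpp
#include <cassert>
#include <cstdint>
#include <string>
#include <variant>
#include <vector>

// Trimmed-down stand-in for phi::Attribute.
using ToyAttribute =
    std::variant<bool, int, int64_t, float, std::string, std::vector<int>>;

int main() {
  std::vector<ToyAttribute> attrs;
  attrs.emplace_back(std::string("NCHW"));           // a layout-like string attribute
  attrs.emplace_back(std::vector<int>{0, 2, 3, 1});  // e.g. a transpose axis list

  // Checked reads: std::get throws std::bad_variant_access on a type
  // mismatch, which the AttrAt<T> accessor later in this diff turns into an
  // InvalidArgument error.
  assert(std::get<std::string>(attrs[0]) == "NCHW");
  assert(std::get<std::vector<int>>(attrs[1]).size() == 4);

  bool mismatched = false;
  try {
    (void)std::get<int>(attrs[0]);  // wrong alternative
  } catch (const std::bad_variant_access&) {
    mismatched = true;
  }
  assert(mismatched);
}
```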
34 changes: 33 additions & 1 deletion paddle/phi/core/infermeta_utils.cc
@@ -30,7 +30,7 @@ void InferMetaContext::EmplaceBackOutput(MetaTensor output) {
outputs_.emplace_back(std::move(output));
output_range_.emplace_back(std::pair<int, int>(index, index + 1));
}
void InferMetaContext::EmplaceBackAttr(paddle::any attr) {
void InferMetaContext::EmplaceBackAttr(Attribute attr) {
attrs_.emplace_back(std::move(attr));
}

@@ -120,6 +120,38 @@ std::vector<MetaTensor*> InferMetaContext::MutableOutputBetween(size_t start,
return result;
}

template <typename AttrType>
const AttrType& InferMetaContext::AttrAt(size_t idx) const {
try {
return paddle::get<AttrType>(attrs_.at(idx));
} catch (paddle::bad_variant_access const& e) {
PADDLE_THROW(phi::errors::InvalidArgument(
"Attribute cast error in InferMeta Context, the expected attribute "
"type is `%s`.",
std::type_index(typeid(AttrType)).name()));
}
}

template const bool& InferMetaContext::AttrAt(size_t idx) const;
template const int& InferMetaContext::AttrAt(size_t idx) const;
template const int64_t& InferMetaContext::AttrAt(size_t idx) const;
template const float& InferMetaContext::AttrAt(size_t idx) const;
template const double& InferMetaContext::AttrAt(size_t idx) const;
template const std::string& InferMetaContext::AttrAt(size_t idx) const;
template const std::vector<bool>& InferMetaContext::AttrAt(size_t idx) const;
template const std::vector<int>& InferMetaContext::AttrAt(size_t idx) const;
template const std::vector<int64_t>& InferMetaContext::AttrAt(size_t idx) const;
template const std::vector<float>& InferMetaContext::AttrAt(size_t idx) const;
template const std::vector<double>& InferMetaContext::AttrAt(size_t idx) const;
template const std::vector<std::string>& InferMetaContext::AttrAt(
size_t idx) const;
template const Scalar& InferMetaContext::AttrAt(size_t idx) const;
template const std::vector<Scalar>& InferMetaContext::AttrAt(size_t idx) const;
template const IntArray& InferMetaContext::AttrAt(size_t idx) const;
template const DataType& InferMetaContext::AttrAt(size_t idx) const;
template const DataLayout& InferMetaContext::AttrAt(size_t idx) const;
template const Place& InferMetaContext::AttrAt(size_t idx) const;

MetaFnFactory& MetaFnFactory::Instance() {
static MetaFnFactory g_meta_fn_map;
return g_meta_fn_map;
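AttrAt<T> above is the typed accessor over that variant: it is defined in the .cc file and then explicitly instantiated for every alternative Attribute can hold, so other translation units can call it without the definition living in the header. A reduced sketch of the define-then-instantiate pattern, with ToyContext and ToyAttribute as hypothetical stand-ins for InferMetaContext and phi::Attribute, std::variant for paddle::variant, and a plain exception for PADDLE_THROW:

```cpp
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>
#include <utility>
#include <variant>
#include <vector>

using ToyAttribute = std::variant<bool, int, float, std::string>;

class ToyContext {
 public:
  void EmplaceBackAttr(ToyAttribute attr) { attrs_.emplace_back(std::move(attr)); }

  // Declared here; defined below, as it would be in the .cc file.
  template <typename AttrType>
  const AttrType& AttrAt(size_t idx) const;

 private:
  std::vector<ToyAttribute> attrs_;
};

template <typename AttrType>
const AttrType& ToyContext::AttrAt(size_t idx) const {
  try {
    return std::get<AttrType>(attrs_.at(idx));
  } catch (const std::bad_variant_access&) {
    throw std::invalid_argument("Attribute cast error: unexpected type at index " +
                                std::to_string(idx));
  }
}

// Explicit instantiations, mirroring the list in infermeta_utils.cc; without
// them, callers in other translation units would fail to link.
template const bool& ToyContext::AttrAt(size_t idx) const;
template const int& ToyContext::AttrAt(size_t idx) const;
template const float& ToyContext::AttrAt(size_t idx) const;
template const std::string& ToyContext::AttrAt(size_t idx) const;

int main() {
  ToyContext ctx;
  ctx.EmplaceBackAttr(3);
  ctx.EmplaceBackAttr(std::string("mean"));
  std::cout << ctx.AttrAt<int>(0) << " " << ctx.AttrAt<std::string>(1) << "\n";
}
```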