[NNAdapter][IntelOpenVINO] Init support for OpenVINO (#8552)
csy0225 authored Mar 3, 2022 · 1 parent 50244cb · commit afa3c18
Showing 23 changed files with 1,521 additions and 70 deletions.
3 changes: 3 additions & 0 deletions cmake/configure.cmake
@@ -244,6 +244,9 @@ if (LITE_WITH_NNADAPTER)
if (NNADAPTER_WITH_ANDROID_NNAPI)
  add_definitions("-DNNADAPTER_WITH_ANDROID_NNAPI")
endif()
if (NNADAPTER_WITH_INTEL_OPENVINO)
  add_definitions("-DNNADAPTER_WITH_INTEL_OPENVINO")
endif()
endif()
endif()

4 changes: 4 additions & 0 deletions lite/backends/nnadapter/nnadapter/src/driver/CMakeLists.txt
@@ -57,3 +57,7 @@ endif()
if(NNADAPTER_WITH_ANDROID_NNAPI)
  add_subdirectory(android_nnapi)
endif()

if(NNADAPTER_WITH_INTEL_OPENVINO)
  add_subdirectory(intel_openvino)
endif()
27 changes: 27 additions & 0 deletions lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/CMakeLists.txt
@@ -0,0 +1,27 @@
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set(DEVICE_NAME intel_openvino)
add_definitions(-DNNADAPTER_DEVICE_NAME=${DEVICE_NAME})
add_definitions(-DNNADAPTER_DEVICE_SYMBOL=${NNADAPTER_DEVICE_SYMBOL_PREFIX}${DEVICE_NAME})

include(dependencies.cmake)

aux_source_directory(converter CONVERTERS)
set(SRCS engine.cc utility.cc driver.cc ${CONVERTERS})
set(DEPS ${NNADAPTER_OPERATIONS} ${NNADAPTER_UTILITIES} ${${DEVICE_NAME}_deps})

add_library(${DEVICE_NAME} SHARED ${SRCS})
target_link_libraries(${DEVICE_NAME} "-Wl,--start-group" ${DEPS} "-Wl,--end-group")
set(NNADAPTER_DEVICES ${NNADAPTER_DEVICES} ${DEVICE_NAME} CACHE INTERNAL "")
41 changes: 41 additions & 0 deletions lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/all.h
@@ -0,0 +1,41 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef __NNADAPTER_DRIVER_INTEL_OPENVINO_CONVERTER_ALL_H__ // NOLINT
#define __NNADAPTER_DRIVER_INTEL_OPENVINO_CONVERTER_ALL_H__

REGISTER_CONVERTER(ABS, ConvertUnaryActivations)
REGISTER_CONVERTER(ADD, ConvertElementwise)
REGISTER_CONVERTER(AVERAGE_POOL_2D, ConvertPool2D)
REGISTER_CONVERTER(BATCH_NORMALIZATION, ConvertBatchNormalization)
REGISTER_CONVERTER(CONV_2D, ConvertConv2D)
REGISTER_CONVERTER(DIV, ConvertElementwise)
REGISTER_CONVERTER(EQUAL, ConvertElementwise)
REGISTER_CONVERTER(EXP, ConvertUnaryActivations)
REGISTER_CONVERTER(FLOOR, ConvertUnaryActivations)
REGISTER_CONVERTER(GREATER_EQUAL, ConvertElementwise)
REGISTER_CONVERTER(LOG, ConvertUnaryActivations)
REGISTER_CONVERTER(MAT_MUL, ConvertMatMul)
REGISTER_CONVERTER(MAX, ConvertElementwise)
REGISTER_CONVERTER(MAX_POOL_2D, ConvertPool2D)
REGISTER_CONVERTER(MIN, ConvertElementwise)
REGISTER_CONVERTER(MUL, ConvertElementwise)
REGISTER_CONVERTER(POW, ConvertElementwise)
REGISTER_CONVERTER(RELU, ConvertUnaryActivations)
REGISTER_CONVERTER(RESHAPE, ConvertReshape)
REGISTER_CONVERTER(SOFTMAX, ConvertSoftmax)
REGISTER_CONVERTER(SUB, ConvertElementwise)
REGISTER_CONVERTER(TANH, ConvertUnaryActivations)

#endif // NOLINT
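all.h deliberately contains no definition of REGISTER_CONVERTER: each includer defines the macro itself, so the same operation list expands into extern declarations in one place and into switch cases in another (see converter.cc below), and the include guard is #undef'd after each use so the header can be expanded repeatedly in one translation unit. A minimal, self-contained sketch of this X-macro technique follows; OPS_LIST, HandleAdd, and HandleRelu are hypothetical names, not part of the NNAdapter sources.

// x_macro_demo.cc -- illustrative only; OPS_LIST, HandleAdd, HandleRelu are
// hypothetical names, not part of the NNAdapter sources.
#include <cstdio>

// The operation list, analogous to the REGISTER_CONVERTER entries in all.h.
#define OPS_LIST(REGISTER)  \
  REGISTER(ADD, HandleAdd)  \
  REGISTER(RELU, HandleRelu)

// First expansion: declare one handler per entry, like the extern
// declarations at the top of converter.cc.
#define DECLARE_HANDLER(tag, handler) void handler();
OPS_LIST(DECLARE_HANDLER)
#undef DECLARE_HANDLER

enum class Op { ADD, RELU };

// Second expansion: turn the same list into switch cases, like Converter::Apply.
void Dispatch(Op op) {
  switch (op) {
#define DISPATCH_CASE(tag, handler) \
  case Op::tag:                     \
    handler();                      \
    break;
    OPS_LIST(DISPATCH_CASE)
#undef DISPATCH_CASE
  }
}

void HandleAdd() { std::printf("add\n"); }
void HandleRelu() { std::printf("relu\n"); }

int main() { Dispatch(Op::RELU); }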
48 changes: 48 additions & 0 deletions lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/batch_normalization.cc
@@ -0,0 +1,48 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "operation/batch_normalization.h"
#include "driver/intel_openvino/converter/converter.h"
#include "utility/debug.h"
#include "utility/logging.h"

namespace nnadapter {
namespace intel_openvino {

int ConvertBatchNormalization(Converter* converter,
                              core::Operation* operation) {
  BATCH_NORMALIZATION_OPERATION_EXTRACT_INPUTS_OUTPUTS

  // Convert operand to OpenVINO tensor
  auto input_tensor = converter->GetMappedTensor(input_operand);
  if (!input_tensor) {
    input_tensor = converter->ConvertOperand(input_operand);
  }
  auto gamma_tensor = converter->ConvertOperand(scale_operand);
  auto beta_tensor = converter->ConvertOperand(bias_operand);
  auto mean_tensor = converter->ConvertOperand(mean_operand);
  auto variance_tensor = converter->ConvertOperand(variance_operand);
  auto batch_norm_op =
      std::make_shared<default_opset::BatchNormInference>(*input_tensor,
                                                          *gamma_tensor,
                                                          *beta_tensor,
                                                          *mean_tensor,
                                                          *variance_tensor,
                                                          epsilon);
  MAP_OUTPUT(output_operand, batch_norm_op, 0);
  return NNADAPTER_NO_ERROR;
}

} // namespace intel_openvino
} // namespace nnadapter
96 changes: 96 additions & 0 deletions lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/conv2d.cc
@@ -0,0 +1,96 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "operation/conv2d.h"
#include "driver/intel_openvino/converter/converter.h"
#include "utility/debug.h"
#include "utility/logging.h"

namespace nnadapter {
namespace intel_openvino {

int ConvertConv2D(Converter* converter, core::Operation* operation) {
  CONV_2D_OPERATION_EXTRACT_INPUTS_OUTPUTS
  if (auto_pad != NNADAPTER_AUTO_PAD_NONE) {
    operation::UpdateConv2DPadAndDilation(
        input_operand->type.dimensions.data[2],
        filter_height,
        auto_pad,
        &pad_height_top,
        &pad_height_bottom,
        stride_height,
        &dilation_height);
    operation::UpdateConv2DPadAndDilation(
        input_operand->type.dimensions.data[3],
        filter_width,
        auto_pad,
        &pad_width_left,
        &pad_width_right,
        stride_width,
        &dilation_width);
  }

  // Convert operand to OpenVINO tensor
  auto input_tensor = converter->GetMappedTensor(input_operand);
  if (!input_tensor) {
    input_tensor = converter->ConvertOperand(input_operand);
  }
  auto filter_tensor = converter->ConvertOperand(filter_operand);
  auto ov_auto_pad = ConvertToOVPadType(auto_pad);
  auto ov_strides = ov::Strides(
      {static_cast<size_t>(stride_height), static_cast<size_t>(stride_width)});
  auto ov_diliations = ov::Strides({static_cast<size_t>(dilation_height),
                                    static_cast<size_t>(dilation_width)});
  auto ov_pads_begin =
      ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_top),
                          static_cast<std::ptrdiff_t>(pad_width_left)});
  auto ov_pads_end =
      ov::CoordinateDiff({static_cast<std::ptrdiff_t>(pad_height_bottom),
                          static_cast<std::ptrdiff_t>(pad_width_right)});
  auto conv2d_op = std::make_shared<default_opset::Convolution>(*input_tensor,
                                                                *filter_tensor,
                                                                ov_strides,
                                                                ov_pads_begin,
                                                                ov_pads_end,
                                                                ov_diliations,
                                                                ov_auto_pad);
  auto output_tensor = MAP_OUTPUT(output_operand, conv2d_op, 0);
  // Bias
  auto bias_tensor = converter->ConvertOperand(bias_operand);
  auto unsqueeze_op = converter->AddUnsqueezeOperator(
      bias_tensor, std::vector<int64_t>({0, 2, 3}));
  auto add_op = std::make_shared<default_opset::Add>(*output_tensor,
                                                     unsqueeze_op->output(0));
  output_tensor = MAP_OUTPUT(output_operand, add_op, 0);
  // Fuse activation
  switch (fuse_code) {
#define CONVERT_UNARY_ACTIVATION(type, class_name)                             \
  case NNADAPTER_FUSED_##type: {                                               \
    auto act_op = std::make_shared<default_opset::class_name>(*output_tensor); \
    MAP_OUTPUT(output_operand, act_op, 0);                                      \
  } break;
    CONVERT_UNARY_ACTIVATION(RELU, Relu);
#undef CONVERT_UNARY_ACTIVATION
    case NNADAPTER_FUSED_NONE:
      break;
    default:
      NNADAPTER_LOG(FATAL) << "Unsupported fuse_code(" << fuse_code
                           << ") is found.";
      break;
  }
  return NNADAPTER_NO_ERROR;
}

} // namespace intel_openvino
} // namespace nnadapter
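ConvertConv2D folds the bias in as a separate Add: the one-dimensional, per-channel bias is unsqueezed at axes {0, 2, 3}, turning a length-C vector into a [1, C, 1, 1] tensor that broadcasts over the NCHW convolution output. The short standalone sketch below only illustrates that shape arithmetic; the Unsqueeze helper is a hypothetical stand-in, not the driver's AddUnsqueezeOperator.

// unsqueeze_demo.cc -- illustrative only; not part of the driver.
#include <cassert>
#include <cstdint>
#include <vector>

// Insert size-1 dimensions at the given (sorted, in-range) axes of the output
// rank, mirroring what an Unsqueeze on axes {0, 2, 3} does to a 1-D bias.
std::vector<int64_t> Unsqueeze(const std::vector<int64_t>& shape,
                               const std::vector<int64_t>& axes) {
  std::vector<int64_t> out(shape.size() + axes.size(), 0);
  size_t src = 0;
  for (size_t i = 0, a = 0; i < out.size(); ++i) {
    if (a < axes.size() && static_cast<int64_t>(i) == axes[a]) {
      out[i] = 1;  // new broadcast dimension
      ++a;
    } else {
      out[i] = shape[src++];
    }
  }
  return out;
}

int main() {
  // A bias of C = 64 elements becomes [1, 64, 1, 1], which broadcasts against
  // an NCHW convolution output such as [N, 64, H, W] in the following Add.
  auto biased = Unsqueeze({64}, {0, 2, 3});
  assert((biased == std::vector<int64_t>{1, 64, 1, 1}));
  return 0;
}

Because the bias is applied as its own Add, the fused activation (when fuse_code is set) is attached to the Add's output, which is exactly what the fuse_code switch above does.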
111 changes: 111 additions & 0 deletions lite/backends/nnadapter/nnadapter/src/driver/intel_openvino/converter/converter.cc
@@ -0,0 +1,111 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "driver/intel_openvino/converter/converter.h"
#include <unistd.h>
#include <algorithm>
#include <utility>
#include <vector>
#include "utility/debug.h"
#include "utility/logging.h"
#include "utility/modeling.h"
#include "utility/string.h"
#include "utility/utility.h"

namespace nnadapter {
namespace intel_openvino {

#define REGISTER_CONVERTER(__op_type__, __func_name__) \
extern int __func_name__(Converter* converter, core::Operation* operation);
#include "driver/intel_openvino/converter/all.h" // NOLINT
#undef __NNADAPTER_DRIVER_INTEL_OPENVINO_CONVERTER_ALL_H__
#undef REGISTER_CONVERTER

int Converter::Apply(core::Model* model) {
  std::vector<core::Operation*> operations =
      SortOperationsInTopologicalOrder(model);
  for (auto operation : operations) {
    NNADAPTER_VLOG(5) << "Converting " << OperationTypeToString(operation->type)
                      << " ...";
    switch (operation->type) {
#define REGISTER_CONVERTER(__op_type__, __func_name__) \
  case NNADAPTER_##__op_type__:                        \
    __func_name__(this, operation);                    \
    break;
#include "driver/intel_openvino/converter/all.h"  // NOLINT
#undef __NNADAPTER_DRIVER_INTEL_OPENVINO_CONVERTER_ALL_H__
#undef REGISTER_CONVERTER
      default:
        NNADAPTER_LOG(FATAL) << "Unsupported operation("
                             << OperationTypeToString(operation->type)
                             << ") is found.";
        break;
    }
  }
  return NNADAPTER_NO_ERROR;
}

std::shared_ptr<Tensor> Converter::GetMappedTensor(core::Operand* operand) {
  auto it = tensor_map_->find(operand);
  if (it != tensor_map_->end()) {
    return it->second.back();
  }
  return nullptr;
}

std::shared_ptr<Tensor> Converter::UpdateTensorMap(
    core::Operand* operand, std::shared_ptr<Tensor> tensor) {
  auto it = tensor_map_->find(operand);
  if (it == tensor_map_->end()) {
    auto result = tensor_map_->insert(
        std::make_pair(operand, std::vector<std::shared_ptr<Tensor>>()));
    NNADAPTER_CHECK(result.second);
    it = result.first;
  }
  tensor->set_names({OperandIdToString(operand)});
  it->second.push_back(tensor);
  return tensor;
}

std::shared_ptr<Tensor> Converter::ConvertOperand(
    core::Operand* operand, std::vector<int32_t> dimensions) {
  if (dimensions.empty()) {
    for (uint32_t i = 0; i < operand->type.dimensions.count; i++) {
      dimensions.push_back(operand->type.dimensions.data[i]);
    }
  }
  if (IsConstantOperand(operand)) {
    auto constant_op = std::make_shared<default_opset::Constant>(
        ConvertToOVElementType(operand->type.precision),
        ConvertToOVShape(dimensions),
        operand->buffer);
    auto output_tensor = std::make_shared<Tensor>(constant_op->output(0));
    UpdateTensorMap(operand, output_tensor);
    return output_tensor;
  } else if (IsModelInputOperand(operand)) {
    auto parameter_node = std::make_shared<default_opset::Parameter>(
        ConvertToOVElementType(operand->type.precision),
        ConvertToOVShape(dimensions));
    parameter_nodes_->push_back(parameter_node);
    auto output_tensor = std::make_shared<Tensor>(parameter_node->output(0));
    UpdateTensorMap(operand, output_tensor);
    return output_tensor;
  }
  NNADAPTER_LOG(FATAL) << "Only constant and model input operands can be "
                          "converted to OpenVINO Tensor!";
  return nullptr;
}

} // namespace intel_openvino
} // namespace nnadapter
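Converter maps each operand to a vector of tensors rather than a single tensor: UpdateTensorMap appends a new entry whenever an operand is re-mapped (as when the bias Add and fused activation in ConvertConv2D replace the convolution output), and GetMappedTensor hands later converters the most recent entry via back(). Below is a reduced sketch of that bookkeeping; Operand and Tensor are simplified stand-ins, not the NNAdapter types.

// tensor_map_demo.cc -- illustrative only; Operand/Tensor are stand-ins.
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Operand {};                        // stand-in for core::Operand
struct Tensor { std::string producer; };  // stand-in for an OpenVINO output

class TensorMap {
 public:
  // Record a (new) tensor for an operand; later entries shadow earlier ones.
  std::shared_ptr<Tensor> Update(Operand* operand,
                                 std::shared_ptr<Tensor> tensor) {
    map_[operand].push_back(tensor);
    return tensor;
  }
  // The latest mapping wins, mirroring GetMappedTensor's use of back().
  std::shared_ptr<Tensor> Get(Operand* operand) const {
    auto it = map_.find(operand);
    return it == map_.end() ? nullptr : it->second.back();
  }

 private:
  std::map<Operand*, std::vector<std::shared_ptr<Tensor>>> map_;
};

int main() {
  Operand conv_out;
  TensorMap tensors;
  tensors.Update(&conv_out, std::make_shared<Tensor>(Tensor{"Convolution"}));
  // A fused bias Add re-maps the same operand; consumers now see the Add.
  tensors.Update(&conv_out, std::make_shared<Tensor>(Tensor{"Add"}));
  assert(tensors.Get(&conv_out)->producer == "Add");
  return 0;
}

This is why ConvertConv2D can call MAP_OUTPUT on the same output_operand several times (convolution, bias Add, fused activation) and downstream operations still consume the final tensor.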
(The remaining changed files in this commit are not shown here.)