disable lite gpu (#43178)
zhupengyang authored Jun 9, 2022
1 parent: e161979 | commit: 3698030
Showing 5 changed files with 3 additions and 44 deletions.
cmake/external/lite.cmake (5 changes: 1 addition & 4 deletions)
@@ -106,7 +106,7 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
  else()
  set(LITE_BUILD_COMMAND $(MAKE) publish_inference -j)
  set(LITE_OPTIONAL_ARGS -DWITH_MKL=ON
- -DLITE_WITH_CUDA=${WITH_GPU}
+ -DLITE_WITH_CUDA=OFF
  -DWITH_MKLDNN=OFF
  -DLITE_WITH_X86=ON
  -DLITE_WITH_PROFILE=OFF
@@ -115,9 +115,6 @@ if (NOT LITE_SOURCE_DIR OR NOT LITE_BINARY_DIR)
  -DWITH_PYTHON=OFF
  -DWITH_TESTING=OFF
  -DLITE_BUILD_EXTRA=ON
- -DCUDNN_ROOT=${CUDNN_ROOT}
- -DLITE_WITH_STATIC_CUDA=OFF
- -DCUDA_ARCH_NAME=${CUDA_ARCH_NAME}
  -DLITE_WITH_XPU=${LITE_WITH_XPU}
  -DXPU_SDK_URL=${XPU_BASE_URL}
  -DXPU_SDK_ENV=${XPU_SDK_ENV}
paddle/fluid/inference/lite/engine.cc (4 changes: 0 additions & 4 deletions)
@@ -12,10 +12,6 @@
  // See the License for the specific language governing permissions and
  // limitations under the License.

- #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
- #define LITE_WITH_CUDA 1
- #endif
-
  #ifdef LITE_SUBGRAPH_WITH_XPU
  #define LITE_WITH_XPU 1
  #endif
paddle/fluid/inference/lite/test_tensor_utils.cc (10 changes: 0 additions & 10 deletions)
@@ -151,22 +151,12 @@ TEST(LiteEngineOp, TensorCopyAsync) {
  auto* ctx_cpu =
  platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
  test_tensor_copy(*ctx_cpu);
- #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
- auto* ctx_gpu =
- platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
- test_tensor_copy(*ctx_gpu);
- #endif
  }

  TEST(LiteEngineOp, TensorShare) {
  auto* ctx_cpu =
  platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
  test_tensor_share(*ctx_cpu);
- #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
- auto* ctx_gpu =
- platform::DeviceContextPool::Instance().Get(platform::CUDAPlace(0));
- test_tensor_share(*ctx_gpu);
- #endif
  }

  } // namespace utils
paddle/fluid/inference/tests/api/lite_mul_model_test.cc (22 changes: 0 additions & 22 deletions)
@@ -119,35 +119,13 @@ TEST(AnalysisPredictor, lite_xpu) {
  }
  #endif

- #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
- TEST(AnalysisPredictor, thread_local_stream) {
- const size_t thread_num = 5;
- std::vector<std::thread> threads(thread_num);
- Barrier barrier(thread_num);
- for (size_t i = 0; i < threads.size(); ++i) {
- threads[i] = std::thread([&barrier, i]() {
- AnalysisConfig config;
- config.EnableUseGpu(100, 0);
- config.SetModel(FLAGS_infer_model + "/" + "mul_model");
- config.EnableGpuMultiStream();
- test_predictor(config, &barrier);
- test_predictor_zero_copy(config);
- });
- }
- for (auto& th : threads) {
- th.join();
- }
- }
-
  TEST(AnalysisPredictor, lite_engine) {
  AnalysisConfig config;
- config.EnableUseGpu(100, 0);
  config.SetModel(FLAGS_infer_model + "/" + "mul_model");
  config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32);
  test_predictor(config);
  test_predictor_zero_copy(config);
  }
- #endif

  } // namespace inference
  } // namespace paddle
paddle/fluid/inference/tests/api/lite_resnet50_test.cc (6 changes: 2 additions & 4 deletions)
@@ -22,10 +22,9 @@ limitations under the License. */
  namespace paddle {
  namespace inference {

- TEST(AnalysisPredictor, use_gpu) {
+ TEST(AnalysisPredictor, use_cpu) {
  std::string model_dir = FLAGS_infer_model + "/" + "model";
  AnalysisConfig config;
- config.EnableUseGpu(100, 0);
  config.SetModel(model_dir + "/model", model_dir + "/params");
  config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32, true);

@@ -73,10 +72,9 @@ TEST(AnalysisPredictor, use_gpu) {

  namespace paddle_infer {

- TEST(Predictor, use_gpu) {
+ TEST(Predictor, use_cpu) {
  std::string model_dir = FLAGS_infer_model + "/" + "model";
  Config config;
- config.EnableUseGpu(100, 0);
  config.SetModel(model_dir + "/model", model_dir + "/params");
  config.EnableLiteEngine(PrecisionType::kFloat32);

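Note: after this change the Lite engine path in these tests runs on a CPU-only AnalysisConfig; the config.EnableUseGpu(100, 0) call is removed and only SetModel plus EnableLiteEngine remain. The following is a minimal sketch of that CPU-only setup, not code from this commit; the include path, the model paths, and the CreatePaddlePredictor call are assumptions added for illustration.

  #include "paddle/fluid/inference/api/paddle_inference_api.h"  // assumed public inference header

  int main() {
    // Mirrors the updated lite_resnet50_test.cc: no EnableUseGpu call anywhere.
    paddle::AnalysisConfig config;
    config.SetModel("model_dir/model", "model_dir/params");  // hypothetical model paths
    // Lite subgraph engine with FP32 precision and zero-copy tensors, running on CPU only.
    config.EnableLiteEngine(paddle::AnalysisConfig::Precision::kFloat32, true);
    auto predictor = paddle::CreatePaddlePredictor(config);  // assumed factory helper
    return predictor != nullptr ? 0 : 1;
  }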
