Fc fp16 #44540

Merged · 1 commit · Jul 22, 2022
61 changes: 18 additions & 43 deletions paddle/phi/kernels/funcs/fc_functor.cu
@@ -36,6 +36,24 @@ struct FcTypeTraits<double> {
   typedef double4 Type;
 };
 
+#if defined(PADDLE_WITH_CUDA)
+#include <cuda_fp16.h>
+
+template <>
+struct FcTypeTraits<float16> {
+  typedef half2 Type;
+};
+#else
+struct float16_4 {
+  float16 x, y, z, w;
+};
+
+template <>
+struct FcTypeTraits<float16> {
+  typedef float16_4 Type;
+};
+#endif
+
 template <typename T, bool DoRelu>
 __global__ void bias_relu_v4(const int num, const T* bias, T* data, int K) {
   int tid = blockIdx.x * blockDim.x + threadIdx.x;
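For context, `FcTypeTraits` maps each element type to a packed vector type (`float4`/`double4` above, and with this change `half2` or `float16_4` for fp16) so the bias kernels can load, add, and store several values per thread. Below is a minimal sketch of the dispatch arithmetic this enables; `TraitsSketch`, `repack_for_launch`, and the use of CUDA's `__half` in place of Paddle's `float16` are all illustrative assumptions, not code from this PR.

```cpp
// Sketch only (assumed stand-in names; Paddle's float16 replaced by __half):
// how a launcher derives the packed width from the trait and reinterprets
// raw fp16 pointers before handing them to a packed kernel.
#include <cuda_fp16.h>

template <typename T>
struct TraitsSketch;

template <>
struct TraitsSketch<__half> {
  typedef half2 Type;  // two fp16 lanes per 32-bit register
};

void repack_for_launch(int num_elems, const __half* bias, __half* data) {
  using VecT = typename TraitsSketch<__half>::Type;
  constexpr int kPack = sizeof(VecT) / sizeof(__half);  // 2 for half2
  if (num_elems % kPack != 0) {
    return;  // a real launcher would fall back to a scalar kernel here
  }
  const VecT* vec_bias = reinterpret_cast<const VecT*>(bias);
  VecT* vec_data = reinterpret_cast<VecT*>(data);
  // vec_bias / vec_data would be passed to a bias_relu_v2-style kernel,
  // so each thread loads and stores kPack fp16 values at a time.
  (void)vec_bias;
  (void)vec_data;
}
```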
@@ -109,14 +127,6 @@ void AddReluKernel(
 }
 
 #if defined(PADDLE_WITH_CUDA)
-
-#include <cuda_fp16.h>
-
-template <>
-struct FcTypeTraits<float16> {
-  typedef half2 Type;
-};
-
 template <bool DoRelu>
 __global__ void bias_relu_v2(const int num,
                              const half2* bias,
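The hunk above is truncated inside the `bias_relu_v2` signature. For reference, a `half2` bias-plus-ReLU body in this style typically relies on the packed intrinsics from `cuda_fp16.h`; the sketch below (including the name `bias_relu_half2`) is an assumed illustration, not the PR's exact kernel body.

```cpp
// Illustrative half2 bias + ReLU kernel (sm_53+ for __hadd2/__hgt2/__hmul2);
// not this file's exact body, just the usual shape of the pattern.
#include <cuda_fp16.h>

__global__ void bias_relu_half2(const int num, const half2* bias, half2* data,
                                int K) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < num) {
    half2 v = __hadd2(data[tid], bias[tid % K]);  // add both lanes at once
    const half2 zero = __float2half2_rn(0.f);
    v = __hmul2(__hgt2(v, zero), v);  // ReLU as v * (v > 0), lane-wise
    data[tid] = v;
  }
}
```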
@@ -200,46 +210,11 @@ void AddReluKernel(cudaStream_t stream,
 }
 
 #else
-
-struct float16_4 {
-  float16 x, y, z, w;
-};
-template <>
-struct FcTypeTraits<float16> {
-  typedef float16_4 Type;
-};
-
-template <bool DoRelu>
-__global__ void bias_relu_v4(const int num,
-                             const float16_4* bias,
-                             float16_4* data,
-                             int K) {
-  int tid = blockIdx.x * blockDim.x + threadIdx.x;
-  if (tid < num) {
-    int bias_idx = tid % K;
-    const float16_4 bias_ptr = bias[bias_idx];
-    const float16_4 in_ptr = data[tid];
-    float16_4 packed_val;
-    packed_val.x = in_ptr.x + bias_ptr.x;
-    packed_val.y = in_ptr.y + bias_ptr.y;
-    packed_val.z = in_ptr.z + bias_ptr.z;
-    packed_val.w = in_ptr.w + bias_ptr.w;
-    if (DoRelu) {
-      packed_val.x = fmaxf(0.f, packed_val.x);
-      packed_val.y = fmaxf(0.f, packed_val.y);
-      packed_val.z = fmaxf(0.f, packed_val.z);
-      packed_val.w = fmaxf(0.f, packed_val.w);
-    }
-    data[tid] = packed_val;
-  }
-}
-
 template <bool DoRelu, int BlockDim>
 __global__ void InplaceAddReluKernel(const int N,
                                      const float16* bias,
                                      float16* data) {
   int offset = blockIdx.x * N;
 
   for (int i = threadIdx.x; i < N; i += BlockDim) {
     float16 temp;
     temp = data[offset + i] + bias[i];
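`InplaceAddReluKernel` above assigns one row of length N to each block, and the block's threads stride across the columns in steps of BlockDim while reusing the same bias vector for every row. A self-contained sketch of that pattern, using `float` as a stand-in for Paddle's `float16` (the kernel name and launch values are assumed):

```cpp
// Sketch of the one-row-per-block, stride-BlockDim pattern (float stand-in
// for Paddle's float16; names and launch configuration are illustrative).
template <bool DoRelu, int BlockDim>
__global__ void inplace_add_relu_sketch(const int N, const float* bias,
                                        float* data) {
  int offset = blockIdx.x * N;  // this block owns row blockIdx.x
  for (int i = threadIdx.x; i < N; i += BlockDim) {
    float temp = data[offset + i] + bias[i];  // bias reused across rows
    data[offset + i] = DoRelu ? fmaxf(0.f, temp) : temp;
  }
}

// Typical launch: one block per row of an M x N output, e.g.
// inplace_add_relu_sketch<true, 256><<<M, 256, 0, stream>>>(N, bias, data);
```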