
[Cherry-pick] Cherry-pick 0D output #53538

Merged
Changes from all commits (39 commits)
62a9e20
add 0D output support for linalg.slogdet, test=allcase
GGBond8488 Apr 13, 2023
62ae71a
fix zero dim test error, test=allcase
GGBond8488 Apr 16, 2023
7475731
fix test error test=allcase
GGBond8488 Apr 16, 2023
95416e1
add static backward test, test=allcase
GGBond8488 Apr 17, 2023
248c7c4
Merge branch 'release/2.5' of https://github.com/PaddlePaddle/Paddle …
GGBond8488 Apr 23, 2023
207665a
Merge branch 'release/2.5' of https://github.com/PaddlePaddle/Paddle …
GGBond8488 May 5, 2023
5285345
support_0D_output_for_matrix_rank_multi_dot, test=allcase
GGBond8488 Apr 12, 2023
935215f
add 0D output test for matrix_rank and multi_dot, test=allcase
GGBond8488 Apr 13, 2023
b87a0f2
fix assert error, test=allcase
GGBond8488 Apr 13, 2023
9bfe7fe
fix test error, test=allcase
GGBond8488 Apr 14, 2023
84c532e
fix other test error, test=allcase
GGBond8488 Apr 14, 2023
dcfea56
fix other test error, test=allcase
GGBond8488 Apr 14, 2023
afefbc4
fix test error, test=allcase
GGBond8488 Apr 14, 2023
0bbed45
fix matrix_rank and multi_dot test error, test=allcase
GGBond8488 Apr 16, 2023
08e113e
fix test error test=allcase
GGBond8488 Apr 16, 2023
ad61250
fix zero dim test, test=allcase
GGBond8488 Apr 17, 2023
2367698
add static backward test for multi_dot, test=allcase
GGBond8488 Apr 18, 2023
a1a2b8a
add tol 2d broadcast test case, test=allcase
GGBond8488 Apr 19, 2023
674d21a
fix test error test=allcase
GGBond8488 Apr 12, 2023
16d32af
fix test error test=allcase
GGBond8488 Apr 12, 2023
2b36425
test=allcase
GGBond8488 Apr 12, 2023
72226ad
support_0d_output_for_linalg.norm
GGBond8488 Apr 13, 2023
0dbdaeb
fix test error test=allcase
GGBond8488 Apr 14, 2023
5e5d3ad
fix 0D test
GGBond8488 Apr 16, 2023
9761511
fix test error test=allcase
GGBond8488 Apr 16, 2023
cbaadd1
fix test error test=allcase
GGBond8488 Apr 16, 2023
6d55482
fix tests, test=allcase
GGBond8488 Apr 17, 2023
75c0914
fix error,test=allcase
GGBond8488 Apr 17, 2023
c6120d9
fix errors, test=allcase
GGBond8488 Apr 17, 2023
1958a5d
add static backward, test=allcase
GGBond8488 Apr 17, 2023
f0eb7d8
add static backward test, test=allcase
GGBond8488 Apr 17, 2023
fbd3f2f
slogdet_support_0D_output
GGBond8488 Apr 17, 2023
ed7afa1
add new case
GGBond8488 Apr 20, 2023
00130f8
fix tests, test=allcase
GGBond8488 Apr 20, 2023
a6985d1
cherry-pick
GGBond8488 May 6, 2023
29a0378
cherry-pick
GGBond8488 Apr 22, 2023
a45a03d
fix trace gpu kernel 0d error, test=allcase
GGBond8488 Apr 23, 2023
04af909
fix windows error, test=allcase
GGBond8488 Apr 23, 2023
5a2a483
add matrixrank cherry-pick
GGBond8488 May 6, 2023
2 changes: 1 addition & 1 deletion paddle/fluid/operators/matrix_rank_op.cc
@@ -27,7 +27,7 @@ namespace detail {
 static DDim CheckAndGetOutputDim(const DDim& dim_x) {
   auto x_vec = phi::vectorize(dim_x);
   if (x_vec.size() == 2) {
-    return phi::make_ddim({1});
+    return phi::make_ddim({});
   }
   x_vec.erase(x_vec.end() - 2, x_vec.end());
   return phi::make_ddim(x_vec);
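For context, a minimal Python sketch of the user-visible effect, assuming a Paddle build that includes this cherry-pick (not part of the PR itself): matrix_rank of a single 2D matrix now infers a 0D output instead of shape [1], while batched inputs keep their batch dimensions.

import paddle

x = paddle.rand([4, 4])
print(paddle.linalg.matrix_rank(x).shape)    # [] -- 0D output (previously [1])

xb = paddle.rand([2, 4, 4])                  # batched input
print(paddle.linalg.matrix_rank(xb).shape)   # [2] -- batch dims are kept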
4 changes: 2 additions & 2 deletions paddle/phi/infermeta/binary.cc
@@ -72,7 +72,7 @@ static void BinarySameInputDimsCheck(const MetaTensor& x,
 static DDim CheckAndGetOutputDim(const DDim& dim_x) {
   auto x_vec = phi::vectorize(dim_x);
   if (x_vec.size() == 2) {
-    return phi::make_ddim({1});
+    return phi::make_ddim({});
   }
   x_vec.erase(x_vec.end() - 2, x_vec.end());
   return phi::make_ddim(x_vec);
@@ -990,7 +990,7 @@ void DistInferMeta(const MetaTensor& x,
                         "The Input(Y) has not been initialized properly. The "
                         "shape of Input(Y) = [%s].",
                         y_dims));
-  out->set_dims({1});
+  out->set_dims(phi::make_ddim({}));
   out->set_dtype(x.dtype());
 }
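The DistInferMeta change has the same flavor: paddle.dist now reports a true 0D scalar. A minimal sketch, again assuming a build with this cherry-pick:

import paddle

x = paddle.rand([3, 4])
y = paddle.rand([3, 4])
d = paddle.dist(x, y, p=2)  # p-norm distance between two tensors
print(d.shape)              # [] -- 0D output (previously [1])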
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/multiary.cc
@@ -2344,7 +2344,7 @@ void MultiDotInferMeta(const std::vector<const MetaTensor*>& x,
   // If the last tensor is 1D of size n view it as a column vector (n, 1)
   if (last_dim.size() == 1) {
     last_dim = phi::make_ddim({static_cast<int>(last_dim[0]), 1});
-    out_dim = is_vector ? phi::make_ddim({1}) : phi::make_ddim({first_dim[0]});
+    out_dim = is_vector ? phi::make_ddim({}) : phi::make_ddim({first_dim[0]});
   } else {
     out_dim = is_vector ? phi::make_ddim({last_dim[1]})
                         : phi::make_ddim({first_dim[0], last_dim[1]});
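With this change, multi_dot collapses to a 0D scalar when both the first and last operands are 1D vectors. A minimal sketch under the same patched-build assumption:

import paddle

v1 = paddle.rand([4])   # 1D head operand, viewed as a (1, 4) row vector
m = paddle.rand([4, 5])
v2 = paddle.rand([5])   # 1D tail operand, viewed as a (5, 1) column vector
out = paddle.linalg.multi_dot([v1, m, v2])
print(out.shape)        # [] -- vector-matrix-vector contracts to 0D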
3 changes: 1 addition & 2 deletions paddle/phi/infermeta/unary.cc
@@ -38,7 +38,7 @@ namespace detail {
 static DDim CheckAndGetOutputDim(const DDim& dim_x) {
   auto x_vec = phi::vectorize(dim_x);
   if (x_vec.size() == 2) {
-    return phi::make_ddim({1});
+    return phi::make_ddim({});
   }
   x_vec.erase(x_vec.end() - 2, x_vec.end());
   return phi::make_ddim(x_vec);
@@ -4405,7 +4405,6 @@ void TraceInferMeta(
   auto sizes = vectorize(x_dims);
   if (x_dims.size() == 2) {
     sizes.clear();
-    sizes.push_back(1);
   } else {
     sizes.erase(sizes.begin() + std::max(dim1_, dim2_));
     sizes.erase(sizes.begin() + std::min(dim1_, dim2_));
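TraceInferMeta now leaves sizes empty for a 2D input instead of padding a trailing 1, so paddle.trace of a single matrix infers a 0D output. Sketch, assuming the patched build:

import paddle

x = paddle.rand([3, 3])
print(paddle.trace(x).shape)                     # [] -- 0D (previously [1])

xb = paddle.rand([2, 3, 3])                      # batched: trace over the last two axes
print(paddle.trace(xb, axis1=1, axis2=2).shape)  # [2]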
5 changes: 4 additions & 1 deletion paddle/phi/kernels/gpu/trace_kernel.cu
@@ -32,7 +32,10 @@ void TraceKernel(const Context& ctx,
   auto diag = funcs::Diagonal<T, Context>(ctx, &x, offset, axis1, axis2);
   if (diag.numel() > 0) {
     std::vector<int> reduce_dims;
-    reduce_dims.push_back(out->dims().size());
+    // Adapt to 0D output
+    auto out_dim_size = out->dims().size();
+    if (out_dim_size == 0) out_dim_size = 1;
+    reduce_dims.push_back(out_dim_size);
     funcs::ReduceKernel<T, T, kps::AddFunctor, kps::IdentityFunctor<T>>(
         ctx, diag, out, kps::IdentityFunctor<T>(), reduce_dims);
   } else {
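The GPU kernel derives the reduce axis from the output rank, which can now be 0, so it is clamped to keep the kernel reducing the same axis as before. Conceptually the kernel extracts the diagonal and sums it; a rough Python equivalent of that computation (an illustration, not the kernel itself), assuming the patched build:

import paddle

x = paddle.rand([4, 4])
diag = paddle.diagonal(x, offset=0, axis1=0, axis2=1)  # 1D diagonal, length 4
out = diag.sum()                                       # reduce to a 0D scalar
print(out.shape)                                       # []
print(paddle.allclose(out, paddle.trace(x)))           # True (up to float tolerance)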
4 changes: 2 additions & 2 deletions paddle/phi/kernels/impl/determinant_grad_kernel_impl.h
@@ -90,10 +90,10 @@ void DeterminantGradKernel(const Context& dev_ctx,
                           " input tensor's, but here differ %d",
                           input_dims_size - out_grad.dims().size()));
   } else if (input_dims_size == 2) {
-    // input dims size 2 and grad dims size 1 is possible
+    // input dims size 2 and grad dims size 0 is possible
     PADDLE_ENFORCE_EQ(
         out_grad.dims().size(),
-        1,
+        0,
         phi::errors::InvalidArgument(
             "The grad tensor of det dims size should be 2 less than"
             " input tensor's, but here differ %d",
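The relaxed check mirrors the forward change: for a single 2D input, det's output, and hence the incoming gradient, is now rank 0. A minimal autograd sketch, assuming a build with this cherry-pick:

import paddle

x = paddle.rand([3, 3])
x.stop_gradient = False
d = paddle.linalg.det(x)      # 0D forward output
d.backward()                  # out_grad is 0D, satisfying the new rank-0 check
print(d.shape, x.grad.shape)  # [] [3, 3]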
2 changes: 1 addition & 1 deletion paddle/phi/kernels/impl/determinant_kernel_impl.h
@@ -116,7 +116,7 @@ void DeterminantKernel(const Context& dev_ctx,
     out->Resize(output_dims);
   } else {
     // when input is a two-dimension matrix, The det value is a number.
-    out->Resize({1});
+    out->Resize(phi::make_ddim({}));
   }
   VLOG(10) << "output dim:" << out->dims();
 }
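The forward kernel keeps batch dimensions and only drops to rank 0 for a single matrix. Sketch, under the same patched-build assumption:

import paddle

print(paddle.linalg.det(paddle.rand([3, 3])).shape)     # [] -- single matrix, 0D
print(paddle.linalg.det(paddle.rand([5, 3, 3])).shape)  # [5] -- batched, batch dim kept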
3 changes: 2 additions & 1 deletion paddle/phi/kernels/impl/trace_grad_kernel_impl.h
@@ -91,7 +91,8 @@ void TraceGradKernel(const Context& ctx,
   auto input_dims = in_grad->dims();
   auto input_stride = phi::stride(input_dims);
   auto output_dims = out_grad.dims();
-  auto output_stride = phi::stride(output_dims);
+  auto output_stride = output_dims.size() == 0 ? phi::DDim(output_dims)
+                                               : phi::stride(output_dims);

   auto* out_data = out_grad.data<T>();
   T* x_data = ctx.template Alloc<T>(in_grad);
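In the backward pass a rank-0 out_grad has no axis to compute strides over, hence the guard that reuses the empty DDim instead of calling phi::stride. The gradient itself is unchanged: d trace(X)/dX is the identity pattern. A minimal sketch, assuming the patched build:

import paddle

x = paddle.rand([3, 3])
x.stop_gradient = False
t = paddle.trace(x)  # 0D output
t.backward()
print(x.grad)        # identity matrix: d trace(X) / dX = I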