fix codestyle && some doc error
Difers committed Aug 25, 2023
1 parent 018dbc4 commit 2151375
Showing 183 changed files with 238 additions and 383 deletions.
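Most of the hunks below are mechanical import-order fixes. As a minimal sketch of the convention being applied (assuming the codestyle fix is isort-style alphabetical ordering within the third-party import group; the before/after is reconstructed from the recv.py hunk further down and is illustrative only):

# Before: paddle.base imported after paddle.distributed, so the group is not alphabetical
from paddle import framework
from paddle.distributed.communication.group import (
    _get_global_group,
    _get_or_throw_group_rank,
    _warn_cur_rank_not_in_group,
)
from paddle.base import data_feeder

# After: paddle.base sorts before paddle.distributed within the same import group
from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import (
    _get_global_group,
    _get_or_throw_group_rank,
    _warn_cur_rank_not_in_group,
)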
4 changes: 2 additions & 2 deletions python/paddle/dataset/uci_housing.py
@@ -51,7 +51,7 @@
UCI_TRAIN_DATA = None
UCI_TEST_DATA = None

FLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/base/fit_a_line.base.tar'
FLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/base/fit_a_line.fluid.tar'
FLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b'


@@ -152,7 +152,7 @@ def reader():

def base_model():
parameter_tar = paddle.dataset.common.download(
FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.base.tar'
FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar'
)

tar = tarfile.TarFile(parameter_tar, mode='r')
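For reference, a minimal sketch of exercising the corrected download path (assuming paddle.dataset.common.download keeps the (url, module_name, md5sum, save_name) signature used in the hunk above; this snippet is illustrative and not part of the diff):

import tarfile

import paddle.dataset.common
import paddle.dataset.uci_housing as uci_housing

# After the fix, the local save name again matches the remote artifact,
# which is published as fit_a_line.fluid.tar.
parameter_tar = paddle.dataset.common.download(
    uci_housing.FLUID_URL_MODEL,
    'uci_housing',
    uci_housing.FLUID_MD5_MODEL,
    'fit_a_line.fluid.tar',
)
with tarfile.TarFile(parameter_tar, mode='r') as tar:
    print(tar.getnames())  # list the serialized parameter files in the archive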
@@ -21,6 +21,8 @@

import paddle
import paddle.distributed as dist
from paddle.base import core
from paddle.base.framework import Program
from paddle.distributed.auto_parallel.static.converter import Converter
from paddle.distributed.auto_parallel.static.dist_context import (
get_default_distributed_context,
@@ -30,8 +32,6 @@
is_forward_op,
is_loss_op,
)
from paddle.base import core
from paddle.base.framework import Program
from paddle.static.io import deserialize_program

_valid_types = [
@@ -15,8 +15,8 @@
import copy
import logging

from paddle.distributed.fleet.meta_optimizers.common import OpRole
from paddle.base.core import get_spmd_rule # noqa: F401
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from paddle.framework import core

from ..process_mesh import ProcessMesh, compute_compatible_process_mesh
2 changes: 1 addition & 1 deletion python/paddle/distributed/auto_parallel/static/engine.py
@@ -24,8 +24,8 @@
import paddle
import paddle.distributed.auto_parallel.static.utils as auto_utils
from paddle import static, utils
from paddle.distributed import fleet
from paddle.base.executor import _to_name_str
from paddle.distributed import fleet
from paddle.framework import IrGraph
from paddle.framework import _current_expected_place as _get_device
from paddle.framework import core, in_dynamic_mode
@@ -26,6 +26,9 @@
import numpy as np

import paddle
from paddle.base import program_guard
from paddle.base.backward import append_backward
from paddle.base.framework import Parameter, unique_name
from paddle.distributed.auto_parallel.process_mesh import ProcessMesh
from paddle.distributed.auto_parallel.static.cluster_v2 import DeviceMesh
from paddle.distributed.auto_parallel.static.completion import Completer
@@ -48,9 +51,6 @@
print_program_with_dist_attr,
)
from paddle.distributed.fleet.meta_optimizers.common import OpRole
from paddle.base import program_guard
from paddle.base.backward import append_backward
from paddle.base.framework import Parameter, unique_name

from ....utils.log_utils import get_logger
from ..graph import Graph
@@ -15,8 +15,8 @@
import paddle
import paddle.distributed as dist
from paddle import framework
from paddle.distributed.communication.group import _get_global_group
from paddle.base import data_feeder
from paddle.distributed.communication.group import _get_global_group


def _all_gather_into_tensor_in_dygraph(
@@ -13,12 +13,12 @@
# limitations under the License.

from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import (
_get_global_group,
_warn_cur_rank_not_in_group,
)
from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op
from paddle.base import data_feeder


def _all_reduce_in_dygraph(tensor, op, group, sync_op, use_calc_stream):
@@ -15,11 +15,11 @@
import paddle
import paddle.distributed as dist
from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import (
_get_global_group,
_warn_cur_rank_not_in_group,
)
from paddle.base import data_feeder


def _all_to_all_tensor_in_dygraph(
@@ -13,12 +13,12 @@
# limitations under the License.

from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import (
_get_global_group,
_get_or_throw_group_rank,
_warn_cur_rank_not_in_group,
)
from paddle.base import data_feeder


def _broadcast_in_dygraph(
2 changes: 1 addition & 1 deletion python/paddle/distributed/communication/stream/recv.py
@@ -13,12 +13,12 @@
# limitations under the License.

from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import (
_get_global_group,
_get_or_throw_group_rank,
_warn_cur_rank_not_in_group,
)
from paddle.base import data_feeder


def _recv_in_dygraph(
2 changes: 1 addition & 1 deletion python/paddle/distributed/communication/stream/reduce.py
@@ -13,13 +13,13 @@
# limitations under the License.

from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import (
_get_global_group,
_get_or_throw_group_rank,
_warn_cur_rank_not_in_group,
)
from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op
from paddle.base import data_feeder


def _reduce_in_dygraph(
@@ -15,12 +15,12 @@
import paddle
import paddle.distributed as dist
from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import (
_get_global_group,
_warn_cur_rank_not_in_group,
)
from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op
from paddle.base import data_feeder


def _reduce_scatter_tensor_in_dygraph(
2 changes: 1 addition & 1 deletion python/paddle/distributed/communication/stream/scatter.py
@@ -17,12 +17,12 @@
import paddle
import paddle.distributed as dist
from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import (
_get_global_group,
_get_or_throw_group_rank,
_warn_cur_rank_not_in_group,
)
from paddle.base import data_feeder


def _scatter_tensor_in_dygraph(
2 changes: 1 addition & 1 deletion python/paddle/distributed/communication/stream/send.py
@@ -13,12 +13,12 @@
# limitations under the License.

from paddle import framework
from paddle.base import data_feeder
from paddle.distributed.communication.group import (
_get_global_group,
_get_or_throw_group_rank,
_warn_cur_rank_not_in_group,
)
from paddle.base import data_feeder


def _send_in_dygraph(
4 changes: 2 additions & 2 deletions python/paddle/distributed/fleet/base/distributed_strategy.py
@@ -19,10 +19,10 @@
import google.protobuf.text_format

import paddle
from paddle.distributed.fleet.proto import distributed_strategy_pb2
from paddle.distributed.fleet.utils.log_util import logger
from paddle.base.framework import _global_flags
from paddle.base.wrapped_decorator import wrap_decorator
from paddle.distributed.fleet.proto import distributed_strategy_pb2
from paddle.distributed.fleet.utils.log_util import logger

__all__ = []

2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/base/role_maker.py
@@ -20,10 +20,10 @@
import numpy as np

import paddle
from paddle.base import core
from paddle.distributed.fleet.base.private_helper_function import (
wait_server_ready,
)
from paddle.base import core

from ...backup_env import getenv_or_backup

2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/layers/mpu/mp_layers.py
@@ -14,8 +14,8 @@

import paddle
from paddle.autograd import PyLayer
from paddle.distributed import fleet
from paddle.base import core
from paddle.distributed import fleet
from paddle.nn import functional as F

from ....communication.reduce import ReduceOp, _get_reduce_op
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/layers/mpu/mp_ops.py
@@ -15,8 +15,8 @@
import paddle
from paddle import _legacy_C_ops
from paddle.autograd import PyLayer
from paddle.distributed import collective
from paddle.base.data_feeder import check_dtype, check_variable_and_dtype
from paddle.distributed import collective
from paddle.framework import LayerHelper, _create_tensor, in_dynamic_mode
from paddle.nn import Layer
from paddle.nn.utils import dygraph_utils
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/layers/mpu/random.py
@@ -18,9 +18,9 @@

import paddle
from paddle import _legacy_C_ops
from paddle.common_ops_import import Variable
from paddle.base import core
from paddle.base.data_feeder import check_variable_and_dtype
from paddle.common_ops_import import Variable
from paddle.framework import LayerHelper, in_dynamic_mode

__all__ = []
@@ -19,9 +19,9 @@
__all__ = []

import paddle
from paddle.common_ops_import import LayerHelper
from paddle.base import framework
from paddle.base.dygraph import base as imperative_base
from paddle.common_ops_import import LayerHelper
from paddle.framework import core, in_dynamic_mode
from paddle.nn.clip import ClipGradByNorm, append_gradient_clip_ops
from paddle.optimizer import Momentum, Optimizer
@@ -22,8 +22,8 @@
import paddle.distributed as dist
from paddle import framework, nn
from paddle.autograd import PyLayer
from paddle.distributed import collective
from paddle.base.framework import EagerParamBase
from paddle.distributed import collective
from paddle.framework import core
from paddle.nn import ClipGradByGlobalNorm

@@ -20,9 +20,9 @@

import paddle
from paddle import _C_ops, _legacy_C_ops
from paddle.common_ops_import import dygraph_only
from paddle.base import core
from paddle.base.dygraph import to_variable
from paddle.common_ops_import import dygraph_only
from paddle.nn import clip


@@ -158,7 +158,7 @@ def forward(
# Note:
# If not marked non_differentiable, all output tensors' attr `stop gradient`
# will be reset to `False` in c++ backend.
# See https://github.com/PaddlePaddle/Paddle/blob/9d62efb0e6e5373823039d9eda96cd5905426c0a/paddle/base/pybind/eager_py_layer.cc#L388
# See https://github.com/PaddlePaddle/Paddle/blob/9d62efb0e6e5373823039d9eda96cd5905426c0a/paddle/fluid/pybind/eager_py_layer.cc#L388
if framework.in_dynamic_mode() and state:
ctx.mark_non_differentiable(arg)
else:
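The mark_non_differentiable comment in the hunk above concerns auxiliary PyLayer outputs. A minimal sketch of that usage (a hypothetical custom layer, assuming the public paddle.autograd.PyLayer API in dynamic mode; this is not code from the repository):

import paddle
from paddle.autograd import PyLayer


class ScaleWithMask(PyLayer):
    @staticmethod
    def forward(ctx, x):
        y = 2.0 * x
        mask = paddle.ones_like(x)         # auxiliary output that needs no gradient
        ctx.mark_non_differentiable(mask)  # keep its stop_gradient from being reset to False
        return y, mask

    @staticmethod
    def backward(ctx, grad_y, grad_mask):
        # grad_mask arrives as zeros for the non-differentiable output
        return 2.0 * grad_y


x = paddle.ones([3], dtype='float32')
x.stop_gradient = False
y, mask = ScaleWithMask.apply(x)
y.sum().backward()
print(x.grad)  # expected: [2., 2., 2.]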
2 changes: 1 addition & 1 deletion python/paddle/distributed/fleet/scaler.py
@@ -18,8 +18,8 @@

import paddle
from paddle import _C_ops, _legacy_C_ops
from paddle.distributed import fleet
from paddle.base.dygraph import to_variable
from paddle.distributed import fleet
from paddle.framework import core

from .base.topology import ParallelMode
@@ -16,10 +16,9 @@

import numpy as np

from paddle.distributed import fleet

# (TODO: GhostScreaming) It will be removed later.
from paddle.base import core
from paddle.distributed import fleet
from paddle.framework import Block, Program, in_dynamic_mode


6 changes: 3 additions & 3 deletions python/paddle/distributed/fleet/utils/hybrid_parallel_util.py
@@ -14,16 +14,16 @@

import paddle
from paddle import framework

# (TODO: GhostScreaming) It will be removed later.
from paddle.base import core
from paddle.distributed.parallel import (
_split_tensors,
build_groups,
in_dynamic_mode,
sync_params_buffers,
)

# (TODO: GhostScreaming) It will be removed later.
from paddle.base import core

from .log_util import logger

__all__ = []
6 changes: 3 additions & 3 deletions python/paddle/distributed/fleet/utils/mix_precision_utils.py
@@ -20,13 +20,13 @@

import paddle
from paddle import _legacy_C_ops, nn
from paddle.base import framework
from paddle.base.dygraph import base as imperative_base
from paddle.base.dygraph import to_variable
from paddle.distributed import fleet
from paddle.distributed.fleet.utils.hybrid_parallel_util import (
obtain_optimizer_parameters_list,
)
from paddle.base import framework
from paddle.base.dygraph import base as imperative_base
from paddle.base.dygraph import to_variable
from paddle.framework import core


@@ -17,12 +17,12 @@
import paddle
from paddle import distributed as dist
from paddle.autograd import PyLayer
from paddle.base import core
from paddle.distributed import fleet
from paddle.distributed.fleet.meta_parallel import get_rng_state_tracker
from paddle.distributed.fleet.utils.hybrid_parallel_util import (
fused_allreduce_gradients_with_group,
)
from paddle.base import core
from paddle.nn import Layer
from paddle.nn import functional as F

@@ -22,8 +22,8 @@
ch.setFormatter(formatter)
logger.addHandler(ch)

from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY
from paddle.base import core
from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY
from paddle.static import Parameter

_supported_optimizer_type = [
3 changes: 1 addition & 2 deletions python/paddle/distributed/launch/context/device.py
@@ -14,10 +14,9 @@

import os

from paddle.device import get_available_custom_device

# (TODO: GhostScreaming) It will be removed later.
from paddle.base import core
from paddle.device import get_available_custom_device


class DeviceType:
