[remove fluid] under unittests of linear api (PaddlePaddle#48564)
* [remove fluid] under unittests of linear api

* [remove fluid] under unittests of linear api

* [remove fluid] under unittests of linear api

* [remove fluid] under unittests of linear api

* [remove fluid] under unittests of linear api

* [remove fluid] under unittests of linear api

* [remove fluid] fluid dygraph linear api

* [remove fluid] fluid dygraph linear api

* [remove fluid] fluid dygraph linear api
wangzhen38 authored Dec 8, 2022
1 parent 082886c commit 99a9dcd
Showing 8 changed files with 50 additions and 614 deletions.
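For context, the change applied across these files follows one pattern: the legacy paddle.fluid.dygraph Linear (keyword input_dim/output_dim, optional act) is replaced by paddle.nn.Linear (positional in/out features, no activation argument), and fluid.layers.reduce_mean is replaced by paddle.mean. A minimal sketch of the new-style usage, assuming Paddle 2.x is installed:

import numpy as np
import paddle
from paddle.nn import Linear

# Old (removed): Linear(input_dim=10, output_dim=10) from paddle.fluid.dygraph
x = paddle.to_tensor(np.random.uniform(-1, 1, [10, 10]).astype("float32"))
linear = Linear(10, 10)        # in/out features are positional; no `act` argument
loss = paddle.mean(linear(x))  # replaces fluid.layers.reduce_mean(out)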
4 changes: 2 additions & 2 deletions python/paddle/distributed/sharding/group_sharded.py
@@ -91,7 +91,7 @@ def group_sharded_parallel(
# required: distributed
import paddle
-   from paddle.fluid.dygraph.nn import Linear
+   from paddle.nn import Linear
from paddle.distributed import fleet
from paddle.distributed.sharding import group_sharded_parallel
@@ -238,7 +238,7 @@ def save_group_sharded_model(model, output, optimizer=None):
# required: distributed
import paddle
-   from paddle.fluid.dygraph.nn import Linear
+   from paddle.nn import Linear
from paddle.distributed import fleet
from paddle.distributed.sharding import group_sharded_parallel, save_group_sharded_model
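The two docstrings above now import Linear from paddle.nn. A hedged sketch of how that import sits in the sharding example; the group_sharded_parallel arguments, the "p_g_os" level string, and the collective launch requirement are assumptions about the public API and are not part of this diff:

# requires a collective launch, e.g. `python -m paddle.distributed.launch train.py`
import paddle
from paddle.nn import Linear
from paddle.distributed import fleet
from paddle.distributed.sharding import group_sharded_parallel

fleet.init(is_collective=True)
model = Linear(1000, 1000)
optimizer = paddle.optimizer.AdamW(learning_rate=0.001, parameters=model.parameters())
# wrap model/optimizer for group-sharded (ZeRO-style) training;
# "p_g_os" (shard parameters, gradients, optimizer state) is assumed here
model, optimizer, scaler = group_sharded_parallel(model, optimizer, "p_g_os")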
@@ -23,7 +23,7 @@
from paddle.fluid.contrib.slim.quantization import ImperativeQuantAware
from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.nn import Sequential
-   from paddle.fluid.dygraph import Linear
+   from paddle.nn import Linear
from paddle.nn.quant.quant_layers import QuantizedConv2DTranspose
from paddle.fluid.log_helper import get_logger
from paddle.fluid.framework import _test_eager_guard
@@ -111,7 +111,7 @@ class ModelForConv2dT(nn.Layer):
def __init__(self, num_classes=10):
super().__init__()
self.features = nn.Conv2DTranspose(4, 6, (3, 3))
-   self.fc = Linear(input_dim=600, output_dim=num_classes)
+   self.fc = Linear(600, num_classes)

def forward(self, inputs):
x = self.features(inputs)
@@ -143,11 +143,9 @@ def __init__(self, num_classes=10, classifier_activation='softmax'):
)

self.fc = Sequential(
-       Linear(input_dim=400, output_dim=120),
-       Linear(input_dim=120, output_dim=84),
-       Linear(
-           input_dim=84, output_dim=num_classes, act=classifier_activation
-       ),
+       Linear(400, 120),
+       Linear(120, 84),
+       Linear(84, num_classes),
)

def forward(self, inputs):
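One non-mechanical detail in the hunk above: the old fluid Linear accepted act=classifier_activation, while paddle.nn.Linear has no activation argument, so the new Sequential simply drops it. If the softmax were still wanted it would become its own layer. A small sketch (the nn.Softmax line is illustrative only, not something this commit adds):

import paddle
import paddle.nn as nn

fc = nn.Sequential(
    nn.Linear(400, 120),
    nn.Linear(120, 84),
    nn.Linear(84, 10),
    # nn.Softmax(),  # would restore the old act=classifier_activation behaviour
)
y = fc(paddle.rand([2, 400]))  # shape [2, 10]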
20 changes: 12 additions & 8 deletions python/paddle/fluid/dygraph/learning_rate_scheduler.py
@@ -821,11 +821,12 @@ class ReduceLROnPlateau(LearningRateDecay):
.. code-block:: python
import paddle.fluid as fluid
+   import paddle
import numpy as np
with fluid.dygraph.guard():
x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
-   linear = fluid.dygraph.Linear(10, 10)
+   linear = paddle.nn.Linear(10, 10)
input = fluid.dygraph.to_variable(x)
reduce_lr = fluid.dygraph.ReduceLROnPlateau(
@@ -842,7 +843,7 @@ class ReduceLROnPlateau(LearningRateDecay):
total_loss = 0
for bath_id in range(5):
out = linear(input)
-   loss = fluid.layers.reduce_mean(out)
+   loss = paddle.mean(out)
total_loss += loss
adam.minimize(loss)
@@ -1090,17 +1091,18 @@ class StepDecay(_LearningRateEpochDecay):
import paddle.fluid as fluid
import numpy as np
+   import paddle
with fluid.dygraph.guard():
x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
-   linear = fluid.dygraph.Linear(10, 10)
+   linear = paddle.nn.Linear(10, 10)
input = fluid.dygraph.to_variable(x)
scheduler = fluid.dygraph.StepDecay(0.5, step_size=3)
adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters())
for epoch in range(9):
for batch_id in range(5):
out = linear(input)
-   loss = fluid.layers.reduce_mean(out)
+   loss = paddle.mean(out)
adam.minimize(loss)
scheduler.epoch()
@@ -1170,17 +1172,18 @@ class MultiStepDecay(_LearningRateEpochDecay):
import paddle.fluid as fluid
import numpy as np
+   import paddle
with fluid.dygraph.guard():
x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
-   linear = fluid.dygraph.Linear(10, 10)
+   linear = paddle.nn.Linear(10, 10)
input = fluid.dygraph.to_variable(x)
scheduler = fluid.dygraph.MultiStepDecay(0.5, milestones=[3, 5])
adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters())
for epoch in range(6):
for batch_id in range(5):
out = linear(input)
-   loss = fluid.layers.reduce_mean(out)
+   loss = paddle.mean(out)
adam.minimize(loss)
scheduler.epoch()
@@ -1255,17 +1258,18 @@ class LambdaDecay(_LearningRateEpochDecay):
import paddle.fluid as fluid
import numpy as np
+   import paddle
with fluid.dygraph.guard():
x = np.random.uniform(-1, 1, [10, 10]).astype("float32")
-   linear = fluid.dygraph.Linear(10, 10)
+   linear = paddle.nn.Linear(10, 10)
input = fluid.dygraph.to_variable(x)
scheduler = fluid.dygraph.LambdaDecay(0.5, lr_lambda=lambda x: 0.95**x)
adam = fluid.optimizer.Adam(learning_rate = scheduler, parameter_list = linear.parameters())
for epoch in range(6):
for batch_id in range(5):
out = linear(input)
-   loss = fluid.layers.reduce_mean(out)
+   loss = paddle.mean(out)
adam.minimize(loss)
scheduler.epoch()
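These docstrings still target the legacy fluid.dygraph schedulers; only the Linear layer and the mean op inside them were modernised. For comparison, the same StepDecay example written entirely against the Paddle 2.x API: a sketch, not part of this commit, assuming paddle.optimizer.lr.StepDecay is available.

import numpy as np
import paddle

x = paddle.to_tensor(np.random.uniform(-1, 1, [10, 10]).astype("float32"))
linear = paddle.nn.Linear(10, 10)
scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.5, step_size=3)
adam = paddle.optimizer.Adam(learning_rate=scheduler, parameters=linear.parameters())
for epoch in range(9):
    for batch_id in range(5):
        loss = paddle.mean(linear(x))
        loss.backward()
        adam.step()
        adam.clear_grad()
    scheduler.step()  # decays the learning rate every `step_size` epochs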