
Commit 95ea530
Adjust code formatting (调整代码格式)
BrilliantYuKaimin committed Mar 22, 2022
1 parent 59ec69b commit 95ea530
Showing 3 changed files with 20 additions and 16 deletions.

paddle/phi/kernels/cpu/logspace_kernel.cc (10 changes: 5 additions & 5 deletions)
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-#include <cmath>
#include "paddle/phi/kernels/logspace_kernel.h"
+#include <cmath>
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/data_type_transform.h"
@@ -52,11 +52,11 @@ void LogspaceKernel(const Context& ctx,
    int half_num = num / 2;
    for (int i = 0; i < num; ++i) {
      if (i < half_num) {
-        out_data[i] = static_cast<T>(std::pow(
-            base_data, start_data + step * i));
+        out_data[i] =
+            static_cast<T>(std::pow(base_data, start_data + step * i));
      } else {
-        out_data[i] = static_cast<T>(std::pow(
-            base_data, stop_data - step * (num - i - 1)));
+        out_data[i] = static_cast<T>(
+            std::pow(base_data, stop_data - step * (num - i - 1)));
      }
    }
  } else {
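
Note: the loop above fills the first half of the outputs forward from the start exponent and the second half backward from the stop exponent, presumably so the last element lands exactly on base**stop despite rounding in the step. A minimal Python sketch of that indexing scheme (not part of the commit; it assumes step = (stop - start) / (num - 1), which is computed in a part of the file not shown in this hunk):

def logspace_ref(start, stop, num, base):
    # Mirrors the two-sided fill of the CPU kernel above. The num == 1 branch
    # is an assumption here; that path is handled outside the hunk shown.
    if num == 1:
        return [float(base) ** start]
    step = (stop - start) / (num - 1)
    out = []
    for i in range(num):
        if i < num // 2:
            # first half: forward from the start exponent
            out.append(float(base) ** (start + step * i))
        else:
            # second half: backward from the stop exponent
            out.append(float(base) ** (stop - step * (num - i - 1)))
    return out

print(logspace_ref(0, 3, 4, 10))  # -> [1.0, 10.0, 100.0, 1000.0]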

paddle/phi/kernels/gpu/logspace_kernel.cu (8 changes: 5 additions & 3 deletions)
@@ -13,13 +13,13 @@
// limitations under the License.

#include <cmath>
#include "paddle/phi/kernels/logspace_kernel.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/data_type_transform.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/logspace_kernel.h"

namespace phi {

@@ -32,7 +32,8 @@ __global__ void LogspaceKernelInner(
    if (index < size / 2) {
      out[index] = static_cast<T>(std::pow(base, start + step * index));
    } else {
-      out[index] = static_cast<T>(std::pow(base, stop - step * (size - index - 1)));
+      out[index] =
+          static_cast<T>(std::pow(base, stop - step * (size - index - 1)));
    }
  }
}
@@ -86,7 +87,8 @@ void LogspaceKernel(const Context& ctx,
    LogspaceKernelInner<T><<<grid, block, 0, stream>>>(
        start_data, stop_data, step, base_data, num, out_data);
  } else {
-    LogspaceSpecialKernel<T><<<grid, block, 0, stream>>>(start_data, base_data, out_data);
+    LogspaceSpecialKernel<T><<<grid, block, 0, stream>>>(
+        start_data, base_data, out_data);
  }
}

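
Note: in the launch above, each GPU thread evaluates output indices with the same two-sided formula as the CPU loop. The grid and block values are computed earlier in the file and are not shown in this diff; the following is a hypothetical sketch of the usual ceil-divide sizing, written in Python for brevity (the block size of 256 is an assumption, not taken from this commit):

num = 1000                          # number of logspace points to generate
block = 256                         # threads per block (assumed value)
grid = (num + block - 1) // block   # ceil-divide so every index gets a thread
print(grid, block)                  # -> 4 256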

python/paddle/fluid/layers/tensor.py (18 changes: 10 additions & 8 deletions)
@@ -1618,8 +1618,8 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None):
with device_guard("cpu"):
tensor_base = fill_constant([1], dtype, base)
if in_dygraph_mode():
return _C_ops.logspace(tensor_start, tensor_stop, tensor_num, tensor_base, 'dtype',
dtype)
return _C_ops.logspace(tensor_start, tensor_stop, tensor_num,
tensor_base, 'dtype', dtype)

helper = LayerHelper("logspace", **locals())

@@ -1638,10 +1638,10 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None):
                    ['float32', 'float64', 'int32', 'int64'], 'logspace')
    else:
        check_type(stop, 'stop', (int, float), 'logspace')

    if isinstance(num, Variable):
        check_dtype(num.dtype, 'num', ['int32'], 'logspace')

    if isinstance(base, Variable):
        check_dtype(base.dtype, 'base',
                    ['float32', 'float64', 'int32', 'int64'], 'logspace')
@@ -1665,10 +1665,12 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None):

    helper.append_op(
        type='logspace',
-        inputs={'Start': tensor_start,
-                'Stop': tensor_stop,
-                'Num': tensor_num,
-                'Base': tensor_base},
+        inputs={
+            'Start': tensor_start,
+            'Stop': tensor_stop,
+            'Num': tensor_num,
+            'Base': tensor_base
+        },
        attrs={'dtype': dtype},
        outputs={'Out': [out]})
    if isinstance(num, int):
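
Note: a small dygraph usage sketch of the layer edited above (not part of the commit; it assumes this branch exposes the function as paddle.fluid.layers.logspace and that the underlying logspace op is registered):

import paddle
import paddle.fluid.layers as layers

# 4 points from base**0 to base**3, i.e. [1., 10., 100., 1000.]
out = layers.logspace(start=0, stop=3, num=4, base=10.0, dtype='float32')
print(out.numpy())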
