Skip to content

Commit

Permalink
Fix typos, test=document_fix (#53540)
Browse files Browse the repository at this point in the history
  • Loading branch information
co63oc authored May 8, 2023
1 parent a299153 commit acefdeb
Show file tree
Hide file tree
Showing 5 changed files with 8 additions and 8 deletions.
2 changes: 1 addition & 1 deletion paddle/phi/backends/device_base.cc
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ bool DeviceInterface::QueryEvent(size_t dev_id, const event::Event* event) {
return true;
}

// memery manage
// memory manage
void DeviceInterface::MemoryCopyH2D(size_t dev_id,
void* dst,
const void* src,
Expand Down
4 changes: 2 additions & 2 deletions paddle/phi/core/compat/get_kerneltype_forvar_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ class KernelKey;
class DenseTensor;
/**
* Note: GetKernelTypeForVarContext is currently designed for oneDNN kernel when
* the related memeber function 'GetKernelTypeForVar' is special. It is
* the related member function 'GetKernelTypeForVar' is special. It is
* possible to leverage to other vendor libraries in the future.
*/
class GetKernelTypeForVarContext {
Expand All @@ -47,7 +47,7 @@ class GetKernelTypeForVarContext {

private:
const KernelKey* kernel_key_; // not owned
// Use AttributeMap in namespace 'phi' to avoid depending 'fuild'
// Use AttributeMap in namespace 'phi' to avoid depending 'fluid'
const AttributeMap* attrs_; // not owned
std::string* var_name_; // not owned
DenseTensor* tensor_; // not owned
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/core/compat/op_utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ const static std::string deprecated_kernel_name = "deprecated"; // NOLINT

const std::unordered_set<std::string> standard_kernel_suffixs({
"sr", // SelectedRows kernel
"raw" // fallback kernel of origfinal fluid op
"raw" // fallback kernel of original fluid op
});

/**
Expand Down
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/blas/blas.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ namespace funcs {
* if Mat A is [BatchSize, H, W], Mat B is [BatchSize, H, W]. It will be a
`batch_size` times of GEMM. The batched GEMM could be faster based on the
* implementation of the blas library. The batch size could be zero. If any
* matrix of `matmul` has a batch size, the will be a batched GEMM, too. e.g.,
* matrix of `matmul` has a batch size, there will be a batched GEMM, too. e.g.,
Mat A is [BatchSize, H1, W2], and Mat B [H2, W2], The result matrix will be
* [BatchSize, H1, W2]
*
Expand Down
6 changes: 3 additions & 3 deletions paddle/phi/kernels/funcs/blas/blaslt_impl.cu.h
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ namespace funcs {
// While kMatmul, kMatmulGrad, kMatmulGradWithoutBias share the same
// enum value, but if all elements for MatmulPlanner->GetKey() is same,
// no matter forward or backward, they could share the same descriptor
// cache, in that the descritpor is for decription of matmul operation.
// cache, in that the descriptor is for description of matmul operation.
enum MatmulFusedType {
kMatmul = CUBLASLT_EPILOGUE_DEFAULT,
kMatmulGrad = CUBLASLT_EPILOGUE_DEFAULT,
Expand Down Expand Up @@ -216,7 +216,7 @@ struct MatmulDescriptor {
cudaDataType_t scale_type = phi::backends::gpu::ToCudaDataType<MT>();
cublasComputeType_t compute_type = GetCudaComputeType<T>();

// Create operation desciriptor; see cublasLtMatmulDescAttributes_t for
// Create operation descriptor; see cublasLtMatmulDescAttributes_t for
// details about defaults; just need to set the transforms for A and B
PADDLE_ENFORCE_GPU_SUCCESS(
dynload::cublasLtMatmulDescCreate(&op_desc, compute_type, scale_type));
Expand Down Expand Up @@ -787,7 +787,7 @@ struct LinearGradWithCublasLt : public CublasLtBase<T> {
}
};
#else
// A void structure just for successfully complile.
// A void structure just for successful compilation.
struct MatmulPlanner {};
#endif // (PADDLE_WITH_CUDA) && CUDA_VERSION >= 11060

Expand Down

0 comments on commit acefdeb

Please sign in to comment.