
Commit fbc9a6a

yinfan98 and SigureMo authored
[CodeStyle][Typos][F-[12-17],F-[19-24],F-[26-28]] Fix typo(Flattend,flattend,flattern,Flattern,filpped,flaot,follwed,folowing,formater,formating,foramt,formt,formate,forwad,forwrad,forword,founf,framwork,frequence,fron,fullfill) (#70646)
--------- Co-authored-by: Nyakku Shigure <[email protected]>
1 parent 80c376f commit fbc9a6a

28 files changed: +138 -158 lines changed

CONTRIBUTING.md

+2 -2

@@ -39,7 +39,7 @@ PaddlePaddle uses this [Git branching model](http://nvie.com/posts/a-successful-
   pre-commit install
   ```
 
-Our pre-commit configuration requires clang-format 3.8 for auto-formating C/C++ code and yapf for Python.
+Our pre-commit configuration requires clang-format 3.8 for auto-formatting C/C++ code and yapf for Python.
 
 Once installed, `pre-commit` checks the style of code and documentation in every commit. We will see something like the following when you run `git commit`:
 
@@ -52,7 +52,7 @@ PaddlePaddle uses this [Git branching model](http://nvie.com/posts/a-successful-
 Check for broken symlinks................................................Passed
 Detect Private Key...................................(no files to check)Skipped
 Fix End of Files.....................................(no files to check)Skipped
-clang-formater.......................................(no files to check)Skipped
+clang-format.........................................(no files to check)Skipped
 [my-cool-stuff c703c041] add test file
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 233

_typos.toml

-21
@@ -46,27 +46,6 @@ dobule = 'dobule'
 Dowloading = 'Dowloading'
 downsteram = 'downsteram'
 fetchs = 'fetchs'
-Flattend = 'Flattend'
-flattend = 'flattend'
-flattern = 'flattern'
-Flattern = 'Flattern'
-filpped = 'filpped'
-flaot = 'flaot'
-follwed = 'follwed'
-folowing = 'folowing'
-formater = 'formater'
-formating = 'formating'
-foramt = 'foramt'
-formate = 'formate'
-formt = 'formt'
-forwrad = 'forwrad'
-forwad = 'forwad'
-forword = 'forword'
-founf = 'founf'
-framwork = 'framwork'
-frequence = 'frequence'
-fron = 'fron'
-fullfill = 'fullfill'
 Indexs = 'Indexs'
 indexs = 'indexs'
 indiates = 'indiates'

paddle/cinn/common/ir_util.h

+1 -1

@@ -191,7 +191,7 @@ inline void UnpackReduction(const ir::IndexExpr &expr, FLeaf fleaf) {
 }
 
 /*!
- * \brief Flattern the expression into a vector of expressions splited by `Add`
+ * \brief Flatten the expression into a vector of expressions splited by `Add`
  * or `Mul`.
 *
 * For example (Add):

paddle/cinn/runtime/cuda/cuda_util.cc

+1 -1

@@ -1742,7 +1742,7 @@ void cinn_call_cholesky_nvgpu(void *v_args,
   cinn_buffer_t *x = args[0].operator cinn_buffer_t *();
   cinn_buffer_t *out = args[1].operator cinn_buffer_t *();
   // In cuSOLVER, dense matrix stores in COL_MAJOR, thus FILL_MODE needs to be
-  // filpped. See also:
+  // flipped. See also:
   // https://docs.nvidia.com/cuda/cusolver/index.html#matrix-dense-format
   cublasFillMode_t uplo =
       upper ? CUBLAS_FILL_MODE_LOWER : CUBLAS_FILL_MODE_UPPER;
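The comment fixed here records a real pitfall: the caller fills the buffer in row-major order while cuSOLVER reads it column-major, and reinterpreting a row-major buffer as column-major transposes the matrix, so the "upper" and "lower" triangles swap. A minimal host-side sketch of the effect (plain C++; illustrative, not Paddle or cuSOLVER code):

```cpp
#include <cstdio>

// Reading a row-major buffer with column-major indexing yields the
// transpose, so an upper-triangular matrix comes back lower-triangular.
int main() {
  const int n = 3;
  // Upper-triangular matrix stored row-major: a(i, j) = a_rm[i * n + j].
  const float a_rm[n * n] = {1, 2, 3,
                             0, 4, 5,
                             0, 0, 6};
  // Interpret the same buffer column-major, as cuSOLVER would:
  // a_cm(i, j) = a_rm[j * n + i]. Nonzeros now sit at i >= j (lower).
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < n; ++j) printf("%g ", a_rm[j * n + i]);
    printf("\n");
  }
  return 0;
}
```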

paddle/fluid/framework/data_transform.cc

+1 -1

@@ -173,7 +173,7 @@ phi::GetKernelTypeForVarContext BuildGetKernelTypeForVarContext(
   if (has_infer_varkernel_fn) {
     for (auto &attr : fluid_attrs) {
       switch (attr.second.index()) {
-        case 3:  // string type in framwork::Attribute
+        case 3:  // string type in framework::Attribute
           (*phi_attrs)[attr.first] = PADDLE_GET_CONST(std::string, attr.second);
           break;
         default:
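The `case 3` above works because `framework::Attribute` is a variant type and `index()` reports which alternative it currently holds; the magic number is fixed by the declaration order of the alternatives. A sketch of the same dispatch with `std::variant` (the alternative list here is made up for illustration, not Paddle's real ordering):

```cpp
#include <iostream>
#include <string>
#include <variant>

// Hypothetical stand-in for framework::Attribute; the real type's
// alternative order determines what index() == 3 means.
using Attribute = std::variant<bool, int, float, std::string>;

int main() {
  Attribute attr = std::string("relu");
  switch (attr.index()) {
    case 3:  // std::string is the 4th alternative, so index() == 3
      std::cout << "string attr: " << std::get<std::string>(attr) << "\n";
      break;
    default:
      std::cout << "some other attribute type\n";
  }
  return 0;
}
```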

paddle/fluid/framework/new_executor/pir_interpreter.cc

+1 -1

@@ -979,7 +979,7 @@ void PirInterpreter::BuildInstruction() {
 }
 
 std::string PirInterpreter::DebugInstructions() {
-  // log formate: var[101] = pd_op.relu(var[100]) or for inplace op var[100] =
+  // log format: var[101] = pd_op.relu(var[100]) or for inplace op var[100] =
   // pd_op.relu_(var[100])
   std::stringstream ss;
   ss << "{outputs}"
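For readers unfamiliar with the format named in that comment, each instruction is logged as `var[output] = op_name(var[input], ...)`. A toy sketch of producing such a line (illustrative only, not the interpreter's actual code):

```cpp
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Build a debug line like: var[101] = pd_op.relu(var[100])
std::string FormatInstruction(const std::string& op_name,
                              const std::vector<int>& inputs,
                              int output) {
  std::stringstream ss;
  ss << "var[" << output << "] = " << op_name << "(";
  for (size_t i = 0; i < inputs.size(); ++i) {
    if (i > 0) ss << ", ";
    ss << "var[" << inputs[i] << "]";
  }
  ss << ")";
  return ss.str();
}

int main() {
  std::cout << FormatInstruction("pd_op.relu", {100}, 101) << "\n";
  // prints: var[101] = pd_op.relu(var[100])
}
```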

paddle/fluid/inference/tensorrt/op_teller.cc

+2 -2

@@ -3467,9 +3467,9 @@ struct CustomGenericPluginTeller : public Teller {
              "SetTrtInferShapeFn.";
       return false;
     }
-    auto& trt_supports_formate_config =
+    auto& trt_supports_format_config =
        OpMetaInfoHelper::GetTrtSupportsFormatConfig(op_info);
-    if (trt_supports_formate_config.empty()) {
+    if (trt_supports_format_config.empty()) {
      VLOG(3)
          << op_type
          << " has no trt supportsFormatCombination config. Please set by "

paddle/fluid/inference/tensorrt/plugin/custom_generic_plugin.cu

+3 -3

@@ -311,9 +311,9 @@ bool CustomGenericPlugin::supportsFormatCombination(
   auto& op_meta_info_map = OpMetaInfoMap::Instance();
   const auto& meta_info_map = op_meta_info_map.GetMap();
   auto& op_info = meta_info_map.at(op_desc_.Type()).front();
-  auto& supports_formate_config =
+  auto& supports_format_config =
      OpMetaInfoHelper::GetTrtSupportsFormatConfig(op_info);
-  PADDLE_ENFORCE_NE(supports_formate_config.empty(),
+  PADDLE_ENFORCE_NE(supports_format_config.empty(),
                     true,
                     common::errors::InvalidArgument(
                         "The %s op has no tensorrt plugin "
@@ -325,7 +325,7 @@ bool CustomGenericPlugin::supportsFormatCombination(
   size_t output_num = OpMetaInfoHelper::GetOutputs(op_info).size();
   std::vector<std::vector<std::pair<std::string, std::string>>>
       format_combinations;
-  for (auto& config : supports_formate_config) {
+  for (auto& config : supports_format_config) {
     auto format_combination = parseConfig(op_desc_.Type(), config);
     PADDLE_ENFORCE_EQ(input_num + output_num,
                       format_combination.size(),

paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.cu

+1 -1

@@ -77,7 +77,7 @@ __device__ half do_tanh<half>(half a) {
   return __float2half(tmp);
 }
 
-// the kernel below is not aligned with fluid fp32 forwrad ones, use it for
+// the kernel below is not aligned with fluid fp32 forward ones, use it for
 // fp16.
 template <typename T, unsigned TPB>
 __global__ void no_exact_gelu_kernel(
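The `no_exact_gelu_kernel` name refers to the tanh approximation of GELU, which is why its results do not match the exact erf-based fp32 kernels bit for bit. A plain C++ sketch of the two standard formulas for comparison (textbook definitions, not the plugin code):

```cpp
#include <cmath>
#include <cstdio>

// Tanh approximation: 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
float gelu_tanh(float x) {
  const float kAlpha = 0.7978845608028654f;  // sqrt(2 / pi)
  const float kBeta = 0.044715f;
  return 0.5f * x * (1.0f + std::tanh(kAlpha * (x + kBeta * x * x * x)));
}

// Exact form: 0.5 * x * (1 + erf(x / sqrt(2))).
float gelu_exact(float x) {
  return 0.5f * x * (1.0f + std::erf(x * 0.7071067811865475f));
}

int main() {
  for (float x : {-2.0f, -0.5f, 0.0f, 0.5f, 2.0f})
    printf("x=% .1f  tanh-approx=% .6f  exact=% .6f\n",
           x, gelu_tanh(x), gelu_exact(x));
}
```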

paddle/fluid/pir/dialect/operator/interface/infer_symbolic_shape/multiary_infer_sym.cc

+1 -1

@@ -2631,7 +2631,7 @@ bool GroupNormOpInferSymbolicShape(
     channel_idx = 1;
   } else {
     PADDLE_THROW(common::errors::Unimplemented(
-        "GroupNorm only suport NHWC and NCHW data formt"));
+        "GroupNorm only suport NHWC and NCHW data format"));
   }
 
   symbol::DimExpr channel_dim = x_shape.shape()[channel_idx];
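The surrounding branch simply picks the axis that holds channels: axis 1 for NCHW, the last axis for NHWC. A minimal sketch of that layout dispatch (hypothetical helper in plain C++, not the Paddle function):

```cpp
#include <stdexcept>
#include <string>
#include <vector>

// Return the channel axis for a GroupNorm-style input given its layout.
// NCHW keeps channels at axis 1; NHWC keeps them at the last axis.
int ChannelAxis(const std::string& layout, size_t rank) {
  if (layout == "NCHW") return 1;
  if (layout == "NHWC") return static_cast<int>(rank) - 1;
  throw std::invalid_argument("GroupNorm only supports NHWC and NCHW");
}

int main() {
  std::vector<long> shape = {8, 224, 224, 32};  // NHWC batch of images
  return ChannelAxis("NHWC", shape.size()) == 3 ? 0 : 1;
}
```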

paddle/phi/kernels/cpu/unique_kernel.cc

+9 -9

@@ -83,15 +83,15 @@ void UniqueRawKernel(const Context& context,
   if (axis.empty()) {
     phi::VisitDataTypeTiny(
         dtype,
-        phi::funcs::UniqueFlattendTensorFunctor<Context, T>(context,
-                                                            x,
-                                                            out,
-                                                            indices,
-                                                            index,
-                                                            counts,
-                                                            return_index,
-                                                            return_inverse,
-                                                            return_counts));
+        phi::funcs::UniqueFlattenedTensorFunctor<Context, T>(context,
+                                                             x,
+                                                             out,
+                                                             indices,
+                                                             index,
+                                                             counts,
+                                                             return_index,
+                                                             return_inverse,
+                                                             return_counts));
   } else {
     int axis_value = axis[0];
     axis_value = (axis_value == -1) ? (x.dims().size() - 1) : axis_value;
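`VisitDataTypeTiny` is a runtime-to-compile-time dispatch: it inspects the requested index dtype and invokes the functor's templated `apply<IndexT>()` with the matching C++ type, which is why the renamed functor exposes a templated `apply`. A stripped-down sketch of the pattern (illustrative names, not phi's actual API):

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>

enum class DataType { INT32, INT64 };

// Map a runtime dtype tag to a compile-time type by calling
// visitor.apply<T>() with the matching C++ type.
template <typename Visitor>
void VisitIndexType(DataType dtype, const Visitor& visitor) {
  switch (dtype) {
    case DataType::INT32: visitor.template apply<int32_t>(); break;
    case DataType::INT64: visitor.template apply<int64_t>(); break;
    default: throw std::invalid_argument("unsupported index dtype");
  }
}

struct PrintIndexWidth {
  template <typename IndexT>
  void apply() const {
    std::cout << "index width: " << sizeof(IndexT) * 8 << " bits\n";
  }
};

int main() { VisitIndexType(DataType::INT64, PrintIndexWidth{}); }
```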

paddle/phi/kernels/funcs/math_cuda_utils.h

+1 -1

@@ -298,7 +298,7 @@ __inline__ __device__ T PartialWarpReduceMin(T val, warp_mask_t lane_mask) {
   T warp_val = __shfl_sync(lane_mask, val, 0, warpSize);
 #else
   T warp_val = __shfl(
-      val, 0, warpSize);  // To fullfill the data in each thread of this warp.
+      val, 0, warpSize);  // To fulfill the data in each thread of this warp.
 #endif
   warp_val = val;
 
paddle/phi/kernels/funcs/unique_functor.h

+28 -28

@@ -130,15 +130,15 @@ static bool Equal(const DenseTensor& a, const DenseTensor& b) {
 }
 
 template <typename Context, typename InT, typename IndexT>
-static void UniqueFlattendTensor(const Context& context,
-                                 const DenseTensor& in,
-                                 DenseTensor* out,
-                                 DenseTensor* indices,
-                                 DenseTensor* index,
-                                 DenseTensor* count,
-                                 bool return_index,
-                                 bool return_inverse,
-                                 bool return_counts) {
+static void UniqueFlattenedTensor(const Context& context,
+                                  const DenseTensor& in,
+                                  DenseTensor* out,
+                                  DenseTensor* indices,
+                                  DenseTensor* index,
+                                  DenseTensor* count,
+                                  bool return_index,
+                                  bool return_inverse,
+                                  bool return_counts) {
   const InT* in_data = in.data<InT>();
   std::set<InT> unique(in_data, in_data + in.numel());
   out->Resize(common::make_ddim({static_cast<int64_t>(unique.size())}));
@@ -327,7 +327,7 @@ static void UniqueDim(const Context& context,
 }
 
 template <typename Context, typename InT>
-struct UniqueFlattendTensorFunctor {
+struct UniqueFlattenedTensorFunctor {
   const Context& ctx_; /* */
   const DenseTensor& in_;
   DenseTensor* out_;
@@ -338,15 +338,15 @@ struct UniqueFlattendTensorFunctor {
   const bool return_inverse_;
   const bool return_counts_;
 
-  UniqueFlattendTensorFunctor(const Context& context,
-                              const DenseTensor& in,
-                              DenseTensor* out,
-                              DenseTensor* indices,
-                              DenseTensor* index,
-                              DenseTensor* count,
-                              bool return_index,
-                              bool return_inverse,
-                              bool return_counts)
+  UniqueFlattenedTensorFunctor(const Context& context,
+                               const DenseTensor& in,
+                               DenseTensor* out,
+                               DenseTensor* indices,
+                               DenseTensor* index,
+                               DenseTensor* count,
+                               bool return_index,
+                               bool return_inverse,
+                               bool return_counts)
       : ctx_(context),
         in_(in),
         out_(out),
@@ -359,15 +359,15 @@ struct UniqueFlattendTensorFunctor {
 
   template <typename IndexT>
   void apply() const {
-    UniqueFlattendTensor<Context, InT, IndexT>(ctx_,
-                                               in_,
-                                               out_,
-                                               indices_,
-                                               index_,
-                                               count_,
-                                               return_index_,
-                                               return_inverse_,
-                                               return_counts_);
+    UniqueFlattenedTensor<Context, InT, IndexT>(ctx_,
+                                                in_,
+                                                out_,
+                                                indices_,
+                                                index_,
+                                                count_,
+                                                return_index_,
+                                                return_inverse_,
+                                                return_counts_);
   }
 };
 
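As the context lines show, the flattened path treats the tensor as a single 1-D range and pours it into a `std::set`, so the result is deduplicated and sorted in one step. A self-contained sketch of that core idea on a plain vector (not phi tensors):

```cpp
#include <iostream>
#include <set>
#include <vector>

// Unique over a "flattened" buffer: ignore the original shape, feed the
// whole range into a std::set, then copy out the sorted distinct values.
std::vector<float> UniqueFlattened(const std::vector<float>& in) {
  std::set<float> unique(in.begin(), in.end());
  return std::vector<float>(unique.begin(), unique.end());
}

int main() {
  std::vector<float> data = {3.f, 1.f, 2.f, 3.f, 1.f, 2.f};  // any shape, flattened
  for (float v : UniqueFlattened(data)) std::cout << v << " ";  // prints: 1 2 3
  std::cout << "\n";
}
```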

paddle/phi/kernels/gpu/rms_norm_funcs.h

+1 -1

@@ -14,7 +14,7 @@ limitations under the License. */
 
 /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
 
-/*This code is copied fron NVIDIA apex:
+/*This code is copied from NVIDIA apex:
 * https://github.com/NVIDIA/apex
 * with minor changes. */
 
paddle/phi/kernels/gpu/rms_norm_grad_kernel.cu

+1 -1

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 /* Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. */
-/*This code is copied fron NVIDIA apex:
+/*This code is copied from NVIDIA apex:
 * https://github.com/NVIDIA/apex
 * with minor changes. */
 

paddle/phi/kernels/gpu/unique_consecutive_functor.h

+2 -2

@@ -32,7 +32,7 @@
 
 namespace phi {
 
-// The core logic of computing Unique Consecutive for a flattend Tensor
+// The core logic of computing Unique Consecutive for a flattened Tensor
 template <typename Context,
           typename InT,
           typename IndexT,
@@ -113,7 +113,7 @@ static void UniqueConsecutiveFlattenedCUDATensor(const Context& context,
   }
 }
 
-// functor for processing a flattend Tensor
+// functor for processing a flattened Tensor
 template <typename Context, typename InT>
 struct UniqueConsecutiveFlattenedCUDAFunctor {
   const Context& ctx_;
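Unlike full `unique`, unique-consecutive collapses only adjacent runs of equal values (the `std::unique` semantics), preserving order without sorting. A CPU sketch of the flattened case with run counts (plain C++, not the CUDA implementation):

```cpp
#include <iostream>
#include <vector>

// Collapse adjacent runs of equal values, like std::unique, and record
// each run's length. {1,1,2,2,1} -> values {1,2,1}, counts {2,2,1}.
void UniqueConsecutive(const std::vector<int>& in,
                       std::vector<int>* values,
                       std::vector<int>* counts) {
  for (int v : in) {
    if (values->empty() || values->back() != v) {
      values->push_back(v);
      counts->push_back(1);
    } else {
      ++counts->back();
    }
  }
}

int main() {
  std::vector<int> values, counts;
  UniqueConsecutive({1, 1, 2, 2, 3, 1, 1}, &values, &counts);
  for (size_t i = 0; i < values.size(); ++i)
    std::cout << values[i] << " x" << counts[i] << "\n";  // 1 x2, 2 x2, 3 x1, 1 x2
}
```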
