Commit 044b37e

[CodeStyle][Typos][V-[1-11],W-1] Fix typos (vaccum,valud,validat,VAILD,valus,valuse,Varible,varaible,vecotr,vesion,verson,Vetical,vunerability,varn) (#70680)
* fix
* fix
1 parent b8d5f67 commit 044b37e

19 files changed (+63, -73 lines)


SECURITY.md (+1 -1)

@@ -25,7 +25,7 @@ PaddlePaddle always take code security seriously. However, due to the complexity
 
 ### Code security tools
 
-PaddlePaddle security team attaches great importance to the security of the framework. In order to find and fix security issues as soon as possible, we are continuously conducting code security audit and developing automatic vunerability discovery tools. We have already open sourced some of them to the community, hoping this could encourage people to contribute and improve the safety and robustness of PaddlePaddle. [This tool](https://github.com/PaddlePaddle/PaddleSleeve/tree/main/CodeSecurity) includes two parts. The dynamic part includes some op fuzzer samples. And the static part includes some CodeQL samples. Both of them are aim to find vulnerabilities in PaddlePaddle framework codebase. By referring the samples, security researchers can write their own fuzzers or QLs to test more PaddlePaddle modules, and find more code security issues.
+PaddlePaddle security team attaches great importance to the security of the framework. In order to find and fix security issues as soon as possible, we are continuously conducting code security audit and developing automatic vulnerability discovery tools. We have already open sourced some of them to the community, hoping this could encourage people to contribute and improve the safety and robustness of PaddlePaddle. [This tool](https://github.com/PaddlePaddle/PaddleSleeve/tree/main/CodeSecurity) includes two parts. The dynamic part includes some op fuzzer samples. And the static part includes some CodeQL samples. Both of them are aim to find vulnerabilities in PaddlePaddle framework codebase. By referring the samples, security researchers can write their own fuzzers or QLs to test more PaddlePaddle modules, and find more code security issues.
 
 ### Reporting vulnerabilities
 
_typos.toml (+1 -13)

@@ -43,6 +43,7 @@ Halfs = 'Halfs'
 kinf = 'kinf'
 pash = 'pash'
 unpacket = "unpacket"
+vaccum = 'vaccum'
 
 # These words need to be fixed
 Indexs = 'Indexs'
@@ -292,19 +293,6 @@ unsed = 'unsed'
 uesd = 'uesd'
 usefull = 'usefull'
 usless = 'usless'
-vaccum = 'vaccum'
-valud = 'valud'
-VAILD = 'VAILD'
-valus = 'valus'
-valuse = 'valuse'
-Varible = 'Varible'
-varaible = 'varaible'
-vecotr = 'vecotr'
-verson = 'verson'
-vesion = 'vesion'
-Vetical = 'Vetical'
-vunerability = 'vunerability'
-varn = 'varn'
 warpped = 'warpped'
 warpper = 'warpper'
 Warpper = 'Warpper'
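
The 'vaccum' entry moves above the "# These words need to be fixed" marker, so it stays as an accepted spelling, while the entries for the typos corrected in this commit are dropped from the to-fix list. A minimal sketch of how such a whitelist can be inspected, assuming the standard typos-cli layout in which these entries live under [default.extend-words] (the table header is outside the hunks above):

    # Sketch only: in a typos-cli config, mapping a word to itself accepts that
    # spelling; the section name [default.extend-words] is an assumption here.
    import tomllib  # Python 3.11+

    with open("_typos.toml", "rb") as f:
        extend_words = tomllib.load(f)["default"]["extend-words"]

    # After this commit only 'vaccum' remains whitelisted; the corrected spellings are gone.
    for word in ("vaccum", "vecotr", "Varible"):
        print(word, word in extend_words)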

paddle/cinn/hlir/framework/pir/fusion_info.cc (+1 -1)

@@ -175,7 +175,7 @@ void FusionInfo::ParseOpInfos(const OpLoweringGroup& group) {
 void FusionInfo::ParseInputDimExprs(const OpLoweringGroup& group) {
   // NOTE(Aurelius84): [Why try get DimExpr from Group firstly? ]
   // In case of BroadcastTree, we will clone many Groups containing same ops.
-  // But its input valus is defining outside and will have same DimExprs in
+  // But its input values is defining outside and will have same DimExprs in
   // global ShapeAnalysis, which leading hash conflict unexpected.
   const auto TryGetDimExprsFromGroup = [&](const ::pir::Value& value) -> bool {
     if (!group.HasShapeOrDataExprs(value)) return false;

paddle/cinn/operator_fusion/pir_graph_analyzing/dim_relation.cc (+3 -3)

@@ -20,12 +20,12 @@
 namespace cinn::fusion {
 
 ValueUsage GetValueUsage(const pir::Value& v, const size_t usage_idx) {
-  ValueUsage valud_dim;
+  ValueUsage value_dim;
   size_t rank = GetRank(v);
   for (size_t i = 0; i < rank; ++i) {
-    valud_dim.emplace_back(v, i, usage_idx);
+    value_dim.emplace_back(v, i, usage_idx);
   }
-  return valud_dim;
+  return value_dim;
 }
 
 static std::vector<ValueUsage> GetInputValueUsage(pir::Operation* op) {

paddle/fluid/framework/data_feed.cu (+12 -12)

@@ -1804,7 +1804,7 @@ int GraphDataGenerator::FillSlotFeature(uint64_t *d_walk,
                        slot_num);
 
   std::vector<std::shared_ptr<phi::Allocation>> ins_slot_num(slot_num, nullptr);
-  std::vector<uint64_t *> ins_slot_num_vecotr(slot_num, NULL);
+  std::vector<uint64_t *> ins_slot_num_vector(slot_num, NULL);
   std::shared_ptr<phi::Allocation> d_ins_slot_num_vector =
       memory::AllocShared(place_, (slot_num) * sizeof(uint64_t *));
   uint64_t **d_ins_slot_num_vector_ptr =
@@ -1815,15 +1815,15 @@ int GraphDataGenerator::FillSlotFeature(uint64_t *d_walk,
     if ((*feed_info_)[feed_vec_idx + 2 * i].type[0] == 'u') {
       ins_slot_num[ii] =
           memory::AllocShared(place_, key_num * sizeof(uint64_t));
-      ins_slot_num_vecotr[ii] =
+      ins_slot_num_vector[ii] =
           reinterpret_cast<uint64_t *>(ins_slot_num[ii]->ptr());
       ii++;
     }
   }
   if (slot_num > 0) {
     CUDA_CHECK(
         cudaMemcpyAsync(reinterpret_cast<char *>(d_ins_slot_num_vector_ptr),
-                        ins_slot_num_vecotr.data(),
+                        ins_slot_num_vector.data(),
                         sizeof(uint64_t *) * slot_num,
                         cudaMemcpyHostToDevice,
                         train_stream_));
@@ -1844,7 +1844,7 @@ int GraphDataGenerator::FillSlotFeature(uint64_t *d_walk,
     size_t temp_storage_bytes = 0;
     CUDA_CHECK(cub::DeviceScan::InclusiveSum(NULL,
                                              temp_storage_bytes,
-                                             ins_slot_num_vecotr[0],
+                                             ins_slot_num_vector[0],
                                              slot_lod_tensor_ptr_[0] + 1,
                                              key_num,
                                              train_stream_));
@@ -1863,7 +1863,7 @@ int GraphDataGenerator::FillSlotFeature(uint64_t *d_walk,
           slot_lod_tensor_ptr_[ii], 0, sizeof(uint64_t), train_stream_));
       CUDA_CHECK(cub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
                                                temp_storage_bytes,
-                                               ins_slot_num_vecotr[ii],
+                                               ins_slot_num_vector[ii],
                                                slot_lod_tensor_ptr_[ii] + 1,
                                                key_num,
                                                train_stream_));
@@ -1893,7 +1893,7 @@ int GraphDataGenerator::FillSlotFeature(uint64_t *d_walk,
          d_feature_list_ptr,
          d_feature_size_prefixsum_ptr,
          d_each_ins_slot_num_inner_prefix_ptr,
-         ins_slot_num_vecotr[ii],
+         ins_slot_num_vector[ii],
          slot_lod_tensor_ptr_[ii],
          slot_tensor_ptr_[ii],
          ii,
@@ -2109,7 +2109,7 @@ int GraphDataGenerator::FillFloatFeature(uint64_t *d_walk,
 
   std::vector<std::shared_ptr<phi::Allocation>> ins_slot_num(float_slot_num_,
                                                              nullptr);
-  std::vector<uint64_t *> ins_slot_num_vecotr(float_slot_num_, NULL);
+  std::vector<uint64_t *> ins_slot_num_vector(float_slot_num_, NULL);
   std::shared_ptr<phi::Allocation> d_ins_slot_num_vector =
       memory::AllocShared(place_, (float_slot_num_) * sizeof(uint64_t *));
   uint64_t **d_ins_slot_num_vector_ptr =
@@ -2120,7 +2120,7 @@ int GraphDataGenerator::FillFloatFeature(uint64_t *d_walk,
     if ((*feed_info_)[feed_vec_idx + 2 * i].type[0] == 'f') {
       ins_slot_num[ii] =
           memory::AllocShared(place_, key_num * sizeof(uint64_t));
-      ins_slot_num_vecotr[ii] =
+      ins_slot_num_vector[ii] =
          reinterpret_cast<uint64_t *>(ins_slot_num[ii]->ptr());
       ii++;
     }
@@ -2129,7 +2129,7 @@ int GraphDataGenerator::FillFloatFeature(uint64_t *d_walk,
   if (float_slot_num_ > 0) {
     CUDA_CHECK(
         cudaMemcpyAsync(reinterpret_cast<char *>(d_ins_slot_num_vector_ptr),
-                        ins_slot_num_vecotr.data(),
+                        ins_slot_num_vector.data(),
                         sizeof(uint64_t *) * float_slot_num_,
                         cudaMemcpyHostToDevice,
                         train_stream_));
@@ -2152,7 +2152,7 @@ int GraphDataGenerator::FillFloatFeature(uint64_t *d_walk,
     size_t temp_storage_bytes = 0;
     CUDA_CHECK(cub::DeviceScan::InclusiveSum(NULL,
                                              temp_storage_bytes,
-                                             ins_slot_num_vecotr[0],
+                                             ins_slot_num_vector[0],
                                              slot_lod_tensor_ptr_[0] + 1,
                                              key_num,
                                              train_stream_));
@@ -2170,7 +2170,7 @@ int GraphDataGenerator::FillFloatFeature(uint64_t *d_walk,
           slot_lod_tensor_ptr_[ii], 0, sizeof(uint64_t), train_stream_));
       CUDA_CHECK(cub::DeviceScan::InclusiveSum(d_temp_storage->ptr(),
                                                temp_storage_bytes,
-                                               ins_slot_num_vecotr[ii],
+                                               ins_slot_num_vector[ii],
                                                slot_lod_tensor_ptr_[ii] + 1,
                                                key_num,
                                                train_stream_));
@@ -2200,7 +2200,7 @@ int GraphDataGenerator::FillFloatFeature(uint64_t *d_walk,
          d_feature_list_ptr,
          d_feature_size_prefixsum_ptr,
          d_each_ins_slot_num_inner_prefix_ptr,
-         ins_slot_num_vecotr[ii],
+         ins_slot_num_vector[ii],
          slot_lod_tensor_ptr_[ii],
          slot_tensor_ptr_[ii],
          ii,
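
The renamed ins_slot_num_vector buffers feed cub::DeviceScan::InclusiveSum, which turns per-instance slot counts into LoD offsets: element 0 of slot_lod_tensor_ptr_ is memset to 0 and the running totals are written starting at element 1, as the calls above show. A rough NumPy sketch of that computation (illustrative only, not the CUDA path):

    # Per-instance feature counts -> LoD offsets, as the InclusiveSum calls above do on GPU.
    import numpy as np

    ins_slot_num = np.array([2, 0, 3, 1], dtype=np.uint64)  # counts for key_num instances
    lod = np.zeros(len(ins_slot_num) + 1, dtype=np.uint64)
    lod[1:] = np.cumsum(ins_slot_num)                        # inclusive prefix sum
    print(lod)  # [0 2 2 5 6]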

paddle/fluid/pir/dialect/op_generator/op_gen.py (+4 -4)

@@ -1191,7 +1191,7 @@ def GenOneDnnExtraAttrsDefaultValue(onednn_extra_args):
 """
 ARRAY_ATTRIBUTE_TEMPLATE = """ std::vector<pir::Attribute> vec_{attr_name};
   {{
-    std::vector<{cpp_type}> vec_values = {attr_valuse};
+    std::vector<{cpp_type}> vec_values = {attr_values};
     for (size_t i = 0; i < static_cast<size_t>(vec_values.size()); i++) {{
       {create_attribute}
       vec_{attr_name}.push_back(attr_{attr_name});
@@ -1215,7 +1215,7 @@ def GenOneDnnExtraAttrsDefaultValue(onednn_extra_args):
                 cpp_type=onednn_extra_args[idx]['typename'].replace(
                     '[]', ''
                 ),
-                attr_valuse=onednn_extra_args[idx]['default_value'],
+                attr_values=onednn_extra_args[idx]['default_value'],
                 create_attribute=INTARRAY_STR_TEMPLATE.format(
                     attr_name=onednn_extra_args[idx]['name'],
                     op_attribute_type=inner_attribute_type,
@@ -1228,7 +1228,7 @@ def GenOneDnnExtraAttrsDefaultValue(onednn_extra_args):
                 cpp_type=onednn_extra_args[idx]['typename'].replace(
                     '[]', ''
                 ),
-                attr_valuse=onednn_extra_args[idx]['default_value'],
+                attr_values=onednn_extra_args[idx]['default_value'],
                 create_attribute=SCALAR_STR_TEMPLATE.format(
                     attr_name=onednn_extra_args[idx]['name'],
                     attr="vec_values[i]",
@@ -1240,7 +1240,7 @@ def GenOneDnnExtraAttrsDefaultValue(onednn_extra_args):
                 cpp_type=onednn_extra_args[idx]['typename'].replace(
                     '[]', ''
                 ),
-                attr_valuse=onednn_extra_args[idx]['default_value'],
+                attr_values=onednn_extra_args[idx]['default_value'],
                 create_attribute=STR_TEMPLATE.format(
                     attr_name=onednn_extra_args[idx]['name'],
                     op_attribute_type=inner_attribute_type,
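
The attr_valuse placeholder is renamed to attr_values both inside ARRAY_ATTRIBUTE_TEMPLATE and at every .format() call site; str.format resolves placeholders by keyword name, so the two sides must change together. A minimal sketch with a simplified stand-in template (not Paddle's full generator):

    # Simplified stand-in for the generator template in op_gen.py.
    ARRAY_ATTRIBUTE_TEMPLATE = "std::vector<{cpp_type}> vec_values = {attr_values};"

    # The keyword passed to .format() must match the placeholder name exactly.
    print(ARRAY_ATTRIBUTE_TEMPLATE.format(cpp_type="int", attr_values="{1, 2, 3}"))
    # -> std::vector<int> vec_values = {1, 2, 3};

    # Renaming only one side leaves an unresolved placeholder and raises KeyError.
    try:
        ARRAY_ATTRIBUTE_TEMPLATE.format(cpp_type="int", attr_valuse="{1, 2, 3}")
    except KeyError as err:
        print("unresolved placeholder:", err)  # -> unresolved placeholder: 'attr_values'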

paddle/fluid/platform/device/ipu/ipu_utils.h (+1 -1)

@@ -104,7 +104,7 @@ const popart::DataType OnnxDType2PopartType(const ONNXDataType type);
 const ONNXDataType VarType2OnnxDType(const VarType::Type type);
 // VarType::Type to String in Popart
 const std::string VarType2PopartStr(const VarType::Type type);
-// Get bool from environment varaible
+// Get bool from environment variable
 const bool GetBoolEnv(const std::string& str);
 // Request number of ipus must be pow(2, n)
 const int RequestIpus(const int num_ipus);

paddle/phi/kernels/funcs/norm_utils.cu.h (+7 -7)

@@ -667,12 +667,12 @@ void NormDoubleGradFunctor(const DeviceContext &ctx,
 }
 
 template <typename T, typename BnT>
-__device__ __forceinline__ void BlockReduceByVetical(BnT x_sum,
-                                                     BnT x_square_sum,
-                                                     BnT *smem_sum,
-                                                     BnT *smem_square_sum,
-                                                     BnT *x_sum_out,
-                                                     BnT *x_square_sum_out) {
+__device__ __forceinline__ void BlockReduceByVertical(BnT x_sum,
+                                                      BnT x_square_sum,
+                                                      BnT *smem_sum,
+                                                      BnT *smem_square_sum,
+                                                      BnT *x_sum_out,
+                                                      BnT *x_square_sum_out) {
   int tid = threadIdx.x + threadIdx.y * blockDim.x;
 #pragma unroll
   for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) {
@@ -733,7 +733,7 @@ __device__ __forceinline__ void ReduceSumPost(const int C,  // channels
 }
 
     // vertical block sum
-    funcs::BlockReduceByVetical<T, BnT>(
+    funcs::BlockReduceByVertical<T, BnT>(
         *sum1, *sum2, &cache1[0], &cache2[0], sum1, sum2);
   }
 }
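
BlockReduceByVertical (the renamed helper defined above) folds per-thread partial sums along threadIdx.y: the halving loop shown in the hunk repeatedly combines the upper half of the rows with the lower half until row 0 holds the totals. A rough NumPy sketch of that pattern (illustrative only, not CUDA; assumes a power-of-two blockDim.y, as the halving loop does):

    # Each row stands for one threadIdx.y slice of shared memory; columns are threadIdx.x.
    import numpy as np

    block_dim_y, block_dim_x = 8, 4
    vals = np.arange(block_dim_y * block_dim_x, dtype=np.float64).reshape(
        block_dim_y, block_dim_x
    )
    col_sums = vals.sum(axis=0)

    offset = block_dim_y // 2
    while offset > 0:
        vals[:offset] += vals[offset:2 * offset]  # fold the upper half onto the lower half
        offset //= 2

    print(np.array_equal(vals[0], col_sums))  # True: row 0 now holds the vertical sums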

paddle/phi/kernels/funcs/sync_batch_norm_utils.h (+2 -2)

@@ -193,7 +193,7 @@ __global__ void KeBackwardLocalStats2D(const T *dy,
       auto x_i = static_cast<BatchNormParamType<T>>(x[id]);
       sum2 += g * (x_i - mean);
     }
-    funcs::BlockReduceByVetical<T, BatchNormParamType<T>>(
+    funcs::BlockReduceByVertical<T, BatchNormParamType<T>>(
         sum1, sum2, &smem_sum[0], &smem_square_sum[0], &sum1, &sum2);
 
     if (gridDim.y > 1) {
@@ -304,7 +304,7 @@ static __global__ void KeBNBackwardScaleBias2D(
       db_sum += dy_i;
     }
 
-    funcs::BlockReduceByVetical<T, BatchNormParamType<T>>(
+    funcs::BlockReduceByVertical<T, BatchNormParamType<T>>(
         ds_sum, db_sum, &smem_sum[0], &smem_square_sum[0], &ds_sum, &db_sum);
 
     if (gridDim.y > 1) {

paddle/phi/kernels/fusion/fp8_gemm/fp8_gemm_with_cublasLt/cublaslt_gemm.h (+2 -2)

@@ -328,7 +328,7 @@ void cublaslt_fp8_fp8_fp16_gemm(
   PADDLE_ENFORCE_EQ(bias->dims()[0] == n,
                     true,
                     common::errors::InvalidArgument(
-                        "FP8 gemm bias_vecotr_dim needs to equal "
+                        "FP8 gemm bias_vector_dim needs to equal "
                         "to n, n = %d, but bias_vector_dim = %d",
                         n,
                         bias->dims()[0]));
@@ -385,7 +385,7 @@ void cublaslt_fp8_fp8_bf16_gemm(
   PADDLE_ENFORCE_EQ(bias->dims()[0] == n,
                     true,
                     common::errors::InvalidArgument(
-                        "FP8 gemm bias_vecotr_dim needs to equal "
+                        "FP8 gemm bias_vector_dim needs to equal "
                         "to n, n = %d, but bias_vector_dim = %d",
                         n,
                         bias->dims()[0]));
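
The corrected message states the shape rule these checks enforce: for an (m, k) x (k, n) FP8 GEMM, the bias vector must have length n. A small NumPy sketch of that rule (illustrative only, not the cuBLASLt path):

    # The bias for an (m, k) x (k, n) matmul must have bias_vector_dim == n.
    import numpy as np

    m, k, n = 4, 8, 16
    a = np.random.rand(m, k).astype(np.float32)
    b = np.random.rand(k, n).astype(np.float32)
    bias = np.random.rand(n).astype(np.float32)  # length n, as PADDLE_ENFORCE_EQ requires

    out = a @ b + bias  # bias broadcasts across the m rows
    print(out.shape)    # (4, 16)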

paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu (+7 -7)

@@ -286,12 +286,12 @@ static __global__ void BNBackward2DChannelLastStage1(
     }
 
     // vertical block sum
-    funcs::BlockReduceByVetical<T, BatchNormParamType<T>>(x_sum,
-                                                          x_square_sum,
-                                                          &smem_sum[0],
-                                                          &smem_square_sum[0],
-                                                          &x_sum,
-                                                          &x_square_sum);
+    funcs::BlockReduceByVertical<T, BatchNormParamType<T>>(x_sum,
+                                                           x_square_sum,
+                                                           &smem_sum[0],
+                                                           &smem_square_sum[0],
+                                                           &x_sum,
+                                                           &x_square_sum);
 
     if (gridDim.y > 1) {
       __shared__ bool is_last_block_done;
@@ -366,7 +366,7 @@ static __global__ void BNBackward2DChannelLastStage2(
     }
 
     // vertical block sum
-    funcs::BlockReduceByVetical<T, BatchNormParamType<T>>(
+    funcs::BlockReduceByVertical<T, BatchNormParamType<T>>(
         ds_sum, db_sum, &smem_ds_sum[0], &smem_db_sum[0], &ds_sum, &db_sum);
 
     if (gridDim.y > 1) {

paddle/phi/kernels/gpu/batch_norm_kernel.cu (+6 -6)

@@ -247,12 +247,12 @@ static __global__ void BNForwardTraining2DChannelLastCompStat(
     }
 
     // vertical block sum
-    funcs::BlockReduceByVetical<T, BatchNormParamType<T>>(x_sum,
-                                                          x_square_sum,
-                                                          &smem_sum[0],
-                                                          &smem_square_sum[0],
-                                                          &x_sum,
-                                                          &x_square_sum);
+    funcs::BlockReduceByVertical<T, BatchNormParamType<T>>(x_sum,
+                                                           x_square_sum,
+                                                           &smem_sum[0],
+                                                           &smem_square_sum[0],
+                                                           &x_sum,
+                                                           &x_square_sum);
 
     if (gridDim.y > 1) {
       __shared__ bool is_last_block_done;

python/paddle/distributed/fleet/base/util_factory.py (+9 -7)

@@ -738,28 +738,30 @@ def need_highlight(name: str) -> bool:
         # TODO(gongwb): format the var.type
         # create var
         if var.persistable:
-            varn = graph.add_param(
+            var_name = graph.add_param(
                 var.name,
                 str(var.type).replace("\n", "<br />", 1),
                 highlight=need_highlight(var.name),
             )
         else:
-            varn = graph.add_arg(var.name, highlight=need_highlight(var.name))
-        vars[var.name] = varn
+            var_name = graph.add_arg(
+                var.name, highlight=need_highlight(var.name)
+            )
+        vars[var.name] = var_name
 
     def add_op_link_var(op, var, op2var=False):
         for arg in var.arguments:
             if arg not in vars:
                 # add missing variables as argument
                 vars[arg] = graph.add_arg(arg, highlight=need_highlight(arg))
-            varn = vars[arg]
+            var_name = vars[arg]
             highlight = need_highlight(op.description) or need_highlight(
-                varn.description
+                var_name.description
             )
             if op2var:
-                graph.add_edge(op, varn, highlight=highlight)
+                graph.add_edge(op, var_name, highlight=highlight)
             else:
-                graph.add_edge(varn, op, highlight=highlight)
+                graph.add_edge(var_name, op, highlight=highlight)
 
     for op in desc.ops:
         opn = graph.add_op(op.type, highlight=need_highlight(op.type))

python/setup.py.in (+1 -1)

@@ -40,7 +40,7 @@ def git_commit() -> str:
     return str(git_commit)
 
 def _get_version_detail(idx):
-    assert idx < 3, "vesion info consists of %(major)d.%(minor)d.%(patch)d, \
+    assert idx < 3, "version info consists of %(major)d.%(minor)d.%(patch)d, \
         so detail index must less than 3"
 
     if re.match(r'@TAG_VERSION_REGEX@', '@PADDLE_VERSION@'):

python/setup_cinn.py.in (+1 -1)

@@ -24,7 +24,7 @@ def git_commit():
    return str(git_commit)
 
 def _get_version_detail(idx):
-    assert idx < 3, "vesion info consists of %(major)d.%(minor)d.%(patch)d, \
+    assert idx < 3, "version info consists of %(major)d.%(minor)d.%(patch)d, \
        so detail index must less than 3"
 
    if re.match(r'${TAG_VERSION_REGEX}', '${PADDLE_VERSION}'):
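
Both setup templates carry the same assertion about "major.minor.patch" version strings. A hypothetical sketch of the check it expresses (the helper below takes the version as an argument purely for illustration; the real _get_version_detail reads the build-time @PADDLE_VERSION@ / ${PADDLE_VERSION} value instead):

    # Hypothetical helper: a detail index into "major.minor.patch" must be 0, 1 or 2.
    def get_version_detail(version: str, idx: int) -> int:
        assert idx < 3, (
            "version info consists of major.minor.patch, "
            "so detail index must be less than 3"
        )
        return int(version.split(".")[idx])

    print(get_version_detail("3.0.0", 1))  # 0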

test/cpp/utils/span_test.cc (+2 -2)

@@ -996,7 +996,7 @@ TEST(iterator, span) {
     PADDLE_ENFORCE_EQ(
         std::is_sorted(vec.cbegin(), vec.cend()),
         true,
-        common::errors::Fatal("Varible `vec` should be sorted, please check"));
+        common::errors::Fatal("Variable `vec` should be sorted, please check"));
   }
 
   {
@@ -1006,7 +1006,7 @@ TEST(iterator, span) {
         std::equal(s.rbegin(), s.rend(), vec.crbegin()),
         true,
         common::errors::Fatal(
-            "Varible `s` is not equal to its self by using rbegin(), rend() "
+            "Variable `s` is not equal to its self by using rbegin(), rend() "
             "and crbegin() with std::equal, please check related function"));
   }
 }
