
Commit 7a61306

goldsborough authored and facebook-github-bot committed
Enable all clang-tidy performance checks (pytorch#15198)
Summary: This PR adds the final set of clang-tidy checks for our codebase: the remaining performance-related checks. Most fixes change `auto` to `const auto&` in places where unnecessary copies were made, and add `reserve()` calls before loops doing repeated `push_back()`. A few more replace calls to `std::string::find` that passed a single-character string literal — which selects a less efficient search algorithm meant for larger substrings — with the single-char overload.

![image](https://user-images.githubusercontent.com/6429851/49978940-adc1a780-ff01-11e8-99da-a4e431361f07.png)

ezyang apaszke

Pull Request resolved: pytorch#15198
Differential Revision: D13468797
Pulled By: goldsborough
fbshipit-source-id: 2bed1ea1c7c162b7f3e0e1026f17125e88c4d5b2
1 parent fc2856e · commit 7a61306

37 files changed · +75 −76 lines changed
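The summary names three recurring patterns. Here is a minimal standalone sketch of all three for orientation; it is illustrative only, and the names below are hypothetical rather than code from this diff:

#include <cstddef>
#include <string>
#include <vector>

void patterns(const std::vector<std::string>& items, const std::string& text) {
  // 1. const auto& borrows each element; plain auto would copy every string.
  for (const auto& item : items) {
    static_cast<void>(item);
  }

  // 2. reserve() before repeated push_back(): one allocation up front
  //    instead of a series of growth reallocations.
  std::vector<std::size_t> lengths;
  lengths.reserve(items.size());
  for (const auto& item : items) {
    lengths.push_back(item.size());
  }

  // 3. find('.') uses the single-char overload; find(".") would run the
  //    substring-search machinery meant for longer needles.
  if (text.find('.') != std::string::npos) {
    static_cast<void>(text);
  }
}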

.clang-tidy

+2 −1

@@ -24,7 +24,8 @@ Checks: '
 ,-modernize-use-auto
 ,-modernize-use-default-member-init
 ,-modernize-use-using
-,performance-unnecessary-value-param
+,performance-*
+,-performance-noexcept-move-constructor
 '
 WarningsAsErrors: '*'
 HeaderFilterRegex: 'torch/csrc/.*'
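With `performance-*` enabled (minus `performance-noexcept-move-constructor`) and `WarningsAsErrors: '*'`, any performance finding now fails the lint run. Since clang-tidy discovers the nearest `.clang-tidy` automatically, a local run needs no extra `-checks` flag; something like `clang-tidy -p build torch/csrc/jit/operator.cpp` should pick this configuration up (assuming a `compile_commands.json` in `build/`).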

torch/csrc/Exceptions.h

+1 −1

@@ -50,7 +50,7 @@ struct python_error : public std::exception {
   }

   python_error(python_error&& other) {
-    type = std::move(other.type);
+    type = other.type;
     value = other.value;
     traceback = other.traceback;
     other.type = nullptr;
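Note: `type` is a raw `PyObject*` here, so `std::move(other.type)` was only ever a pointer copy; removing the `std::move` changes nothing at runtime. This is the no-op-move pattern that `performance-move-const-arg` (newly enabled via `performance-*`) reports for trivially copyable types, and the move semantics of `python_error` are still provided by the `other.type = nullptr;` reset below it.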

torch/csrc/api/include/torch/nn/module.h

+4 −4

@@ -150,7 +150,7 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
   /// \endrst
   void apply(
       const NamedModuleApplyFunction& function,
-      std::string name_prefix = std::string());
+      const std::string& name_prefix = std::string());

   /// Applies the `function` to the `Module` and recursively to every submodule.
   /// The function must accept a `const std::string&` for the key of the module,
@@ -167,7 +167,7 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
   /// \endrst
   void apply(
       const ConstNamedModuleApplyFunction& function,
-      std::string name_prefix = std::string()) const;
+      const std::string& name_prefix = std::string()) const;

   /// Applies the `function` to the `Module` and recursively to every submodule.
   /// The function must accept a `const std::shared_ptr<Module>&`.
@@ -198,7 +198,7 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
   /// \endrst
   void apply(
       const NamedModulePointerApplyFunction& function,
-      std::string name_prefix = std::string()) const;
+      const std::string& name_prefix = std::string()) const;

   /// Returns the parameters of this `Module` and if `recurse` is true, also
   /// recursively of every submodule.
@@ -243,7 +243,7 @@ class TORCH_API Module : public std::enable_shared_from_this<Module> {
   /// stored in a `shared_ptr`.
   /// \endrst
   OrderedDict<std::string, std::shared_ptr<Module>> named_modules(
-      std::string name_prefix = std::string(),
+      const std::string& name_prefix = std::string(),
       bool include_self = true) const;

   /// Returns the direct submodules of this `Module`.

torch/csrc/api/src/data/datasets/mnist.cpp

+2 −2

@@ -51,11 +51,11 @@ uint32_t expect_int32(std::ifstream& stream, uint32_t expected) {
   return value;
 }

-std::string join_paths(std::string head, std::string tail) {
+std::string join_paths(std::string head, const std::string& tail) {
   if (head.back() != '/') {
     head.push_back('/');
   }
-  head += std::move(tail);
+  head += tail;
   return head;
 }

torch/csrc/api/src/nn/module.cpp

+10 −10

@@ -101,26 +101,26 @@ void Module::apply(const ConstModuleApplyFunction& function) const {

 void Module::apply(
     const NamedModuleApplyFunction& function,
-    std::string name_prefix) {
+    const std::string& name_prefix) {
   function(/*name=*/name_prefix, *this);
   apply_to_submodules(
       [&function](
           const std::string& name, const std::shared_ptr<Module>& module) {
         function(name, *module);
       },
-      std::move(name_prefix));
+      name_prefix);
 }

 void Module::apply(
     const ConstNamedModuleApplyFunction& function,
-    std::string name_prefix) const {
+    const std::string& name_prefix) const {
   function(/*name=*/name_prefix, *this);
   apply_to_submodules(
       [&function](
           const std::string& name, const std::shared_ptr<Module>& module) {
         function(name, *module);
       },
-      std::move(name_prefix));
+      name_prefix);
 }

 void Module::apply(const ModulePointerApplyFunction& function) const {
@@ -133,10 +133,10 @@ void Module::apply(const ModulePointerApplyFunction& function) const {

 void Module::apply(
     const NamedModulePointerApplyFunction& function,
-    std::string name_prefix) const {
+    const std::string& name_prefix) const {
   function(
       /*name=*/name_prefix, shared_from_this_checked());
-  apply_to_submodules(function, std::move(name_prefix));
+  apply_to_submodules(function, name_prefix);
 }

 std::vector<Tensor> Module::parameters(bool recurse) const {
@@ -199,7 +199,7 @@ std::vector<std::shared_ptr<Module>> Module::modules(bool include_self) const {
 }

 OrderedDict<std::string, std::shared_ptr<Module>> Module::named_modules(
-    std::string name_prefix,
+    const std::string& name_prefix,
     bool include_self) const {
   OrderedDict<std::string, std::shared_ptr<Module>> result;
   if (include_self) {
@@ -208,14 +208,14 @@ OrderedDict<std::string, std::shared_ptr<Module>> Module::named_modules(
             const std::string& key, const std::shared_ptr<Module>& module) {
           result.insert(key, module);
         },
-        std::move(name_prefix));
+        name_prefix);
   } else {
     apply_to_submodules(
         [&result](
             const std::string& key, const std::shared_ptr<Module>& module) {
           result.insert(key, module);
         },
-        std::move(name_prefix));
+        name_prefix);
   }
   return result;
 }
@@ -329,7 +329,7 @@ void Module::apply_to_submodules(
   for (const auto& child : children_) {
     auto qualified_name = join_name(name_prefix, child.key());
     function(qualified_name, child.value());
-    child.value()->apply_to_submodules(function, std::move(qualified_name));
+    child.value()->apply_to_submodules(function, qualified_name);
   }
 }
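These hunks drop the pass-by-value-plus-`std::move` idiom for `name_prefix` in favor of `const std::string&`, in line with `performance-unnecessary-value-param`'s preference for borrowing arguments the function never needs to own. A two-line sketch of the signature change (hypothetical name, not from this diff):

// Before: a std::string is moved/copied into the parameter at every level
// of the recursive walk over submodules.
void walk(std::string prefix);

// After: each level borrows the caller's string; no per-call construction.
void walk(const std::string& prefix);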

torch/csrc/api/src/nn/modules/batchnorm.cpp

+1 −2

@@ -13,8 +13,7 @@ namespace torch {
 namespace nn {
 BatchNormOptions::BatchNormOptions(int64_t features) : features_(features) {}

-BatchNormImpl::BatchNormImpl(BatchNormOptions options)
-    : options(std::move(options)) {
+BatchNormImpl::BatchNormImpl(BatchNormOptions options) : options(options) {
   reset();
 }

torch/csrc/api/src/nn/modules/embedding.cpp

+1 −1

@@ -14,7 +14,7 @@ EmbeddingOptions::EmbeddingOptions(int64_t count, int64_t dimension)
     : count_(count), dimension_(dimension) {}

 EmbeddingImpl::EmbeddingImpl(EmbeddingOptions options)
-    : options(std::move(options)) {
+    : options(options) {
   reset();
 }

torch/csrc/api/src/nn/modules/linear.cpp

+1 −1

@@ -10,7 +10,7 @@ namespace torch {
 namespace nn {
 LinearOptions::LinearOptions(int64_t in, int64_t out) : in_(in), out_(out) {}

-LinearImpl::LinearImpl(LinearOptions options) : options(std::move(options)) {
+LinearImpl::LinearImpl(LinearOptions options) : options(options) {
   reset();
 }

torch/csrc/api/src/nn/modules/rnn.cpp

+5 −7

@@ -131,7 +131,7 @@ RNNOutput RNNImplBase<Derived>::generic_forward(
   }
   Tensor output, new_state;
   std::tie(output, new_state) = function(
-      std::move(input),
+      input,
       std::move(state),
       flat_weights_,
       options.with_bias_,
@@ -208,12 +208,12 @@ RNNOutput RNNImpl::forward(const Tensor& input, Tensor state) {
     case RNNActivation::ReLU:
       return generic_forward(
           static_cast<RNNFunctionSignature*>(&torch::rnn_relu),
-          std::move(input),
+          input,
           std::move(state));
     case RNNActivation::Tanh:
       return generic_forward(
           static_cast<RNNFunctionSignature*>(&torch::rnn_tanh),
-          std::move(input),
+          input,
           std::move(state));
     default:
       AT_ERROR("Unhandled RNN activation function!");
@@ -244,7 +244,7 @@ RNNOutput LSTMImpl::forward(const Tensor& input, Tensor state) {
   }
   Tensor output, hidden_state, cell_state;
   std::tie(output, hidden_state, cell_state) = torch::lstm(
-      std::move(input),
+      input,
       {state[0], state[1]},
       flat_weights_,
       options.with_bias_,
@@ -266,9 +266,7 @@ GRUImpl::GRUImpl(const GRUOptions& options)

 RNNOutput GRUImpl::forward(const Tensor& input, Tensor state) {
   return generic_forward(
-      static_cast<RNNFunctionSignature*>(&torch::gru),
-      std::move(input),
-      std::move(state));
+      static_cast<RNNFunctionSignature*>(&torch::gru), input, std::move(state));
 }
 } // namespace nn
 } // namespace torch

torch/csrc/api/src/optim/serialize.cpp

+1 −0

@@ -17,6 +17,7 @@ void serialize(
     const std::string& key,
     const std::vector<int64_t>& steps) {
   std::vector<torch::Tensor> tensors;
+  tensors.reserve(steps.size());
   for (const auto& step : steps) {
     tensors.push_back(torch::tensor(static_cast<int64_t>(step)));
   }
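The added `reserve()` sizes `tensors` for exactly `steps.size()` elements, so the `push_back()` loop allocates once instead of growing geometrically; this is the pattern `performance-inefficient-vector-operation` reports.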

torch/csrc/autograd/VariableTypeUtils.h

+1 −1

@@ -153,7 +153,7 @@ inline Tensor as_variable(Tensor tensor) {

 inline std::vector<Tensor> as_variable(TensorList tl) {
   return fmap(tl, [](const Tensor& t) -> Tensor {
-    return make_variable(std::move(t), /*requires_grad=*/false);
+    return make_variable(t, /*requires_grad=*/false);
   });
 }

torch/csrc/autograd/engine.cpp

+1 −1

@@ -408,7 +408,7 @@ static variable_list call_function(FunctionTask& task) {

   if(has_post_hooks){
     // NOLINTNEXTLINE(bugprone-use-after-move)
-    return call_post_hooks(fn, std::move(outputs), std::move(inputs));
+    return call_post_hooks(fn, std::move(outputs), inputs);
   }
   return outputs;
 }

torch/csrc/autograd/python_function.cpp

+1 −4

@@ -585,10 +585,7 @@ static Node* _trace_pre_record(
   Py_INCREF(op_obj);
   auto pyobj = THPObjectPtr(op_obj);
   return jit::tracer::preRecordPythonTrace(
-      std::move(pyobj),
-      std::move(arg_types),
-      input_vars,
-      std::move(scalar_args));
+      std::move(pyobj), arg_types, input_vars, std::move(scalar_args));
 }

 static void _trace_post_record(

torch/csrc/jit/batched/BatchTensor.cpp

+1 −1

@@ -31,7 +31,7 @@ BatchTensor::BatchTensor(const std::vector<at::Tensor>& datalist, at::Tensor dim
   sizes[0] = bs;
   mask_sizes[0] = bs;
   for(int64_t i = 1; i < dims.size(0) + 1; i++){
-    for(auto x : datalist){
+    for(const auto& x : datalist){
       sizes[i] = std::max(sizes[i], x.size(i));
     }
     mask_sizes[i] = *dims[i - 1].data<uint8_t>() ? sizes[i] : 1;

torch/csrc/jit/constants.cpp

+4 −4

@@ -102,27 +102,27 @@ RegisterOperators reg({
         return 0;
       };
     } else if(type->isSubtypeOf(ListType::ofInts())) {
-      auto is = node->is(attr::value);
+      const auto& is = node->is(attr::value);
       return [is](Stack& stack) {
         push(stack, is);
         return 0;
       };
     } else if(type->isSubtypeOf(ListType::ofBools())) {
-      auto bs = node->is(attr::value);
+      const auto& bs = node->is(attr::value);
       return [bs](Stack& stack) {
         push(stack, bs);
         return 0;
       };
     } else if(type->isSubtypeOf(ListType::ofTensors())) {
-      auto ts = fmap(node->ts(attr::value), [](const at::Tensor & t) -> at::Tensor {
+      const auto& ts = fmap(node->ts(attr::value), [](const at::Tensor & t) -> at::Tensor {
         return autograd::make_variable(t);
       });
       return [ts](Stack& stack) {
         push(stack, ts);
         return 0;
       };
     } else if (type == StringType::get()) {
-      auto s = node->s(attr::value);
+      const auto& s = node->s(attr::value);
       return [s](Stack& stack) {
         push(stack, s);
         return 0;

torch/csrc/jit/custom_operator.h

+1 −1

@@ -63,7 +63,7 @@ Node* getTracedNode(
     const std::tuple<Types...>& tuple) {
   auto symbol = Symbol::fromQualString(schema.name());
   const auto& graph = tracer::getTracingState()->graph;
-  Node* node = graph->create(std::move(symbol), /*num_outputs=*/0);
+  Node* node = graph->create(symbol, /*num_outputs=*/0);
   tracer::recordSourceLocation(node);

   // Hack to call addInputs for the parameter pack in a sequenced fashion.

torch/csrc/jit/fuser/compiler.cpp

+1 −1

@@ -51,7 +51,7 @@ int debugFuser() {
 // If the given node is used once by a chunk node, returns that node.
 // Returns nullptr otherwise.
 static const Node* usedInFusedChunk(const Value* input) {
-  const auto uses = input->uses();
+  const auto& uses = input->uses();
   if (uses.size() == 1) {
     const Node *user = uses[0].user;
     if (user->kind() == prim::ConstantChunk) {

torch/csrc/jit/graph_executor.cpp

+1 −1

@@ -503,7 +503,7 @@ struct GraphExecutorImpl {
   }

   void runTraced(Stack & stack) {
-    auto state = tracer::getTracingState();
+    const auto& state = tracer::getTracingState();
     auto inputs = last(stack, num_inputs);
     auto input_values = fmap(inputs, [](const IValue & v) {
       return tracer::getNestedValueTrace(v);

torch/csrc/jit/init.cpp

+2 −2

@@ -300,7 +300,7 @@ void initJITBindings(PyObject *module) {
   m.def("_jit_get_operation", [](const std::string& qualified_name) {
     try {
       auto symbol = Symbol::fromQualString(qualified_name);
-      auto operations = getAllOperatorsFor(std::move(symbol));
+      auto operations = getAllOperatorsFor(symbol);
       AT_CHECK(!operations.empty(), "No such operator ", qualified_name);
       AT_CHECK(
           operations.size() == 1,
@@ -338,7 +338,7 @@ void initJITBindings(PyObject *module) {
   });
   m.def("_jit_get_schemas_for_operator", [](const std::string& qualified_name) {
     auto symbol = Symbol::fromQualString(qualified_name);
-    auto operations = getAllOperatorsFor(std::move(symbol));
+    auto operations = getAllOperatorsFor(symbol);
     return fmap(operations, [](const std::shared_ptr<Operator>& op) {
       return op->schema();
     });

torch/csrc/jit/interpreter.h

+1 −1

@@ -73,7 +73,7 @@ struct Suspend : public std::exception {

 struct InterpreterContinuation {
   InterpreterContinuation(InterpreterState state_, Stack stack_)
-      : state(std::move(state_)), stack(std::move(stack_)) {}
+      : state(state_), stack(std::move(stack_)) {}

   void operator()() {
     state.runAsync(stack);

torch/csrc/jit/operator.cpp

+2 −2

@@ -253,7 +253,7 @@ struct SchemaParser {
       n = "-" + L.expect(TK_NUMBER).text();
     else
       n = L.expect(TK_NUMBER).text();
-    if(kind == TypeKind::FloatType || n.find(".") != std::string::npos || n.find("e") != std::string::npos) {
+    if(kind == TypeKind::FloatType || n.find('.') != std::string::npos || n.find('e') != std::string::npos) {
       return std::stod(n);
     } else {
       int64_t v = std::stoll(n);
@@ -405,7 +405,7 @@ struct OperatorRegistry {

   // XXX - caller must be holding lock
   void registerPendingOperators() {
-    for(auto op : to_register) {
+    for(const auto& op : to_register) {
       Symbol sym = Symbol::fromQualString(op->schema().name());
       operators[sym].push_back(op);
       operators_by_sig[canonicalSchemaString(op->schema())] = op;

torch/csrc/jit/passes/alias_analysis.cpp

+1 −1

@@ -531,7 +531,7 @@ void AliasDb::addAlias(const Value* value, Symbol alias) {
     valueToAlias_[value].addSet(alias);
   } else {
     AliasInfo aliasInfo;
-    aliasInfo.addSet(std::move(alias));
+    aliasInfo.addSet(alias);
     valueToAlias_.insert({value, std::move(aliasInfo)});
   }
 }

torch/csrc/jit/passes/graph_fuser.cpp

+1 −0

@@ -973,6 +973,7 @@ void PeepholeOptimizeShapeExpressions(Block * block) {
   }
   if (unique_to_value.size() != node->inputs().size()) {
     std::vector<Value*> inputs;
+    inputs.reserve(unique_to_value.size());
     for (auto & entry : unique_to_value) {
       inputs.push_back(entry.second);
     }

torch/csrc/jit/passes/python_print.cpp

+1 −1

@@ -637,7 +637,7 @@ struct PythonPrintPass {
     } else if(v.isTensorList()) {
       stmt << "[";
       const char* delim = "";
-      for(auto t : v.toTensorListRef()) {
+      for(const auto& t : v.toTensorListRef()) {
         stmt << delim << "CONSTANTS.c" << getOrAddTensorConstant(t);
         delim = ", ";
       }
