
Commit 91b26e4

Address 'cppcoreguidelines-*' clang-tidy remarks
1 parent 028996d commit 91b26e4

12 files changed: 153 additions & 174 deletions


.clang-tidy

Lines changed: 5 additions & 0 deletions
```diff
@@ -1,10 +1,15 @@
 Checks: >
   bugprone-*,
+  cppcoreguidelines-*,
   modernize-*,
   misc-*,
   performance-*,
   portability-*,
   readability-*,
+  -cppcoreguidelines-avoid-magic-numbers,
+  -cppcoreguidelines-pro-bounds-pointer-arithmetic,
+  -cppcoreguidelines-narrowing-conversions,
+  -cppcoreguidelines-avoid-non-const-global-variables,
   -google-readability-braces-around-statements,
   -google-readability-namespace-comments,
   -google-runtime-references,
```
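The whole `cppcoreguidelines-*` group is enabled, with four checks opted out because they are noisy in numeric tensor code (shape literals, pointer-based buffer access, `size_t`/`int` narrowing). A hypothetical sketch, not from the repository, of what the remaining checks still enforce:

```cpp
#include <cstddef>
#include <vector>

// Illustrative only: patterns the newly enabled checks react to.
std::size_t count_nonzero(const std::vector<int>& v) {
  // cppcoreguidelines-init-variables: a bare `std::size_t n;` declared
  // before the loop would now be flagged, so initialize at declaration.
  std::size_t n = 0;
  for (const int x : v) {
    if (x != 0) {
      n++;
    }
  }
  // A shape literal such as {1, 3, 224, 224} stays silent only because
  // cppcoreguidelines-avoid-magic-numbers is disabled in the config above.
  return n;
}
```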

src/Weights_Reader/reader_weights.cpp

Lines changed: 3 additions & 2 deletions
```diff
@@ -44,12 +44,13 @@ json read_json(const std::string& filename) {
   return result;
 
 #else
-  int fd = open(filename.c_str(), O_RDONLY);
+  int fd = open(filename.c_str(),
+               O_RDONLY);  // NOLINT(cppcoreguidelines-pro-type-vararg)
   if (fd == -1) {
     throw std::runtime_error("Cannot open file: " + filename);
   }
 
-  struct stat sb;
+  struct stat sb {};
   fstat(fd, &sb);
 
   if (sb.st_size == 0) {
```
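Two distinct fixes here: `open` is a POSIX varargs function, so the call gets a scoped `NOLINT` rather than a global suppression, and `struct stat sb {};` value-initializes the struct so no member is ever read uninitialized. A minimal sketch of the same idiom in a hypothetical helper, assuming a POSIX platform:

```cpp
#include <fcntl.h>     // open, O_RDONLY
#include <sys/stat.h>  // struct stat, fstat
#include <unistd.h>    // close

#include <stdexcept>
#include <string>

// Hypothetical helper (not from the repository) showing the same idiom.
long file_size(const std::string& path) {
  int fd = open(path.c_str(),
                O_RDONLY);  // NOLINT(cppcoreguidelines-pro-type-vararg)
  if (fd == -1) {
    throw std::runtime_error("Cannot open file: " + path);
  }
  struct stat sb {};  // `{}` zero-fills every member, unlike `struct stat sb;`
  if (fstat(fd, &sb) == -1) {
    close(fd);
    throw std::runtime_error("fstat failed for: " + path);
  }
  close(fd);
  return static_cast<long>(sb.st_size);
}
```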

src/graph/graph.cpp

Lines changed: 34 additions & 62 deletions
```diff
@@ -26,6 +26,18 @@
 
 namespace it_lab_ai {
 
+namespace {
+template <typename T>
+std::shared_ptr<Layer> clone_layer_checked(
+    const std::shared_ptr<Layer>& layer) {
+  const auto* casted = dynamic_cast<const T*>(layer.get());
+  if (casted == nullptr) {
+    throw std::invalid_argument("Layer type mismatch while cloning");
+  }
+  return std::make_shared<T>(*casted);
+}
+}  // namespace
+
 void Graph::clone(Graph& result, Tensor& out,
                   const RuntimeOptions& options) const {
   result.arrayE_ = this->arrayE_;
@@ -61,110 +73,70 @@ std::shared_ptr<Layer> layer_based_shared_copy(
     const std::shared_ptr<Layer>& layer, const RuntimeOptions& options) {
   switch (layer->getName()) {
     case it_lab_ai::kInput: {
-      auto* tmp_layer = new InputLayer(*dynamic_cast<InputLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<InputLayer>(layer);
     }
     case it_lab_ai::kPooling: {
       if (options.backend == Backend::kOneDnn) {
-        auto* tmp_layer = new PoolingLayerOneDnn(
-            *dynamic_cast<PoolingLayerOneDnn*>(layer.get()));
-        return std::shared_ptr<Layer>(tmp_layer);
+        return clone_layer_checked<PoolingLayerOneDnn>(layer);
       }
-      auto* tmp_layer =
-          new PoolingLayer(*dynamic_cast<PoolingLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<PoolingLayer>(layer);
     }
     case it_lab_ai::kElementWise: {
       if (options.backend == Backend::kOneDnn) {
-        auto* tmp_layer =
-            new EwLayerOneDnn(*dynamic_cast<EwLayerOneDnn*>(layer.get()));
-        return std::shared_ptr<Layer>(tmp_layer);
+        return clone_layer_checked<EwLayerOneDnn>(layer);
       }
-      auto* tmp_layer = new EWLayer(*dynamic_cast<EWLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<EWLayer>(layer);
     }
     case it_lab_ai::kConvolution: {
       if (options.backend == Backend::kOneDnn) {
-        auto* tmp_layer =
-            new ConvLayerOneDnn(*dynamic_cast<ConvLayerOneDnn*>(layer.get()));
-        return std::shared_ptr<Layer>(tmp_layer);
+        return clone_layer_checked<ConvLayerOneDnn>(layer);
       }
-      auto* tmp_layer = new ConvolutionalLayer(
-          *dynamic_cast<ConvolutionalLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<ConvolutionalLayer>(layer);
     }
     case it_lab_ai::kFullyConnected: {
-      auto* tmp_layer = new FCLayer(*dynamic_cast<FCLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<FCLayer>(layer);
     }
     case it_lab_ai::kFlatten: {
-      auto* tmp_layer =
-          new FlattenLayer(*dynamic_cast<FlattenLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<FlattenLayer>(layer);
     }
     case it_lab_ai::kConcat: {
-      auto* tmp_layer =
-          new ConcatLayer(*dynamic_cast<ConcatLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<ConcatLayer>(layer);
     }
     case it_lab_ai::kDropout: {
-      auto* tmp_layer =
-          new DropOutLayer(*dynamic_cast<DropOutLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<DropOutLayer>(layer);
    }
     case it_lab_ai::kSplit: {
-      auto* tmp_layer = new SplitLayer(*dynamic_cast<SplitLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<SplitLayer>(layer);
     }
     case it_lab_ai::kBinaryOp: {
       if (options.backend == Backend::kOneDnn) {
-        auto* tmp_layer = new BinaryOpLayerOneDnn(
-            *dynamic_cast<BinaryOpLayerOneDnn*>(layer.get()));
-        return std::shared_ptr<Layer>(tmp_layer);
+        return clone_layer_checked<BinaryOpLayerOneDnn>(layer);
       }
-      auto* tmp_layer =
-          new BinaryOpLayer(*dynamic_cast<BinaryOpLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<BinaryOpLayer>(layer);
     }
     case it_lab_ai::kTranspose: {
-      auto* tmp_layer =
-          new TransposeLayer(*dynamic_cast<TransposeLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<TransposeLayer>(layer);
     }
     case it_lab_ai::kMatmul: {
-      auto* tmp_layer =
-          new MatmulLayer(*dynamic_cast<MatmulLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<MatmulLayer>(layer);
     }
     case it_lab_ai::kReshape: {
-      auto* tmp_layer =
-          new ReshapeLayer(*dynamic_cast<ReshapeLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<ReshapeLayer>(layer);
     }
     case it_lab_ai::kSoftmax: {
-      auto* tmp_layer =
-          new SoftmaxLayer(*dynamic_cast<SoftmaxLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<SoftmaxLayer>(layer);
     }
     case it_lab_ai::kReduce: {
       if (options.backend == Backend::kOneDnn) {
-        auto* tmp_layer = new ReduceLayerOneDnn(
-            *dynamic_cast<ReduceLayerOneDnn*>(layer.get()));
-        return std::shared_ptr<Layer>(tmp_layer);
+        return clone_layer_checked<ReduceLayerOneDnn>(layer);
       }
-      auto* tmp_layer =
-          new ReduceLayer(*dynamic_cast<ReduceLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<ReduceLayer>(layer);
     }
     case it_lab_ai::kBatchNormalization: {
-      auto* tmp_layer = new BatchNormalizationLayer(
-          *dynamic_cast<BatchNormalizationLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<BatchNormalizationLayer>(layer);
     }
     case it_lab_ai::kOutput: {
-      auto* tmp_layer =
-          new OutputLayer(*dynamic_cast<OutputLayer*>(layer.get()));
-      return std::shared_ptr<Layer>(tmp_layer);
+      return clone_layer_checked<OutputLayer>(layer);
     }
     default: {
       throw std::invalid_argument("No such layer type");
```
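The helper collapses sixteen near-identical `new` + unchecked `dynamic_cast` blocks. Beyond brevity, the behavioral change is safety: the old code dereferenced the raw `dynamic_cast` result directly, which is undefined behavior when the cast fails, while the helper throws. A self-contained sketch of the pattern with illustrative types (`Base`/`Derived` are not from the repository):

```cpp
#include <memory>
#include <stdexcept>

struct Base {
  virtual ~Base() = default;
};
struct Derived : Base {
  int value = 42;
};

template <typename T>
std::shared_ptr<Base> clone_checked(const std::shared_ptr<Base>& obj) {
  const auto* casted = dynamic_cast<const T*>(obj.get());
  if (casted == nullptr) {
    // A mismatched type now throws instead of dereferencing nullptr.
    throw std::invalid_argument("type mismatch while cloning");
  }
  return std::make_shared<T>(*casted);  // copy-construct, no bare `new`
}

int main() {
  std::shared_ptr<Base> layer = std::make_shared<Derived>();
  auto copy = clone_checked<Derived>(layer);  // ok
  // clone_checked<SomeOtherDerived>(layer) would throw, not crash.
}
```

Using `std::make_shared<T>(*casted)` also replaces the `new`-then-wrap dance flagged by `cppcoreguidelines-owning-memory`.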

src/graph_transformations/graph_transformations.cpp

Lines changed: 10 additions & 10 deletions
```diff
@@ -137,8 +137,6 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
   std::vector<int> leaves;
   std::vector<int> roots_inps_final;
   std::vector<int> leaves_outs_final;
-  size_t amount_connected;
-  size_t amount_connected_s;
   for (int v = 0; v < subgraph_from.getLayersCount(); v++) {
     if (is_root(subgraph_from, v)) {
       roots.push_back(v);
@@ -175,8 +173,9 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
         }
       }
       // recognize transformations we can apply with roots
-      amount_connected = new_graph.getOutputsSize(subs[i][roots[j]]);
-      amount_connected_s = subgraph_from.getOutputsSize(roots[j]);
+      const size_t amount_connected =
+          new_graph.getOutputsSize(subs[i][roots[j]]);
+      const size_t amount_connected_s = subgraph_from.getOutputsSize(roots[j]);
       if (amount_connected == amount_connected_s) {
         continue;
       }
@@ -189,7 +188,7 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
       }
     }
     for (int leaf : leaves) {
-      amount_connected = new_graph.getOutputsSize(subs[i][leaf]);
+      const size_t amount_connected = new_graph.getOutputsSize(subs[i][leaf]);
       for (size_t k = 0; k < amount_connected; k++) {
         int id = new_graph.getOutLayers(subs[i][leaf])[k];
         auto it =
@@ -242,8 +241,6 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
   std::vector<int> leaves_to;
   std::vector<std::vector<int>> roots_inps_final;
   std::vector<std::vector<int>> leaves_outs_final;
-  size_t amount_connected;
-  size_t amount_connected_s;
   for (int v = 0; v < subgraph_from.getLayersCount(); v++) {
     if (is_root(subgraph_from, v)) {
       roots_from.push_back(v);
@@ -296,8 +293,10 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
     for (size_t j = 0; j < roots_from.size(); j++) {
       roots_inps_final[j] = new_graph.getInLayers(subs[i][roots_from[j]]);
       // recognize transformations we can apply with roots
-      amount_connected = new_graph.getOutputsSize(subs[i][roots_from[j]]);
-      amount_connected_s = subgraph_from.getOutputsSize(roots_from[j]);
+      const size_t amount_connected =
+          new_graph.getOutputsSize(subs[i][roots_from[j]]);
+      const size_t amount_connected_s =
+          subgraph_from.getOutputsSize(roots_from[j]);
       if (amount_connected == amount_connected_s) {
         continue;
       }
@@ -310,7 +309,8 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
       }
     }
     for (size_t j = 0; j < leaves_from.size(); j++) {
-      amount_connected = new_graph.getOutputsSize(subs[i][leaves_from[j]]);
+      const size_t amount_connected =
+          new_graph.getOutputsSize(subs[i][leaves_from[j]]);
       for (size_t k = 0; k < amount_connected; k++) {
         int id = new_graph.getOutLayers(subs[i][leaves_from[j]])[k];
         leaves_outs_final[j].push_back(id);
```
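Every hunk in this file is the same transformation: a mutable `size_t` declared far from its uses becomes a `const` local at first use, satisfying `cppcoreguidelines-init-variables` and preventing stale values from leaking between loop iterations. A sketch of the before/after shape with hypothetical names (not the real graph API):

```cpp
#include <cstddef>
#include <vector>

// Illustrative sketch only.
void visit_groups(const std::vector<std::vector<int>>& groups) {
  // Before: `size_t amount_connected;` lived outside the loop and was
  // reassigned per iteration. After: a fresh const local per iteration,
  // so each value is immutable and scoped exactly to where it is needed.
  for (const auto& group : groups) {
    const std::size_t amount_connected = group.size();
    for (std::size_t k = 0; k < amount_connected; k++) {
      // ... process group[k]
    }
  }
}
```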

src/layers/FCLayer.cpp

Lines changed: 11 additions & 13 deletions
```diff
@@ -17,20 +17,18 @@ void FCLayer::run(const std::vector<Tensor>& input,
     throw std::invalid_argument("Bias and weights data type aren't same");
   }
 
-  size_t batch_size;
-  size_t output_size = bias_->get_shape()[0];
-  if (input[0].get_shape().dims() == 1) {
-    size_t total_elements = input[0].get_shape()[0];
-    size_t expected_input_size = weights_->get_shape()[0];
-
-    if (total_elements % expected_input_size == 0) {
-      batch_size = total_elements / expected_input_size;
-    } else {
-      batch_size = 1;
+  const size_t batch_size = [&]() -> size_t {
+    if (input[0].get_shape().dims() == 1) {
+      const size_t total_elements = input[0].get_shape()[0];
+      const size_t expected_input_size = weights_->get_shape()[0];
+      if (total_elements % expected_input_size == 0) {
+        return total_elements / expected_input_size;
+      }
+      return 1;
     }
-  } else {
-    batch_size = input[0].get_shape()[0];
-  }
+    return input[0].get_shape()[0];
+  }();
+  size_t output_size = bias_->get_shape()[0];
 
   switch (input[0].get_type()) {
     case Type::kInt: {
```
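The immediately-invoked lambda (`[&]() -> size_t { ... }()`) lets `batch_size` be `const` even though computing it requires branching, and because every path must `return`, the compiler proves the value is always set. A standalone sketch of the idiom with hypothetical inputs (plus a divide-by-zero guard the sketch adds for self-containment):

```cpp
#include <cstddef>

// Hypothetical standalone version of the batch-size deduction above.
std::size_t deduce_batch(std::size_t dims, std::size_t first_dim,
                         std::size_t expected_input) {
  const std::size_t batch = [&]() -> std::size_t {
    if (dims == 1) {
      if (expected_input != 0 && first_dim % expected_input == 0) {
        return first_dim / expected_input;  // flat buffer holds N samples
      }
      return 1;  // fall back to a single sample
    }
    return first_dim;  // leading dimension is the batch
  }();  // the trailing () invokes the lambda immediately
  return batch;
}
```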

src/layers/FlattenLayer.cpp

Lines changed: 2 additions & 3 deletions
```diff
@@ -4,9 +4,8 @@ namespace it_lab_ai {
 
 std::vector<size_t> reorder(std::vector<size_t> order_vec,
                             std::vector<size_t> order) {
-  size_t min_ind;
-  for (size_t i = 0; i < order.size() - 1; i++) {
-    min_ind = i;
+  for (size_t i = 0; i + 1 < order.size(); i++) {
+    size_t min_ind = i;
     for (size_t j = i + 1; j < order.size(); j++) {
       if (order[j] < order[min_ind]) {
         min_ind = j;
```
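The loop-bound change matters for empty input: `order.size()` is unsigned, so `order.size() - 1` wraps to `SIZE_MAX` when the vector is empty, and the old loop would run and index out of bounds. `i + 1 < order.size()` can never underflow. A minimal sketch of the fixed shape, assuming the surrounding `reorder()` is a selection sort:

```cpp
#include <cstddef>
#include <utility>  // std::swap
#include <vector>

void selection_sort(std::vector<std::size_t>& order) {
  for (std::size_t i = 0; i + 1 < order.size(); i++) {  // safe for size 0
    std::size_t min_ind = i;  // declared and initialized at first use
    for (std::size_t j = i + 1; j < order.size(); j++) {
      if (order[j] < order[min_ind]) {
        min_ind = j;
      }
    }
    std::swap(order[i], order[min_ind]);
  }
}
```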

src/layers/Shape.cpp

Lines changed: 3 additions & 3 deletions
```diff
@@ -7,11 +7,11 @@ size_t Shape::get_index(const std::vector<size_t>& coords) const {
     throw std::invalid_argument("Invalid index vector");
   }
   size_t res = 0;
-  size_t mulbuf;
   for (size_t i = 0; i < coords.size(); i++) {
     // to get to the i line
-    mulbuf = std::accumulate(dims_.begin() + (i + 1), dims_.end(),
-                             static_cast<size_t>(1), std::multiplies<>());
+    const size_t mulbuf =
+        std::accumulate(dims_.cbegin() + (i + 1), dims_.cend(),
+                        static_cast<size_t>(1), std::multiplies<>());
     if (coords[i] >= dims_[i]) {
       throw std::out_of_range("Out of range");
     }
```
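For context, the `std::accumulate` call computes the row-major stride of axis `i`: the product of all dimensions after it, where an empty range (the last axis) yields the initial value 1. A hypothetical free-function version of the same computation, assuming row-major layout:

```cpp
#include <cstddef>
#include <functional>  // std::multiplies
#include <numeric>     // std::accumulate
#include <vector>

std::size_t flat_index(const std::vector<std::size_t>& dims,
                       const std::vector<std::size_t>& coords) {
  std::size_t res = 0;
  for (std::size_t i = 0; i < coords.size(); i++) {
    // Stride of axis i = product of all dimensions after i.
    const std::size_t stride = std::accumulate(
        dims.cbegin() + static_cast<std::ptrdiff_t>(i + 1), dims.cend(),
        static_cast<std::size_t>(1), std::multiplies<>());
    res += coords[i] * stride;
  }
  return res;
}
// e.g. dims {2, 3, 4}, coords {1, 2, 3} -> 1*12 + 2*4 + 3*1 = 23
```

Switching to `cbegin()`/`cend()` also documents that the accumulation never mutates `dims_`.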

src/layers_oneDNN/BinaryOpLayer.cpp

Lines changed: 20 additions & 8 deletions
```diff
@@ -30,10 +30,16 @@ void BinaryOpLayerOneDnn::run(const std::vector<Tensor>& input,
       const auto& src1_data = *b.as<float>();
       std::vector<float> dst_data(output_shape_.count());
 
-      dnnl::memory src0_mem(src0_md_, *engine_,
-                            const_cast<float*>(src0_data.data()));
-      dnnl::memory src1_mem(src1_md_, *engine_,
-                            const_cast<float*>(src1_data.data()));
+      dnnl::memory src0_mem(
+          src0_md_, *engine_,
+          const_cast<float*>(
+              src0_data
+                  .data()));  // NOLINT(cppcoreguidelines-pro-type-const-cast)
+      dnnl::memory src1_mem(
+          src1_md_, *engine_,
+          const_cast<float*>(
+              src1_data
+                  .data()));  // NOLINT(cppcoreguidelines-pro-type-const-cast)
       dnnl::memory dst_mem(dst_md_, *engine_, dst_data.data());
 
       binary_prim_->execute(*stream_, {{DNNL_ARG_SRC_0, src0_mem},
@@ -47,10 +53,16 @@ void BinaryOpLayerOneDnn::run(const std::vector<Tensor>& input,
       const auto& src1_data = *b.as<int>();
       std::vector<int> dst_data(output_shape_.count());
 
-      dnnl::memory src0_mem(src0_md_, *engine_,
-                            const_cast<int*>(src0_data.data()));
-      dnnl::memory src1_mem(src1_md_, *engine_,
-                            const_cast<int*>(src1_data.data()));
+      dnnl::memory src0_mem(
+          src0_md_, *engine_,
+          const_cast<int*>(
+              src0_data
+                  .data()));  // NOLINT(cppcoreguidelines-pro-type-const-cast)
+      dnnl::memory src1_mem(
+          src1_md_, *engine_,
+          const_cast<int*>(
+              src1_data
+                  .data()));  // NOLINT(cppcoreguidelines-pro-type-const-cast)
       dnnl::memory dst_mem(dst_md_, *engine_, dst_data.data());
 
       binary_prim_->execute(*stream_, {{DNNL_ARG_SRC_0, src0_mem},
```
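`dnnl::memory` takes a non-const handle even when the buffer is only read, so the `const_cast` is unavoidable at the API boundary; the per-line `NOLINT` keeps `cppcoreguidelines-pro-type-const-cast` enabled everywhere else. A sketch of the pattern against a hypothetical C-style API (a stand-in, not the oneDNN signature):

```cpp
#include <cstddef>
#include <vector>

// Hypothetical C API that only reads the buffer but takes void*.
extern "C" void consume_buffer(void* handle, std::size_t count);

void bind_read_only(const std::vector<float>& src) {
  // Casting away const is sound here because the callee never writes;
  // the NOLINT scopes the suppression to this single call site.
  consume_buffer(
      const_cast<float*>(
          src.data()),  // NOLINT(cppcoreguidelines-pro-type-const-cast)
      src.size());
}
```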
