14 changes: 14 additions & 0 deletions .clang-tidy
@@ -1,12 +1,26 @@
Checks: >
bugprone-*,
google-*,
cppcoreguidelines-*,
modernize-*,
misc-*,
performance-*,
portability-*,
readability-*,
-google-build-using-namespace,
-cppcoreguidelines-avoid-magic-numbers,
-cppcoreguidelines-avoid-non-const-global-variables,
-cppcoreguidelines-narrowing-conversions,
-cppcoreguidelines-non-private-member-variables-in-classes,
-cppcoreguidelines-prefer-member-initializer,
-cppcoreguidelines-pro-bounds-pointer-arithmetic,
-cppcoreguidelines-pro-type-const-cast,
-cppcoreguidelines-pro-type-reinterpret-cast,
-cppcoreguidelines-pro-type-member-init,
-cppcoreguidelines-special-member-functions,
-google-readability-braces-around-statements,
-google-readability-namespace-comments,
-google-runtime-references,
-misc-non-private-member-variables-in-classes,
-misc-const-correctness,
-misc-include-cleaner,
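Illustrative only, not part of this diff: with the bugprone-*, modernize-*, and readability-* families enabled above, clang-tidy flags legacy patterns like the following and proposes the modern spelling.

// hypothetical snippet, not from this repository
#include <cstddef>

typedef unsigned int uint32;  // modernize-use-using: prefer `using uint32 = unsigned int;`

int* find_slot() {
  return NULL;  // modernize-use-nullptr: prefer `return nullptr;`
}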
1 change: 1 addition & 0 deletions include/layers/Layer.hpp
@@ -78,6 +78,7 @@ class LayerImpl {
LayerImpl() = default;
LayerImpl(const Shape& inputShape, const Shape& outputShape)
: inputShape_(inputShape), outputShape_(outputShape) {}
virtual ~LayerImpl() = default;
LayerImpl(const LayerImpl& c) = default;
LayerImpl& operator=(const LayerImpl& c) = default;
[[nodiscard]] virtual std::vector<ValueType> run(
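The virtual destructor added above matters because deleting a derived object through a base-class pointer without one is undefined behavior. A minimal sketch of the failure mode it prevents, using illustrative Base/Derived types rather than the repository's classes:

#include <memory>

struct Base {
  virtual ~Base() = default;  // without `virtual`, the deletion below would be undefined behavior
};

struct Derived : Base {
  // ~Derived() is now guaranteed to run when destroying through a Base pointer
};

int main() {
  std::unique_ptr<Base> p = std::make_unique<Derived>();
}  // p deletes a Derived through Base*; the virtual destructor dispatches correctly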
26 changes: 13 additions & 13 deletions include/layers/PoolingLayer.hpp
@@ -179,18 +179,18 @@ PoolingLayerImpl<ValueType>::PoolingLayerImpl(

size_t effective_kernel_size = (kernel_size - 1) * dilation + 1;

size_t output_size;
if (ceil_mode) {
output_size = static_cast<size_t>(
std::ceil((input_size + pad - effective_kernel_size) /
static_cast<float>(stride))) +
1;
} else {
output_size = static_cast<size_t>(
std::floor((input_size + pad - effective_kernel_size) /
static_cast<float>(stride))) +
1;
}
size_t output_size = [=]() {
if (ceil_mode) {
return static_cast<size_t>(
std::ceil((input_size + pad - effective_kernel_size) /
static_cast<float>(stride))) +
1;
}
return static_cast<size_t>(
std::floor((input_size + pad - effective_kernel_size) /
static_cast<float>(stride))) +
1;
}();

this->outputShape_[input_shape.dims() - pooling_shape.dims() + i] =
output_size;
@@ -321,4 +321,4 @@ std::vector<ValueType> PoolingLayerImpl<ValueType>::run(
return res;
}

} // namespace it_lab_ai
} // namespace it_lab_ai
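The immediately invoked lambda in the hunk above lets output_size be initialized in a single expression instead of being declared uninitialized and assigned in two branches. A standalone sketch of the same pattern (values are illustrative; dilation is omitted for brevity):

#include <cmath>
#include <cstddef>

int main() {
  const std::size_t input_size = 7, pad = 1, kernel_size = 3, stride = 2;
  const bool ceil_mode = true;
  // Initialize once via an immediately invoked lambda, as in the diff above.
  const std::size_t output_size = [=]() {
    const double span = static_cast<double>(input_size + pad - kernel_size) /
                        static_cast<double>(stride);
    return static_cast<std::size_t>(ceil_mode ? std::ceil(span)
                                              : std::floor(span)) +
           1;
  }();
  return static_cast<int>(output_size);  // 4 for these values
}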
22 changes: 13 additions & 9 deletions include/perf/benchmarking.hpp
@@ -7,6 +7,7 @@
#include <cmath>
#include <numeric>
#include <stdexcept>
#include <utility>
#include <vector>

namespace it_lab_ai {
@@ -16,7 +17,7 @@ template <typename DurationContainerType, typename DurationType, class Function,
DurationContainerType elapsed_time(Function&& func, Args&&... args) {
auto duration = std::chrono::duration<DurationContainerType, DurationType>();
auto start = std::chrono::high_resolution_clock::now();
func(args...);
std::forward<Function>(func)(std::forward<Args>(args)...);
auto end = std::chrono::high_resolution_clock::now();
duration = end - start;
return duration.count();
@@ -26,7 +27,7 @@ DurationContainerType elapsed_time(Function&& func, Args&&... args) {
template <class Function, typename... Args>
double elapsed_time_omp(Function&& func, Args&&... args) {
double start = omp_get_wtime();
func(args...);
std::forward<Function>(func)(std::forward<Args>(args)...);
double end = omp_get_wtime();
return end - start;
}
@@ -38,7 +39,7 @@ DurationContainerType elapsed_time_avg(const size_t iters, Function&& func,
auto duration = std::chrono::duration<DurationContainerType, DurationType>();
auto start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < iters; i++) {
func(args...);
std::forward<Function>(func)(std::forward<Args>(args)...);
}
auto end = std::chrono::high_resolution_clock::now();
duration = (end - start) / iters;
@@ -51,7 +52,7 @@ double elapsed_time_omp_avg(const size_t iters, Function&& func,
Args&&... args) {
double start = omp_get_wtime();
for (size_t i = 0; i < iters; i++) {
func(args...);
std::forward<Function>(func)(std::forward<Args>(args)...);
}
double end = omp_get_wtime();
return (end - start) / iters;
@@ -61,26 +62,29 @@ template <typename ThroughputContainerType, typename DurationType,
class Function, typename... Args>
ThroughputContainerType throughput(Function&& func, Args&&... args) {
return ThroughputContainerType(1) /
elapsed_time<ThroughputContainerType, DurationType>(func, args...);
elapsed_time<ThroughputContainerType, DurationType>(
std::forward<Function>(func), std::forward<Args>(args)...);
}

template <class Function, typename... Args>
double throughput_omp(Function&& func, Args&&... args) {
return 1 / elapsed_time_omp(func, args...);
return 1 / elapsed_time_omp(std::forward<Function>(func),
std::forward<Args>(args)...);
}

template <typename ThroughputContainerType, typename DurationType,
class Function, typename... Args>
ThroughputContainerType throughput_avg(const size_t iters, Function&& func,
Args&&... args) {
return ThroughputContainerType(1) /
elapsed_time_avg<ThroughputContainerType, DurationType>(iters, func,
args...);
elapsed_time_avg<ThroughputContainerType, DurationType>(
iters, std::forward<Function>(func), std::forward<Args>(args)...);
}

template <class Function, typename... Args>
double throughput_omp_avg(const size_t iters, Function&& func, Args&&... args) {
return 1 / elapsed_time_omp_avg(iters, func, args...);
return 1 / elapsed_time_omp_avg(iters, std::forward<Function>(func),
std::forward<Args>(args)...);
}

// as "Manhattan" norm of error-vector
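A usage sketch for the forwarding change above (the lambda, data size, and include path are assumptions, not code from this PR). Because arguments are now perfectly forwarded, an rvalue argument can be moved from on the first invocation, so rvalues are best passed only to the single-shot helpers, not the *_avg loops:

#include <algorithm>
#include <ratio>
#include <vector>

#include "perf/benchmarking.hpp"  // assumes include/ is on the include path

int main() {
  std::vector<int> data(1 << 20, 42);
  // Time one std::sort call in milliseconds; `data` is an lvalue, so it is
  // forwarded by reference and neither copied nor moved.
  const double ms = it_lab_ai::elapsed_time<double, std::milli>(
      [](std::vector<int>& v) { std::sort(v.begin(), v.end()); }, data);
  return ms >= 0.0 ? 0 : 1;
}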
3 changes: 2 additions & 1 deletion src/Weights_Reader/reader_weights.cpp
@@ -44,12 +44,13 @@ json read_json(const std::string& filename) {
return result;

#else
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
int fd = open(filename.c_str(), O_RDONLY);
if (fd == -1) {
throw std::runtime_error("Cannot open file: " + filename);
}

struct stat sb;
struct stat sb {};
fstat(fd, &sb);

if (sb.st_size == 0) {
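The struct stat sb {}; change value-initializes the POSIX struct, so no member is read uninitialized even if fstat fails. A standalone sketch of the pattern (path and error handling are illustrative):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int main() {
  // Illustrative path; open() is declared variadic, hence the NOLINT above.
  const int fd = open("/tmp/example.json", O_RDONLY);
  if (fd == -1) {
    return 1;
  }
  struct stat sb{};  // value-initialized: every member starts zeroed
  if (fstat(fd, &sb) == 0 && sb.st_size > 0) {
    // sb.st_size is trustworthy here
  }
  close(fd);
  return 0;
}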
96 changes: 34 additions & 62 deletions src/graph/graph.cpp
@@ -26,6 +26,18 @@

namespace it_lab_ai {

namespace {
template <typename T>
std::shared_ptr<Layer> clone_layer_checked(
const std::shared_ptr<Layer>& layer) {
const auto* casted = dynamic_cast<const T*>(layer.get());
if (casted == nullptr) {
throw std::invalid_argument("Layer type mismatch while cloning");
}
return std::make_shared<T>(*casted);
}
} // namespace

void Graph::clone(Graph& result, Tensor& out,
const RuntimeOptions& options) const {
result.arrayE_ = this->arrayE_;
@@ -61,110 +73,70 @@ std::shared_ptr<Layer> layer_based_shared_copy(
const std::shared_ptr<Layer>& layer, const RuntimeOptions& options) {
switch (layer->getName()) {
case it_lab_ai::kInput: {
auto* tmp_layer = new InputLayer(*dynamic_cast<InputLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<InputLayer>(layer);
}
case it_lab_ai::kPooling: {
if (options.backend == Backend::kOneDnn) {
auto* tmp_layer = new PoolingLayerOneDnn(
*dynamic_cast<PoolingLayerOneDnn*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<PoolingLayerOneDnn>(layer);
}
auto* tmp_layer =
new PoolingLayer(*dynamic_cast<PoolingLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<PoolingLayer>(layer);
}
case it_lab_ai::kElementWise: {
if (options.backend == Backend::kOneDnn) {
auto* tmp_layer =
new EwLayerOneDnn(*dynamic_cast<EwLayerOneDnn*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<EwLayerOneDnn>(layer);
}
auto* tmp_layer = new EWLayer(*dynamic_cast<EWLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<EWLayer>(layer);
}
case it_lab_ai::kConvolution: {
if (options.backend == Backend::kOneDnn) {
auto* tmp_layer =
new ConvLayerOneDnn(*dynamic_cast<ConvLayerOneDnn*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<ConvLayerOneDnn>(layer);
}
auto* tmp_layer = new ConvolutionalLayer(
*dynamic_cast<ConvolutionalLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<ConvolutionalLayer>(layer);
}
case it_lab_ai::kFullyConnected: {
auto* tmp_layer = new FCLayer(*dynamic_cast<FCLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<FCLayer>(layer);
}
case it_lab_ai::kFlatten: {
auto* tmp_layer =
new FlattenLayer(*dynamic_cast<FlattenLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<FlattenLayer>(layer);
}
case it_lab_ai::kConcat: {
auto* tmp_layer =
new ConcatLayer(*dynamic_cast<ConcatLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<ConcatLayer>(layer);
}
case it_lab_ai::kDropout: {
auto* tmp_layer =
new DropOutLayer(*dynamic_cast<DropOutLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<DropOutLayer>(layer);
}
case it_lab_ai::kSplit: {
auto* tmp_layer = new SplitLayer(*dynamic_cast<SplitLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<SplitLayer>(layer);
}
case it_lab_ai::kBinaryOp: {
if (options.backend == Backend::kOneDnn) {
auto* tmp_layer = new BinaryOpLayerOneDnn(
*dynamic_cast<BinaryOpLayerOneDnn*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<BinaryOpLayerOneDnn>(layer);
}
auto* tmp_layer =
new BinaryOpLayer(*dynamic_cast<BinaryOpLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<BinaryOpLayer>(layer);
}
case it_lab_ai::kTranspose: {
auto* tmp_layer =
new TransposeLayer(*dynamic_cast<TransposeLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<TransposeLayer>(layer);
}
case it_lab_ai::kMatmul: {
auto* tmp_layer =
new MatmulLayer(*dynamic_cast<MatmulLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<MatmulLayer>(layer);
}
case it_lab_ai::kReshape: {
auto* tmp_layer =
new ReshapeLayer(*dynamic_cast<ReshapeLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<ReshapeLayer>(layer);
}
case it_lab_ai::kSoftmax: {
auto* tmp_layer =
new SoftmaxLayer(*dynamic_cast<SoftmaxLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<SoftmaxLayer>(layer);
}
case it_lab_ai::kReduce: {
if (options.backend == Backend::kOneDnn) {
auto* tmp_layer = new ReduceLayerOneDnn(
*dynamic_cast<ReduceLayerOneDnn*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<ReduceLayerOneDnn>(layer);
}
auto* tmp_layer =
new ReduceLayer(*dynamic_cast<ReduceLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<ReduceLayer>(layer);
}
case it_lab_ai::kBatchNormalization: {
auto* tmp_layer = new BatchNormalizationLayer(
*dynamic_cast<BatchNormalizationLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<BatchNormalizationLayer>(layer);
}
case it_lab_ai::kOutput: {
auto* tmp_layer =
new OutputLayer(*dynamic_cast<OutputLayer*>(layer.get()));
return std::shared_ptr<Layer>(tmp_layer);
return clone_layer_checked<OutputLayer>(layer);
}
default: {
throw std::invalid_argument("No such layer type");
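The clone_layer_checked helper above collapses each raw new plus unchecked dynamic_cast pair into one template that fails loudly on a type mismatch instead of copying through a null pointer. A standalone sketch of the pattern, with illustrative Node types standing in for Layer and its subclasses:

#include <memory>
#include <stdexcept>

struct Node { virtual ~Node() = default; };
struct AddNode : Node { int c = 0; };
struct MulNode : Node { int c = 1; };

template <typename T>
std::shared_ptr<Node> clone_checked(const std::shared_ptr<Node>& n) {
  const auto* casted = dynamic_cast<const T*>(n.get());
  if (casted == nullptr) {
    throw std::invalid_argument("Node type mismatch while cloning");
  }
  return std::make_shared<T>(*casted);  // copy-construct a fresh instance
}

int main() {
  const std::shared_ptr<Node> n = std::make_shared<AddNode>();
  auto ok = clone_checked<AddNode>(n);  // succeeds: copies the AddNode
  try {
    auto bad = clone_checked<MulNode>(n);  // wrong type: throws instead of crashing
  } catch (const std::invalid_argument&) {
  }
  return 0;
}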
20 changes: 10 additions & 10 deletions src/graph_transformations/graph_transformations.cpp
@@ -137,8 +137,6 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
std::vector<int> leaves;
std::vector<int> roots_inps_final;
std::vector<int> leaves_outs_final;
size_t amount_connected;
size_t amount_connected_s;
for (int v = 0; v < subgraph_from.getLayersCount(); v++) {
if (is_root(subgraph_from, v)) {
roots.push_back(v);
@@ -175,8 +173,9 @@ }
}
}
// recognize transformations we can apply with roots
amount_connected = new_graph.getOutputsSize(subs[i][roots[j]]);
amount_connected_s = subgraph_from.getOutputsSize(roots[j]);
const size_t amount_connected =
new_graph.getOutputsSize(subs[i][roots[j]]);
const size_t amount_connected_s = subgraph_from.getOutputsSize(roots[j]);
if (amount_connected == amount_connected_s) {
continue;
}
@@ -189,7 +188,7 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
}
}
for (int leaf : leaves) {
amount_connected = new_graph.getOutputsSize(subs[i][leaf]);
const size_t amount_connected = new_graph.getOutputsSize(subs[i][leaf]);
for (size_t k = 0; k < amount_connected; k++) {
int id = new_graph.getOutLayers(subs[i][leaf])[k];
auto it =
@@ -242,8 +241,6 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
std::vector<int> leaves_to;
std::vector<std::vector<int>> roots_inps_final;
std::vector<std::vector<int>> leaves_outs_final;
size_t amount_connected;
size_t amount_connected_s;
for (int v = 0; v < subgraph_from.getLayersCount(); v++) {
if (is_root(subgraph_from, v)) {
roots_from.push_back(v);
@@ -296,8 +293,10 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
for (size_t j = 0; j < roots_from.size(); j++) {
roots_inps_final[j] = new_graph.getInLayers(subs[i][roots_from[j]]);
// recognize transformations we can apply with roots
amount_connected = new_graph.getOutputsSize(subs[i][roots_from[j]]);
amount_connected_s = subgraph_from.getOutputsSize(roots_from[j]);
const size_t amount_connected =
new_graph.getOutputsSize(subs[i][roots_from[j]]);
const size_t amount_connected_s =
subgraph_from.getOutputsSize(roots_from[j]);
if (amount_connected == amount_connected_s) {
continue;
}
@@ -310,7 +309,8 @@ void changed_subgraphs(const Graph& graph, const Graph& subgraph_from,
}
}
for (size_t j = 0; j < leaves_from.size(); j++) {
amount_connected = new_graph.getOutputsSize(subs[i][leaves_from[j]]);
const size_t amount_connected =
new_graph.getOutputsSize(subs[i][leaves_from[j]]);
for (size_t k = 0; k < amount_connected; k++) {
int id = new_graph.getOutLayers(subs[i][leaves_from[j]])[k];
leaves_outs_final[j].push_back(id);
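The graph_transformations change narrows amount_connected and amount_connected_s from hoisted, mutable function-scope variables to const locals declared at first use inside each loop. A minimal sketch of the benefit (types and loop are illustrative):

#include <cstddef>
#include <vector>

int count_branching(const std::vector<std::vector<int>>& out_edges) {
  int branching = 0;
  for (const auto& edges : out_edges) {
    // Declared const at first use: it cannot carry a stale value into a
    // later iteration, and the compiler rejects accidental reassignment.
    const std::size_t amount_connected = edges.size();
    if (amount_connected > 1) {
      ++branching;
    }
  }
  return branching;
}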