From e1f90afa8d23fbfdac9afddd43c061011fdc43ce Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Tue, 24 Feb 2026 12:16:41 +0300 Subject: [PATCH 01/38] numPhoto --- app/Graph/acc_check.cpp | 75 +++++++++++++++++++++++++++++++---------- 1 file changed, 57 insertions(+), 18 deletions(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index a864c36d..20ee1cbd 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -13,6 +13,7 @@ using namespace it_lab_ai; int main(int argc, char* argv[]) { std::string model_name = "alexnet_mnist"; RuntimeOptions options; + int numPhoto = 1000; for (int i = 1; i < argc; ++i) { if (std::string(argv[i]) == "--model" && i + 1 < argc) { @@ -48,6 +49,18 @@ int main(int argc, char* argv[]) { } } else if (std::string(argv[i]) == "--threads" && i + 1 < argc) { options.threads = std::stoi(argv[++i]); + } else { + try { + numPhoto = std::stoi(argv[i]); + + if (numPhoto < 1 || numPhoto > 50000) { + std::cerr << "Warning: numPhoto should be between 1 and 10000 " + << "Using value: " << numPhoto << std::endl; + } + } catch (const std::exception& e) { + std::cerr << "Error: Invalid numeric argument: " << argv[i] + << ". 
Using default value: 1000" << e.what()<(j, i); + res[(a)*28 * 28 + i * 28 + j] = channels[0].at(j, i); } } } @@ -153,33 +166,45 @@ int main(int argc, char* argv[]) { entry.path().extension() == ".jpg" || entry.path().extension() == ".jpeg") { counts[class_id]++; - total_images++; } } } } - if (total_images == 0) { - std::cerr << "No images found in dataset path: " << dataset_path << '\n'; - return 1; - } + size_t images_per_class_base = numPhoto / 1000; + size_t remaining = numPhoto % 1000; int channels = input_shape[1]; int height = input_shape[2]; int width = input_shape[3]; size_t image_size = channels * height * width; - all_image_data.resize(total_images * image_size); + all_image_data.reserve(numPhoto * image_size); + image_paths.reserve(numPhoto); + true_labels.reserve(numPhoto); size_t current_index = 0; + total_images = 0; + for (int class_id = 0; class_id < 1000; ++class_id) { + size_t need_from_class = images_per_class_base; + if (remaining > 0) { + need_from_class++; + remaining--; + } + + if (need_from_class == 0) continue; + std::ostringstream folder_oss; folder_oss << std::setw(5) << std::setfill('0') << class_id; std::string class_folder_path = dataset_path + "/" + folder_oss.str(); if (!fs::exists(class_folder_path)) continue; + size_t taken = 0; for (const auto& entry : fs::directory_iterator(class_folder_path)) { + if (taken >= need_from_class) break; + if (entry.path().extension() == ".png" || entry.path().extension() == ".jpg" || entry.path().extension() == ".jpeg") { @@ -194,24 +219,37 @@ int main(int argc, char* argv[]) { prepare_image(image, input_shape, model_name); const std::vector& image_data = *prepared_tensor.as(); - std::copy(image_data.begin(), image_data.end(), - all_image_data.begin() + current_index * image_size); + all_image_data.insert(all_image_data.end(), image_data.begin(), + image_data.end()); image_paths.push_back(entry.path().string()); true_labels.push_back(class_id); - current_index++; + taken++; + total_images++; } } + 
+ if (taken < need_from_class) { + std::cout << "Warning: Class " << class_id << " has only " << taken + << " images (needed " << need_from_class << ")" << std::endl; + } + } + + if (total_images != numPhoto) { + std::cout << "Warning: Requested " << numPhoto << " images but loaded " + << total_images << " due to insufficient data" << std::endl; + numPhoto = total_images; } it_lab_ai::Shape input_shape_imagenet( - {total_images, static_cast(channels), static_cast(height), - static_cast(width)}); + {static_cast(numPhoto), static_cast(channels), + static_cast(height), static_cast(width)}); it_lab_ai::Tensor input = it_lab_ai::make_tensor(all_image_data, input_shape_imagenet); size_t output_classes = 1000; - it_lab_ai::Shape output_shape({total_images, output_classes}); + it_lab_ai::Shape output_shape( + {static_cast(numPhoto), output_classes}); it_lab_ai::Tensor output = it_lab_ai::Tensor(output_shape, it_lab_ai::Type::kFloat); @@ -219,10 +257,11 @@ int main(int argc, char* argv[]) { build_graph(graph, input, output, json_path, options, false); graph.inference(options); print_time_stats(graph); + std::vector> processed_outputs; const std::vector& raw_output = *output.as(); - for (size_t i = 0; i < total_images; ++i) { + for (size_t i = 0; i < static_cast(numPhoto); ++i) { std::vector single_output( raw_output.begin() + i * output_classes, raw_output.begin() + (i + 1) * output_classes); @@ -262,14 +301,14 @@ int main(int argc, char* argv[]) { } double final_accuracy_top1 = - (static_cast(correct_predictions_top1) / total_images) * 100; + (static_cast(correct_predictions_top1) / numPhoto) * 100; double final_accuracy_top5 = - (static_cast(correct_predictions_top5) / total_images) * 100; + (static_cast(correct_predictions_top5) / numPhoto) * 100; std::cout << "\nFinal Results:" << '\n'; std::cout << "Model: " << model_name << '\n'; std::cout << "Dataset: " << dataset_path << '\n'; - std::cout << "Total images: " << total_images << '\n'; + std::cout << "Total images: " 
<< numPhoto << '\n'; std::cout << "Correct predictions (Top-1): " << correct_predictions_top1 << '\n'; std::cout << "Correct predictions (Top-5): " << correct_predictions_top5 @@ -280,4 +319,4 @@ int main(int argc, char* argv[]) { << final_accuracy_top5 << "%" << '\n'; return 0; -} \ No newline at end of file +} From fe9785aa03740b0ee3c4b6c056e0bdca605fca15 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Tue, 24 Feb 2026 12:19:43 +0300 Subject: [PATCH 02/38] cl --- app/Graph/acc_check.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index 20ee1cbd..dbed4f62 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -59,7 +59,7 @@ int main(int argc, char* argv[]) { } } catch (const std::exception& e) { std::cerr << "Error: Invalid numeric argument: " << argv[i] - << ". Using default value: 1000" << e.what()<(j, i); + res[(a) * 28 * 28 + i * 28 + j] = channels[0].at(j, i); } } } From d76e10dc04866409461af1d2a4caa9fec232d121 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Tue, 24 Feb 2026 12:24:06 +0300 Subject: [PATCH 03/38] numPhoto --- app/Graph/acc_check.cpp | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index dbed4f62..9b4c0ee5 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -13,7 +13,7 @@ using namespace it_lab_ai; int main(int argc, char* argv[]) { std::string model_name = "alexnet_mnist"; RuntimeOptions options; - int numPhoto = 1000; + size_t numPhoto = 1000; for (int i = 1; i < argc; ++i) { if (std::string(argv[i]) == "--model" && i + 1 < argc) { @@ -183,7 +183,6 @@ int main(int argc, char* argv[]) { image_paths.reserve(numPhoto); true_labels.reserve(numPhoto); - size_t current_index = 0; total_images = 0; for (int class_id = 0; class_id < 1000; ++class_id) { From 25c23eb1bff61890591ce5f347cb47635b249830 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Tue, 24 Feb 2026 14:54:12 +0300 
Subject: [PATCH 04/38] --model in py_onnx --- app/Converters/parser_onnx.py | 42 +++++++++++++++++++++++++++++++---- 1 file changed, 38 insertions(+), 4 deletions(-) diff --git a/app/Converters/parser_onnx.py b/app/Converters/parser_onnx.py index 33b20fef..0c0b931a 100644 --- a/app/Converters/parser_onnx.py +++ b/app/Converters/parser_onnx.py @@ -1,9 +1,15 @@ +#!/usr/bin/env python3 import json -import onnx import os +import sys +import argparse +import onnx +import numpy as np from onnx import TensorProto from onnx import helper, numpy_helper from ultralytics import YOLO +import tensorflow as tf +from tensorflow.keras.models import load_model def convert_pt_to_onnx(pt_model_path, onnx_model_path=None): @@ -163,10 +169,38 @@ def default(self, obj): print(f"Модель успешно сохранена в {output_json_path}") +parser = argparse.ArgumentParser(description='Конвертация моделей в JSON формат') +parser.add_argument('model_name', type=str, + choices=['googlenet', 'densenet', 'resnet', 'yolo', 'alexnet'], + help='Имя модели для обработки') + +args = parser.parse_args() BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +MODELS_DIR = os.path.join(BASE_DIR, 'docs', 'models') +JSONS_DIR = os.path.join(BASE_DIR, 'docs', 'jsons') + +os.makedirs(MODELS_DIR, exist_ok=True) +os.makedirs(JSONS_DIR, exist_ok=True) + +model_files = { + 'googlenet': 'GoogLeNet.onnx', + 'densenet': 'densenet121_Opset16.onnx', + 'resnet': 'resnest101e_Opset16.onnx', + 'yolo': 'yolo11x-cls.pt' +} + +output_files = { + 'googlenet': 'googlenet_onnx_model.json', + 'densenet': 'densenet121_Opset16_onnx_model.json', + 'resnet': 'resnest101e_Opset16_onnx_model.json', + 'yolo': 'yolo11x-cls_onnx_model.json', +} + +model_filename = model_files[args.model_name] +output_filename = output_files[args.model_name] -MODEL_PATH = os.path.join(BASE_DIR, 'docs\\models', 'resnest101e_Opset16.onnx') -MODEL_DATA_PATH = os.path.join(BASE_DIR, 'docs\\jsons', 
'resnest101e_Opset16_onnx_model.json') +model_path = os.path.join(MODELS_DIR, model_filename) +output_path = os.path.join(JSONS_DIR, output_filename) -onnx_to_json(MODEL_PATH, MODEL_DATA_PATH) \ No newline at end of file +onnx_to_json(model_path, output_path) From 93bbb72512a11c5fc7233b67da22cd99f911e031 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Tue, 24 Feb 2026 16:20:30 +0300 Subject: [PATCH 05/38] load dataset --- app/Converters/download_imagenet.py | 32 +++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 app/Converters/download_imagenet.py diff --git a/app/Converters/download_imagenet.py b/app/Converters/download_imagenet.py new file mode 100644 index 00000000..96e1dc8c --- /dev/null +++ b/app/Converters/download_imagenet.py @@ -0,0 +1,32 @@ +# app/Converters/download_imagenet.py +import os +from datasets import load_dataset +from PIL import Image + + +base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +output_dir = os.path.join(base_dir, 'docs', 'imagenet-paste', 'validation') +os.makedirs(output_dir, exist_ok=True) + +print("Загрузка датасета helenqu/ImageNet-Paste...") +ds = load_dataset( + "helenqu/ImageNet-Paste", + split="validation", + trust_remote_code=True +) + +print(f"Датасет загружен. Всего записей: {len(ds)}") + +for i, item in enumerate(ds): + try: + image = item['image'] + + output_path = os.path.join(output_dir, f"image_{i}.jpg") + image.save(output_path, 'JPEG') + + except Exception as e: + print(f"Ошибка при сохранении изображения {i}: {e}") + continue + +print(f"\n✅ Готово! 
Сохранено {len(ds)} изображений в {output_dir}") +print(f"Размер датасета: {len(ds)} изображений") \ No newline at end of file From 6d562cf5fe5c56379c89156074882d6fcdd95440 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Tue, 24 Feb 2026 16:22:47 +0300 Subject: [PATCH 06/38] reqs.txt --- app/Converters/requirements.txt | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/app/Converters/requirements.txt b/app/Converters/requirements.txt index 7c48b536..b447200c 100644 --- a/app/Converters/requirements.txt +++ b/app/Converters/requirements.txt @@ -6,4 +6,6 @@ torch==2.2.1+cpu torchvision==0.17.1+cpu ultralytics>=8.0.0 numpy>=1.21.0 -protobuf>=3.20.0 \ No newline at end of file +protobuf>=3.20.0 +datasets>=2.14.0 +Pillow>=10.0.0 \ No newline at end of file From 2929f06d3428d77d581d0476ec7ef74072b47565 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Tue, 24 Feb 2026 16:30:25 +0300 Subject: [PATCH 07/38] load dataset --- app/Converters/download_imagenet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/Converters/download_imagenet.py b/app/Converters/download_imagenet.py index 96e1dc8c..059478e8 100644 --- a/app/Converters/download_imagenet.py +++ b/app/Converters/download_imagenet.py @@ -5,7 +5,7 @@ base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -output_dir = os.path.join(base_dir, 'docs', 'imagenet-paste', 'validation') +output_dir = os.path.join(base_dir, 'docs', 'ImageNet', 'test') # Изменено здесь os.makedirs(output_dir, exist_ok=True) print("Загрузка датасета helenqu/ImageNet-Paste...") From a4d16112b956717314bfd1a2abc35b20f15b279d Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Tue, 24 Feb 2026 16:31:26 +0300 Subject: [PATCH 08/38] add onnx acc --- .github/workflows/ci.yml | 157 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 156 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 
c73d242e..5387f823 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -229,7 +229,7 @@ jobs: token: ${{ secrets.CODECOV_TOKEN }} slug: embedded-dev-research/ITLabAI - evaluate-model: + evaluate-model-alexnet: runs-on: ubuntu-latest needs: [build-linux] permissions: @@ -319,3 +319,158 @@ jobs: git commit -m "[CI] Update accuracy: $(cat accuracy_value.txt)" git push origin master fi + + evaluate-models-onnx: + runs-on: ubuntu-latest + needs: [build-linux] + permissions: + contents: write + + strategy: + matrix: + model: [googlenet, densenet, resnet, yolo] + include: + - model: googlenet + parser: parser_onnx.py + model_file: GoogLeNet.onnx + model_path: docs/models/GoogLeNet.onnx + extra_args: "--onednn 10000" + - model: densenet + parser: parser_onnx.py + model_file: densenet121_Opset16.onnx + model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/densenet121_Opset16_timm/densenet121_Opset16.onnx?download= + extra_args: "--onednn 10000" + - model: resnet + parser: parser_onnx.py + model_file: resnest101e_Opset16.onnx + model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/resnest101e_Opset16_timm/resnest101e_Opset16.onnx?download= + extra_args: "--onednn 10000" + - model: yolo + parser: parser_onnx.py + model_file: yolo11x-cls.pt + model_url: https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo11x-cls.pt + extra_args: "--onednn 10000" + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Download binary and libs + uses: actions/download-artifact@v4 + with: + name: mnist-RELEASE + path: build/ + + - name: Set binary path + id: set_eval_binary + run: | + echo "EVAL_BINARY=build/bin/ACC" >> $GITHUB_OUTPUT + + - name: Install system dependencies + run: | + sudo apt-get update + sudo apt-get install -y libgtk-3-0 libtbb12 libjpeg-dev libpng-dev libtiff-dev libopenjp2-7 libdnnl3 + sudo ldconfig + + - name: Download model + run: | + mkdir -p docs/models + echo 
"Скачивание ${{ matrix.model_file }} из ${{ matrix.model_url }}" + wget -O docs/models/${{ matrix.model.file }} ${{ matrix.model.url }} + ls -la docs/models/ + + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' + + - name: Install Python dependencies from requirements.txt + run: | + cd app/Converters + pip install -r requirements.txt + cd ../.. + + - name: Generate model JSON + run: | + mkdir -p docs/jsons + cd app/Converters + python ${{ matrix.parser }} ${{ matrix.model }} + cd ../.. + echo "Сгенерированные JSON файлы:" + ls -la docs/jsons/ + + - name: Download ImageNet-Paste dataset + run: | + mkdir -p docs/ImageNet/test # Изменено с imagenet-paste/validation на ImageNet/test + cd app/Converters + python download_imagenet.py + cd ../.. + echo "📊 Статистика датасета:" + echo "Всего изображений: $(ls docs/ImageNet/test/*.jpg 2>/dev/null | wc -l)" + + - name: Prepare environment + run: | + chmod +x "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" + echo "LD_LIBRARY_PATH=$PWD/build/bin/all_libs:/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV + + - name: Run evaluation + run: | + DATASET_PATH="docs/ImageNet/test" # Изменено с imagenet-paste/validation на ImageNet/test + + echo "Запуск оценки для модели ${{ matrix.model }}" + echo "Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY }} --model ${{ matrix.model }} ${{ matrix.extra_args }}" + + "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" \ + --model ${{ matrix.model }} \ + ${{ matrix.extra_args }} > accuracy_${{ matrix.model }}.txt 2>&1 + + if [ $? 
-ne 0 ]; then + echo "Ошибка при оценке модели ${{ matrix.model }}" + cat accuracy_${{ matrix.model }}.txt + exit 1 + fi + + echo "Результат оценки:" + cat accuracy_${{ matrix.model }}.txt + + - name: Extract accuracy value + run: | + ACCURACY=$(grep -oE '[0-9]+\.?[0-9]*%' accuracy_${{ matrix.model }}.txt | head -1 || echo "0%") + echo "$ACCURACY" > accuracy_value_${{ matrix.model }}.txt + echo "Accuracy for ${{ matrix.model }}: $ACCURACY" + + - name: Upload accuracy artifacts + uses: actions/upload-artifact@v4 + with: + name: accuracy-${{ matrix.model }} + path: | + accuracy_${{ matrix.model }}.txt + accuracy_value_${{ matrix.model }}.txt + + - name: Update README for model (master only) + if: github.ref == 'refs/heads/master' + run: | + ACCURACY=$(cat accuracy_value_${{ matrix.model }}.txt | sed 's/%//g') + DATE=$(date '+%Y-%m-%d') + + if grep -q "" README.md; then + sed -i "s/.*/Accuracy: ${ACCURACY}% (updated: ${DATE})/" README.md + echo "Обновлена точность для ${{ matrix.model }} в README" + else + echo "Плейсхолдер для ${{ matrix.model }} не найден в README, добавляем в конец" + echo -e "\n## ${{ matrix.model }} Accuracy\nAccuracy: ${ACCURACY}% (updated: ${DATE})\n" >> README.md + fi + + - name: Commit and push changes (master only) + if: github.ref == 'refs/heads/master' + run: | + git config --global user.name "GitHub Actions" + git config --global user.email "actions@github.com" + git add README.md + if git diff-index --quiet HEAD --; then + echo "No changes to commit" + else + git commit -m "[CI] Update accuracy for ${{ matrix.model }}: $(cat accuracy_value_${{ matrix.model }}.txt)" + git push origin master + fi From 69c19fa07df83de9127e9d6f4517aadca5290541 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Fri, 27 Feb 2026 10:28:26 +0300 Subject: [PATCH 09/38] format --- .github/workflows/ci.yml | 31 +++++++++++++++---------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git 
a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5387f823..b09fde45 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -402,12 +402,11 @@ jobs: - name: Download ImageNet-Paste dataset run: | - mkdir -p docs/ImageNet/test # Изменено с imagenet-paste/validation на ImageNet/test - cd app/Converters - python download_imagenet.py - cd ../.. - echo "📊 Статистика датасета:" - echo "Всего изображений: $(ls docs/ImageNet/test/*.jpg 2>/dev/null | wc -l)" + mkdir -p docs/ImageNet/test # Изменено с imagenet-paste/validation на ImageNet/test + cd app/Converters + python download_imagenet.py + cd ../.. + echo "Всего изображений: $(ls docs/ImageNet/test/*.jpg 2>/dev/null | wc -l)" - name: Prepare environment run: | @@ -416,23 +415,23 @@ jobs: - name: Run evaluation run: | - DATASET_PATH="docs/ImageNet/test" # Изменено с imagenet-paste/validation на ImageNet/test + DATASET_PATH="docs/ImageNet/test" # Изменено с imagenet-paste/validation на ImageNet/test - echo "Запуск оценки для модели ${{ matrix.model }}" - echo "Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY }} --model ${{ matrix.model }} ${{ matrix.extra_args }}" + echo "Запуск оценки для модели ${{ matrix.model }}" + echo "Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY }} --model ${{ matrix.model }} ${{ matrix.extra_args }}" - "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" \ - --model ${{ matrix.model }} \ - ${{ matrix.extra_args }} > accuracy_${{ matrix.model }}.txt 2>&1 + "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" \ + --model ${{ matrix.model }} \ + ${{ matrix.extra_args }} > accuracy_${{ matrix.model }}.txt 2>&1 - if [ $? -ne 0 ]; then + if [ $? 
-ne 0 ]; then echo "Ошибка при оценке модели ${{ matrix.model }}" cat accuracy_${{ matrix.model }}.txt exit 1 - fi + fi - echo "Результат оценки:" - cat accuracy_${{ matrix.model }}.txt + echo "Результат оценки:" + cat accuracy_${{ matrix.model }}.txt - name: Extract accuracy value run: | From a5838d9dbd886873a8a84d88255f1e71ba05a1ba Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Fri, 27 Feb 2026 10:47:48 +0300 Subject: [PATCH 10/38] format --- .github/workflows/ci.yml | 257 +++++++++++++++++++-------------------- 1 file changed, 128 insertions(+), 129 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b09fde45..1487872e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -325,151 +325,150 @@ jobs: needs: [build-linux] permissions: contents: write - - strategy: - matrix: - model: [googlenet, densenet, resnet, yolo] + strategy: + matrix: + model: [googlenet, densenet, resnet, yolo] include: - - model: googlenet - parser: parser_onnx.py - model_file: GoogLeNet.onnx - model_path: docs/models/GoogLeNet.onnx - extra_args: "--onednn 10000" - - model: densenet - parser: parser_onnx.py - model_file: densenet121_Opset16.onnx - model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/densenet121_Opset16_timm/densenet121_Opset16.onnx?download= - extra_args: "--onednn 10000" - - model: resnet - parser: parser_onnx.py - model_file: resnest101e_Opset16.onnx - model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/resnest101e_Opset16_timm/resnest101e_Opset16.onnx?download= - extra_args: "--onednn 10000" - - model: yolo - parser: parser_onnx.py - model_file: yolo11x-cls.pt - model_url: https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo11x-cls.pt - extra_args: "--onednn 10000" + - model: googlenet + parser: parser_onnx.py + model_file: GoogLeNet.onnx + model_path: docs/models/GoogLeNet.onnx + extra_args: "--onednn 10000" 
+ - model: densenet + parser: parser_onnx.py + model_file: densenet121_Opset16.onnx + model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/densenet121_Opset16_timm/densenet121_Opset16.onnx?download= + extra_args: "--onednn 10000" + - model: resnet + parser: parser_onnx.py + model_file: resnest101e_Opset16.onnx + model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/resnest101e_Opset16_timm/resnest101e_Opset16.onnx?download= + extra_args: "--onednn 10000" + - model: yolo + parser: parser_onnx.py + model_file: yolo11x-cls.pt + model_url: https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo11x-cls.pt + extra_args: "--onednn 10000" - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 - - name: Download binary and libs - uses: actions/download-artifact@v4 - with: - name: mnist-RELEASE - path: build/ + - name: Download binary and libs + uses: actions/download-artifact@v4 + with: + name: mnist-RELEASE + path: build/ - - name: Set binary path - id: set_eval_binary - run: | - echo "EVAL_BINARY=build/bin/ACC" >> $GITHUB_OUTPUT + - name: Set binary path + id: set_eval_binary + run: | + echo "EVAL_BINARY=build/bin/ACC" >> $GITHUB_OUTPUT - - name: Install system dependencies - run: | - sudo apt-get update - sudo apt-get install -y libgtk-3-0 libtbb12 libjpeg-dev libpng-dev libtiff-dev libopenjp2-7 libdnnl3 - sudo ldconfig + - name: Install system dependencies + run: | + sudo apt-get update + sudo apt-get install -y libgtk-3-0 libtbb12 libjpeg-dev libpng-dev libtiff-dev libopenjp2-7 libdnnl3 + sudo ldconfig - - name: Download model - run: | - mkdir -p docs/models - echo "Скачивание ${{ matrix.model_file }} из ${{ matrix.model_url }}" - wget -O docs/models/${{ matrix.model.file }} ${{ matrix.model.url }} - ls -la docs/models/ + - name: Download model + run: | + mkdir -p docs/models + echo "Скачивание ${{ matrix.model_file }} из ${{ 
matrix.model_url }}" + wget -O docs/models/${{ matrix.model_file }} ${{ matrix.model_url }} + ls -la docs/models/ - - name: Setup Python - uses: actions/setup-python@v4 - with: - python-version: '3.10' - - - name: Install Python dependencies from requirements.txt - run: | - cd app/Converters - pip install -r requirements.txt - cd ../.. + - name: Setup Python + uses: actions/setup-python@v4 + with: + python-version: '3.10' - - name: Generate model JSON - run: | - mkdir -p docs/jsons - cd app/Converters - python ${{ matrix.parser }} ${{ matrix.model }} - cd ../.. - echo "Сгенерированные JSON файлы:" - ls -la docs/jsons/ + - name: Install Python dependencies from requirements.txt + run: | + cd app/Converters + pip install -r requirements.txt + cd ../.. - - name: Download ImageNet-Paste dataset - run: | - mkdir -p docs/ImageNet/test # Изменено с imagenet-paste/validation на ImageNet/test - cd app/Converters - python download_imagenet.py - cd ../.. - echo "Всего изображений: $(ls docs/ImageNet/test/*.jpg 2>/dev/null | wc -l)" + - name: Generate model JSON + run: | + mkdir -p docs/jsons + cd app/Converters + python ${{ matrix.parser }} ${{ matrix.model }} + cd ../.. + echo "Сгенерированные JSON файлы:" + ls -la docs/jsons/ - - name: Prepare environment - run: | - chmod +x "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" - echo "LD_LIBRARY_PATH=$PWD/build/bin/all_libs:/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV - - - name: Run evaluation - run: | - DATASET_PATH="docs/ImageNet/test" # Изменено с imagenet-paste/validation на ImageNet/test + - name: Download ImageNet-Paste dataset + run: | + mkdir -p docs/ImageNet/test # Изменено с imagenet-paste/validation на ImageNet/test + cd app/Converters + python download_imagenet.py + cd ../.. 
+ echo "Всего изображений: $(ls docs/ImageNet/test/*.jpg 2>/dev/null | wc -l)" + + - name: Prepare environment + run: | + chmod +x "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" + echo "LD_LIBRARY_PATH=$PWD/build/bin/all_libs:/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV + + - name: Run evaluation + run: | + DATASET_PATH="docs/ImageNet/test" # Изменено с imagenet-paste/validation на ImageNet/test - echo "Запуск оценки для модели ${{ matrix.model }}" - echo "Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY }} --model ${{ matrix.model }} ${{ matrix.extra_args }}" + echo "Запуск оценки для модели ${{ matrix.model }}" + echo "Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY }} --model ${{ matrix.model }} ${{ matrix.extra_args }}" - "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" \ - --model ${{ matrix.model }} \ - ${{ matrix.extra_args }} > accuracy_${{ matrix.model }}.txt 2>&1 + "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" \ + --model ${{ matrix.model }} \ + ${{ matrix.extra_args }} > accuracy_${{ matrix.model }}.txt 2>&1 - if [ $? -ne 0 ]; then - echo "Ошибка при оценке модели ${{ matrix.model }}" - cat accuracy_${{ matrix.model }}.txt - exit 1 - fi + if [ $? 
-ne 0 ]; then + echo "Ошибка при оценке модели ${{ matrix.model }}" + cat accuracy_${{ matrix.model }}.txt + exit 1 + fi - echo "Результат оценки:" - cat accuracy_${{ matrix.model }}.txt + echo "Результат оценки:" + cat accuracy_${{ matrix.model }}.txt - - name: Extract accuracy value - run: | - ACCURACY=$(grep -oE '[0-9]+\.?[0-9]*%' accuracy_${{ matrix.model }}.txt | head -1 || echo "0%") - echo "$ACCURACY" > accuracy_value_${{ matrix.model }}.txt - echo "Accuracy for ${{ matrix.model }}: $ACCURACY" + - name: Extract accuracy value + run: | + ACCURACY=$(grep -oE '[0-9]+\.?[0-9]*%' accuracy_${{ matrix.model }}.txt | head -1 || echo "0%") + echo "$ACCURACY" > accuracy_value_${{ matrix.model }}.txt + echo "Accuracy for ${{ matrix.model }}: $ACCURACY" - - name: Upload accuracy artifacts - uses: actions/upload-artifact@v4 - with: - name: accuracy-${{ matrix.model }} - path: | - accuracy_${{ matrix.model }}.txt - accuracy_value_${{ matrix.model }}.txt + - name: Upload accuracy artifacts + uses: actions/upload-artifact@v4 + with: + name: accuracy-${{ matrix.model }} + path: | + accuracy_${{ matrix.model }}.txt + accuracy_value_${{ matrix.model }}.txt - - name: Update README for model (master only) - if: github.ref == 'refs/heads/master' - run: | - ACCURACY=$(cat accuracy_value_${{ matrix.model }}.txt | sed 's/%//g') - DATE=$(date '+%Y-%m-%d') + - name: Update README for model (master only) + if: github.ref == 'refs/heads/master' + run: | + ACCURACY=$(cat accuracy_value_${{ matrix.model }}.txt | sed 's/%//g') + DATE=$(date '+%Y-%m-%d') - if grep -q "" README.md; then - sed -i "s/.*/Accuracy: ${ACCURACY}% (updated: ${DATE})/" README.md - echo "Обновлена точность для ${{ matrix.model }} в README" - else - echo "Плейсхолдер для ${{ matrix.model }} не найден в README, добавляем в конец" - echo -e "\n## ${{ matrix.model }} Accuracy\nAccuracy: ${ACCURACY}% (updated: ${DATE})\n" >> README.md - fi + if grep -q "" README.md; then + sed -i "s/.*/Accuracy: ${ACCURACY}% (updated: 
${DATE})/" README.md + echo "Обновлена точность для ${{ matrix.model }} в README" + else + echo "Плейсхолдер для ${{ matrix.model }} не найден в README, добавляем в конец" + echo -e "\n## ${{ matrix.model }} Accuracy\nAccuracy: ${ACCURACY}% (updated: ${DATE})\n" >> README.md + fi - - name: Commit and push changes (master only) - if: github.ref == 'refs/heads/master' - run: | - git config --global user.name "GitHub Actions" - git config --global user.email "actions@github.com" - git add README.md - if git diff-index --quiet HEAD --; then - echo "No changes to commit" - else - git commit -m "[CI] Update accuracy for ${{ matrix.model }}: $(cat accuracy_value_${{ matrix.model }}.txt)" - git push origin master - fi + - name: Commit and push changes (master only) + if: github.ref == 'refs/heads/master' + run: | + git config --global user.name "GitHub Actions" + git config --global user.email "actions@github.com" + git add README.md + if git diff-index --quiet HEAD --; then + echo "No changes to commit" + else + git commit -m "[CI] Update accuracy for ${{ matrix.model }}: $(cat accuracy_value_${{ matrix.model }}.txt)" + git push origin master + fi From 3f9fa3940dbbf5afc0348c021200dfdca844e875 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Fri, 27 Feb 2026 11:39:19 +0300 Subject: [PATCH 11/38] googlenet --- .github/workflows/ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1487872e..4202bf3c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -333,6 +333,7 @@ jobs: parser: parser_onnx.py model_file: GoogLeNet.onnx model_path: docs/models/GoogLeNet.onnx + model_url: '' extra_args: "--onednn 10000" - model: densenet parser: parser_onnx.py @@ -372,7 +373,8 @@ jobs: sudo apt-get install -y libgtk-3-0 libtbb12 libjpeg-dev libpng-dev libtiff-dev libopenjp2-7 libdnnl3 sudo ldconfig - - name: Download model + - name: Download 
model (if URL provided) + if: matrix.model_url != '' run: | mkdir -p docs/models echo "Скачивание ${{ matrix.model_file }} из ${{ matrix.model_url }}" From 5c4e528a5cb3dd092675be6ffbcb9b6d16639bbf Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Fri, 27 Feb 2026 12:44:28 +0300 Subject: [PATCH 12/38] reqs --- app/Converters/requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/Converters/requirements.txt b/app/Converters/requirements.txt index b447200c..622b0e7d 100644 --- a/app/Converters/requirements.txt +++ b/app/Converters/requirements.txt @@ -5,7 +5,7 @@ onnx>=1.15.0 torch==2.2.1+cpu torchvision==0.17.1+cpu ultralytics>=8.0.0 -numpy>=1.21.0 +numpy>=1.21.0,<2.0.0 protobuf>=3.20.0 datasets>=2.14.0 Pillow>=10.0.0 \ No newline at end of file From 7b969235d0a46096a909db1d5cfd9ae1eaa926ea Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Fri, 27 Feb 2026 13:21:20 +0300 Subject: [PATCH 13/38] cashe --- .github/workflows/ci.yml | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4202bf3c..a9901ce5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -401,9 +401,20 @@ jobs: echo "Сгенерированные JSON файлы:" ls -la docs/jsons/ + - name: Cache ImageNet-Paste dataset + id: cache-imagenet + uses: actions/cache@v4 + with: + path: docs/ImageNet + key: imagenet-paste-v1-${{ hashFiles('app/Converters/download_imagenet.py') }} + restore-keys: | + imagenet-paste-v1- + imagenet-paste- + - name: Download ImageNet-Paste dataset + if: steps.cache-imagenet.outputs.cache-hit != 'true' run: | - mkdir -p docs/ImageNet/test # Изменено с imagenet-paste/validation на ImageNet/test + mkdir -p docs/ImageNet/test cd app/Converters python download_imagenet.py cd ../.. 
From 392ea50d40c4f601e3d7da22e7acafa8ab1f1a9b Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Fri, 27 Feb 2026 13:29:30 +0300 Subject: [PATCH 14/38] Update ci.yml --- .github/workflows/ci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a9901ce5..2894c0d3 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -407,9 +407,6 @@ jobs: with: path: docs/ImageNet key: imagenet-paste-v1-${{ hashFiles('app/Converters/download_imagenet.py') }} - restore-keys: | - imagenet-paste-v1- - imagenet-paste- - name: Download ImageNet-Paste dataset if: steps.cache-imagenet.outputs.cache-hit != 'true' From 6c3b056825d435e4138b10ed96b742c53f606437 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:03:05 +0300 Subject: [PATCH 15/38] secret --- .github/workflows/ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2894c0d3..cb5a6fa1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -410,12 +410,14 @@ jobs: - name: Download ImageNet-Paste dataset if: steps.cache-imagenet.outputs.cache-hit != 'true' + env: + HF_TOKEN: ${{ secrets.HF_TOKEN }} run: | mkdir -p docs/ImageNet/test cd app/Converters + # Токен автоматически используется huggingface-hub библиотекой python download_imagenet.py cd ../.. 
- echo "Всего изображений: $(ls docs/ImageNet/test/*.jpg 2>/dev/null | wc -l)" - name: Prepare environment run: | From 2554a4502156bdc0658a844b3edf21a571251044 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Fri, 27 Feb 2026 16:20:55 +0300 Subject: [PATCH 16/38] usw token --- app/Converters/download_imagenet.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/app/Converters/download_imagenet.py b/app/Converters/download_imagenet.py index 059478e8..03360fa0 100644 --- a/app/Converters/download_imagenet.py +++ b/app/Converters/download_imagenet.py @@ -1,18 +1,24 @@ -# app/Converters/download_imagenet.py import os from datasets import load_dataset +from huggingface_hub import login from PIL import Image +hf_token = os.environ.get('HF_TOKEN') +if hf_token: + print("Авторизация на Hugging Face Hub...") + login(token=hf_token) +else: + print("⚠Внимание: HF_TOKEN не найден, могут быть ограничения rate limiting") base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -output_dir = os.path.join(base_dir, 'docs', 'ImageNet', 'test') # Изменено здесь +output_dir = os.path.join(base_dir, 'docs', 'ImageNet', 'test') os.makedirs(output_dir, exist_ok=True) print("Загрузка датасета helenqu/ImageNet-Paste...") ds = load_dataset( "helenqu/ImageNet-Paste", split="validation", - trust_remote_code=True + token=hf_token ) print(f"Датасет загружен. Всего записей: {len(ds)}") @@ -20,13 +26,15 @@ for i, item in enumerate(ds): try: image = item['image'] - output_path = os.path.join(output_dir, f"image_{i}.jpg") image.save(output_path, 'JPEG') + # Прогресс каждые 1000 изображений + if (i + 1) % 1000 == 0: + print(f"Сохранено {i + 1}/{len(ds)} изображений...") + except Exception as e: print(f"Ошибка при сохранении изображения {i}: {e}") continue -print(f"\n✅ Готово! Сохранено {len(ds)} изображений в {output_dir}") -print(f"Размер датасета: {len(ds)} изображений") \ No newline at end of file +print(f"\nГотово! 
Сохранено {len(ds)} изображений в {output_dir}") \ No newline at end of file From aab38cc2662f0d3a240491bb7015b0a1994d5730 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Sat, 28 Feb 2026 11:15:01 +0300 Subject: [PATCH 17/38] restore_keys --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cb5a6fa1..5632e8ef 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -407,6 +407,10 @@ jobs: with: path: docs/ImageNet key: imagenet-paste-v1-${{ hashFiles('app/Converters/download_imagenet.py') }} + restore-keys: | + imagenet-paste-v1-${{ hashFiles('app/Converters/download_imagenet.py') }}- + imagenet-paste-v1- + imagenet-paste- - name: Download ImageNet-Paste dataset if: steps.cache-imagenet.outputs.cache-hit != 'true' From 978481feb34e70fb0068defc3b83d0b13629f9e2 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Mon, 2 Mar 2026 14:48:00 +0300 Subject: [PATCH 18/38] Update ci.yml --- .github/workflows/ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5632e8ef..15f8eef9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -326,6 +326,7 @@ jobs: permissions: contents: write strategy: + fail-fast: false matrix: model: [googlenet, densenet, resnet, yolo] include: From 7748c55dd70e34054ac1e5b5b5b6524c1ca50fda Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Mon, 2 Mar 2026 15:05:03 +0300 Subject: [PATCH 19/38] restore-save --- .github/workflows/ci.yml | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 15f8eef9..20b49ef6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -403,27 +403,32 @@ jobs: ls -la docs/jsons/ - name: Cache ImageNet-Paste 
dataset - id: cache-imagenet - uses: actions/cache@v4 + id: cache-imagenet-restore + uses: actions/cache/restore@v4 with: path: docs/ImageNet key: imagenet-paste-v1-${{ hashFiles('app/Converters/download_imagenet.py') }} restore-keys: | - imagenet-paste-v1-${{ hashFiles('app/Converters/download_imagenet.py') }}- imagenet-paste-v1- imagenet-paste- - name: Download ImageNet-Paste dataset - if: steps.cache-imagenet.outputs.cache-hit != 'true' + if: steps.cache-imagenet-restore.outputs.cache-hit != 'true' env: HF_TOKEN: ${{ secrets.HF_TOKEN }} run: | mkdir -p docs/ImageNet/test cd app/Converters - # Токен автоматически используется huggingface-hub библиотекой python download_imagenet.py cd ../.. + - name: Save ImageNet-Paste dataset + if: steps.cache-imagenet-restore.outputs.cache-hit != 'true' + uses: actions/cache/save@v4 + with: + path: docs/ImageNet + key: imagenet-paste-v1-${{ hashFiles('app/Converters/download_imagenet.py') }} + - name: Prepare environment run: | chmod +x "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" @@ -431,7 +436,7 @@ jobs: - name: Run evaluation run: | - DATASET_PATH="docs/ImageNet/test" # Изменено с imagenet-paste/validation на ImageNet/test + DATASET_PATH="docs/ImageNet/test" echo "Запуск оценки для модели ${{ matrix.model }}" echo "Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY }} --model ${{ matrix.model }} ${{ matrix.extra_args }}" From c56137437b9152f7c6b495350f92480802e19b5e Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Tue, 3 Mar 2026 15:21:19 +0300 Subject: [PATCH 20/38] debug --- .github/workflows/ci.yml | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 20b49ef6..2e9986db 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -335,22 +335,22 @@ jobs: model_file: GoogLeNet.onnx model_path: docs/models/GoogLeNet.onnx model_url: '' - extra_args: 
"--onednn 10000" + extra_args: "--onednn 100" - model: densenet parser: parser_onnx.py model_file: densenet121_Opset16.onnx model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/densenet121_Opset16_timm/densenet121_Opset16.onnx?download= - extra_args: "--onednn 10000" + extra_args: "--onednn 100" - model: resnet parser: parser_onnx.py model_file: resnest101e_Opset16.onnx model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/resnest101e_Opset16_timm/resnest101e_Opset16.onnx?download= - extra_args: "--onednn 10000" + extra_args: "--onednn 100" - model: yolo parser: parser_onnx.py model_file: yolo11x-cls.pt model_url: https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo11x-cls.pt - extra_args: "--onednn 10000" + extra_args: "--onednn 100" steps: - uses: actions/checkout@v4 @@ -401,6 +401,17 @@ jobs: cd ../.. echo "Сгенерированные JSON файлы:" ls -la docs/jsons/ + + - name: Validate densenet model + run: | + echo "Проверка модели densenet:" + ls -la docs/models/densenet121_Opset16.onnx + file docs/models/densenet121_Opset16.onnx + echo "Размер: $(wc -c < docs/models/densenet121_Opset16.onnx) байт" + + echo "Проверка JSON:" + ls -la docs/jsons/densenet121_Opset16_onnx_model.json + head -20 docs/jsons/densenet121_Opset16_onnx_model.json - name: Cache ImageNet-Paste dataset id: cache-imagenet-restore @@ -433,7 +444,17 @@ jobs: run: | chmod +x "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" echo "LD_LIBRARY_PATH=$PWD/build/bin/all_libs:/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV - + - name: Debug dataset before evaluation + run: | + echo "Проверка датасета:" + echo "Путь: docs/ImageNet/test/" + ls -la docs/ImageNet/test/ | head -20 + echo "Количество .jpg файлов: $(find docs/ImageNet/test/ -name "*.jpg" | wc -l)" + echo "Права доступа:" + ls -ld docs/ImageNet/test/ + echo "Первый файл (если есть):" + find docs/ImageNet/test/ -name "*.jpg" | head -1 | xargs file || echo "Нет файлов" + - name: Run evaluation 
run: | DATASET_PATH="docs/ImageNet/test" From 4ff9e1db6e983b23689a381879b138b67cd0c8cd Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Tue, 3 Mar 2026 15:55:02 +0300 Subject: [PATCH 21/38] check del --- .github/workflows/ci.yml | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2e9986db..f9677f62 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -401,17 +401,6 @@ jobs: cd ../.. echo "Сгенерированные JSON файлы:" ls -la docs/jsons/ - - - name: Validate densenet model - run: | - echo "Проверка модели densenet:" - ls -la docs/models/densenet121_Opset16.onnx - file docs/models/densenet121_Opset16.onnx - echo "Размер: $(wc -c < docs/models/densenet121_Opset16.onnx) байт" - - echo "Проверка JSON:" - ls -la docs/jsons/densenet121_Opset16_onnx_model.json - head -20 docs/jsons/densenet121_Opset16_onnx_model.json - name: Cache ImageNet-Paste dataset id: cache-imagenet-restore @@ -444,6 +433,7 @@ jobs: run: | chmod +x "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" echo "LD_LIBRARY_PATH=$PWD/build/bin/all_libs:/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV + - name: Debug dataset before evaluation run: | echo "Проверка датасета:" From 9dc92bb0559b61e4ec952e506305049bb9e835fc Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Tue, 3 Mar 2026 15:55:32 +0300 Subject: [PATCH 22/38] download to folders --- app/Converters/download_imagenet.py | 84 ++++++++++++++++++++++++++--- 1 file changed, 77 insertions(+), 7 deletions(-) diff --git a/app/Converters/download_imagenet.py b/app/Converters/download_imagenet.py index 03360fa0..c6d9d1a8 100644 --- a/app/Converters/download_imagenet.py +++ b/app/Converters/download_imagenet.py @@ -2,13 +2,14 @@ from datasets import load_dataset from huggingface_hub import login from PIL import Image +from collections import defaultdict hf_token = os.environ.get('HF_TOKEN') if hf_token: 
print("Авторизация на Hugging Face Hub...") login(token=hf_token) else: - print("⚠Внимание: HF_TOKEN не найден, могут быть ограничения rate limiting") + print("Внимание: HF_TOKEN не найден, могут быть ограничения rate limiting") base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) output_dir = os.path.join(base_dir, 'docs', 'ImageNet', 'test') @@ -23,18 +24,87 @@ print(f"Датасет загружен. Всего записей: {len(ds)}") +print(f"Ключи первой записи: {ds[0].keys()}") +if 'label' in ds[0]: + print(f"Пример метки: {ds[0]['label']}") + print(f"Тип метки: {type(ds[0]['label'])}") + +print("\nСоздание папок для классов 00000-00999 в test/...") +for i in range(1000): + class_folder = os.path.join(output_dir, f"{i:05d}") + os.makedirs(class_folder, exist_ok=True) +print("Папки созданы!") + +counters = defaultdict(int) + +total_images = len(ds) +images_per_class = 50 +max_images = 1000 * images_per_class + +print(f"\nНачинаем сохранение {min(total_images, max_images)} изображений...") +print(f"По {images_per_class} изображений в каждой из 1000 папок") +print(f"Путь: {output_dir}/00000/ ... 
/00999/") + +saved_count = 0 +skipped_count = 0 + for i, item in enumerate(ds): + if saved_count >= max_images: + print(f"\nДостигнут лимит в {max_images} изображений") + break + try: image = item['image'] - output_path = os.path.join(output_dir, f"image_{i}.jpg") - image.save(output_path, 'JPEG') + if 'label' in item: + class_id = item['label'] + elif 'labels' in item: + class_id = item['labels'] + else: + skipped_count += 1 + if skipped_count % 100 == 0: + print(f"Пропущено {skipped_count} изображений: нет метки класса") + continue - # Прогресс каждые 1000 изображений - if (i + 1) % 1000 == 0: - print(f"Сохранено {i + 1}/{len(ds)} изображений...") + if not isinstance(class_id, (int, float)) or class_id < 0 or class_id >= 1000: + skipped_count += 1 + if skipped_count % 100 == 0: + print(f"Пропущено {skipped_count} изображений: некорректный class_id {class_id}") + continue + + class_id_int = int(class_id) + + if counters[class_id_int] >= images_per_class: + continue + + class_folder = os.path.join(output_dir, f"{class_id_int:05d}") + filename = f"image_{counters[class_id_int]}.jpg" + output_path = os.path.join(class_folder, filename) + + image.save(output_path, 'JPEG') + counters[class_id_int] += 1 + saved_count += 1 except Exception as e: print(f"Ошибка при сохранении изображения {i}: {e}") continue -print(f"\nГотово! 
Сохранено {len(ds)} изображений в {output_dir}") \ No newline at end of file +print(f"\n{'=' * 50}") +print(f"ГОТОВО!") +print(f"{'=' * 50}") +print(f"Всего сохранено: {saved_count} изображений") +print(f"Пропущено: {skipped_count} изображений") +print(f"Распределение по первым 10 классам:") +for class_id in range(10): + print(f" Класс {class_id:05d}: {counters[class_id]} изображений") + +classes_with_50 = sum(1 for c in range(1000) if counters[c] == 50) +print(f"\nКлассов с ровно 50 изображениями: {classes_with_50}/1000") + +if classes_with_50 < 1000: + print("\nКлассы с недостаточным количеством:") + for class_id in range(1000): + if counters[class_id] < 50 and counters[class_id] > 0: + print(f" Класс {class_id:05d}: только {counters[class_id]} изображений") + +print(f"\nПуть к данным: {output_dir}") +print(f"Пример: {output_dir}/00042/image_0.jpg") \ No newline at end of file From df7b5a75bc3d163211135f79d0b3164914752830 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Wed, 4 Mar 2026 11:18:42 +0300 Subject: [PATCH 23/38] cashe json and 10 for resnet --- .github/workflows/ci.yml | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f9677f62..875eae1f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -345,7 +345,7 @@ jobs: parser: parser_onnx.py model_file: resnest101e_Opset16.onnx model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/resnest101e_Opset16_timm/resnest101e_Opset16.onnx?download= - extra_args: "--onednn 100" + extra_args: "--onednn 10" - model: yolo parser: parser_onnx.py model_file: yolo11x-cls.pt @@ -393,14 +393,23 @@ jobs: pip install -r requirements.txt cd ../.. 
+ - name: Cache model JSON files + id: cache-model-json + uses: actions/cache@v4 + with: + path: docs/jsons + key: model-json-${{ matrix.model }}-${{ hashFiles('app/Converters/parser_onnx.py', 'app/Converters/requirements.txt') }} + restore-keys: | + model-json-${{ matrix.model }}- + model-json- + - name: Generate model JSON + if: steps.cache-model-json.outputs.cache-hit != 'true' run: | mkdir -p docs/jsons cd app/Converters python ${{ matrix.parser }} ${{ matrix.model }} cd ../.. - echo "Сгенерированные JSON файлы:" - ls -la docs/jsons/ - name: Cache ImageNet-Paste dataset id: cache-imagenet-restore @@ -433,17 +442,6 @@ jobs: run: | chmod +x "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" echo "LD_LIBRARY_PATH=$PWD/build/bin/all_libs:/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV - - - name: Debug dataset before evaluation - run: | - echo "Проверка датасета:" - echo "Путь: docs/ImageNet/test/" - ls -la docs/ImageNet/test/ | head -20 - echo "Количество .jpg файлов: $(find docs/ImageNet/test/ -name "*.jpg" | wc -l)" - echo "Права доступа:" - ls -ld docs/ImageNet/test/ - echo "Первый файл (если есть):" - find docs/ImageNet/test/ -name "*.jpg" | head -1 | xargs file || echo "Нет файлов" - name: Run evaluation run: | From 7ff4e1bfa4fea213cdfc011a815b17e502e27fd8 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Wed, 4 Mar 2026 17:08:17 +0300 Subject: [PATCH 24/38] debug --- .github/workflows/ci.yml | 124 +++++++++++++++++++++++++++++++++++---- 1 file changed, 113 insertions(+), 11 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 875eae1f..cb586102 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -446,22 +446,124 @@ jobs: - name: Run evaluation run: | DATASET_PATH="docs/ImageNet/test" + MODEL="${{ matrix.model }}" + EXTRA_ARGS="${{ matrix.extra_args }}" - echo "Запуск оценки для модели ${{ matrix.model }}" - echo "Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY 
}} --model ${{ matrix.model }} ${{ matrix.extra_args }}" + echo "===================================" + echo "🚀 Запуск оценки для модели $MODEL" + echo "📋 Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY }} --model $MODEL $EXTRA_ARGS" + echo "===================================" - "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" \ - --model ${{ matrix.model }} \ - ${{ matrix.extra_args }} > accuracy_${{ matrix.model }}.txt 2>&1 + echo "📊 Системная информация до запуска:" + echo "--- Память ---" + free -h + echo "--- CPU ---" + top -bn1 | head -5 + echo "--- Диск ---" + df -h docs/ImageNet/test/ + + # Запускаем процесс с мониторингом + TIMESTAMP=$(date +%s) + MONITOR_LOG="monitor_$MODEL.log" + + # Функция мониторинга + monitor_process() { + local pid=$1 + local log_file=$2 - if [ $? -ne 0 ]; then - echo "Ошибка при оценке модели ${{ matrix.model }}" - cat accuracy_${{ matrix.model }}.txt - exit 1 + echo "📊 Мониторинг процесса $pid..." + + while kill -0 $pid 2>/dev/null; do + # Время + NOW=$(date '+%H:%M:%S') + + # Память процесса + MEM=$(ps -o rss= -p $pid 2>/dev/null | awk '{print $1/1024 " MB"}' || echo "N/A") + + # CPU процесса + CPU=$(ps -o pcpu= -p $pid 2>/dev/null || echo "N/A") + + # Общая память системы + TOTAL_MEM=$(free -m | awk 'NR==2{print $3 " MB / " $2 " MB"}') + + echo "[$NOW] PID: $pid | MEM: $MEM | CPU: $CPU% | System MEM: $TOTAL_MEM" >> $log_file + + sleep 2 + done + + echo "📊 Процесс $pid завершен" >> $log_file + } + + # Запускаем основную программу в фоне + echo "▶️ Запуск ACC в фоновом режиме..." + "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" \ + --model $MODEL \ + $EXTRA_ARGS > accuracy_$MODEL.txt 2>&1 & + ACC_PID=$! + + echo "📊 PID процесса: $ACC_PID" + + # Запускаем мониторинг в фоне + monitor_process $ACC_PID $MONITOR_LOG & + MONITOR_PID=$! + + # Ждем завершения ACC + wait $ACC_PID + EXIT_CODE=$? 
+ + # Убиваем мониторинг + kill $MONITOR_PID 2>/dev/null || true + + echo "===================================" + echo "📊 Результаты мониторинга:" + if [ -f "$MONITOR_LOG" ]; then + cat "$MONITOR_LOG" + else + echo "Лог мониторинга не создан" fi + + echo "===================================" + echo "📊 Системная информация после запуска:" + free -h + + echo "===================================" + echo "📊 Код завершения: $EXIT_CODE" + + if [ $EXIT_CODE -eq 143 ]; then + echo "❌ Ошибка 143 (SIGTERM) - процесс убит системой" + echo "Возможные причины:" + echo " - Нехватка памяти (OOM Killer)" + echo " - Превышение лимитов CPU" + echo " - Timeout от GitHub Actions" + + # Проверка OOM Killer + if sudo dmesg | tail -20 | grep -i "killed process" | grep -q "$ACC_PID"; then + echo "✅ Подтверждено: процесс убит OOM Killer" + sudo dmesg | tail -20 | grep -i "killed process" | tail -5 + else + echo "❌ Не найдено подтверждение OOM Killer в логах" + sudo dmesg | tail -20 + fi - echo "Результат оценки:" - cat accuracy_${{ matrix.model }}.txt + elif [ $EXIT_CODE -ne 0 ]; then + echo "❌ Ошибка при оценке модели $MODEL (код: $EXIT_CODE)" + else + echo "✅ Оценка успешно завершена" + fi + + # Всегда показываем лог ACC + echo "===================================" + echo "📋 Лог ACC:" + if [ -f "accuracy_$MODEL.txt" ]; then + cat "accuracy_$MODEL.txt" + else + echo "Файл лога не создан" + fi + + # Выход с ошибкой если нужно + if [ $EXIT_CODE -ne 0 ]; then + exit $EXIT_CODE + fi - name: Extract accuracy value run: | From 5c9312735d366afcdadf7b1576a061f26c48b938 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Wed, 4 Mar 2026 17:15:08 +0300 Subject: [PATCH 25/38] Update ci.yml --- .github/workflows/ci.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index cb586102..9550ad3d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -335,7 +335,7 @@ jobs: 
model_file: GoogLeNet.onnx model_path: docs/models/GoogLeNet.onnx model_url: '' - extra_args: "--onednn 100" + extra_args: "--onednn 1000" - model: densenet parser: parser_onnx.py model_file: densenet121_Opset16.onnx @@ -345,7 +345,7 @@ jobs: parser: parser_onnx.py model_file: resnest101e_Opset16.onnx model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/resnest101e_Opset16_timm/resnest101e_Opset16.onnx?download= - extra_args: "--onednn 10" + extra_args: "--onednn 100" - model: yolo parser: parser_onnx.py model_file: yolo11x-cls.pt From b0924af44122d8ef8b7270e3db7834acc7f75821 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Wed, 4 Mar 2026 18:14:35 +0300 Subject: [PATCH 26/38] Update ci.yml --- .github/workflows/ci.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9550ad3d..aabf2ae9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -442,6 +442,14 @@ jobs: run: | chmod +x "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" echo "LD_LIBRARY_PATH=$PWD/build/bin/all_libs:/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV + + - name: Increase system limits + run: | + sudo prlimit --pid $$ --as=unlimited + ulimit -s unlimited + ulimit -c unlimited + echo "Новые лимиты:" + ulimit -a - name: Run evaluation run: | From 73f30461e31963701c2bf004ff9c03761af8fb21 Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Fri, 6 Mar 2026 17:49:35 +0300 Subject: [PATCH 27/38] Update ci.yml --- .github/workflows/ci.yml | 56 ++++++++++++---------------------------- 1 file changed, 17 insertions(+), 39 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index aabf2ae9..6f5b0b4c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -457,12 +457,10 @@ jobs: MODEL="${{ matrix.model }}" EXTRA_ARGS="${{ matrix.extra_args }}" - echo 
"===================================" - echo "🚀 Запуск оценки для модели $MODEL" - echo "📋 Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY }} --model $MODEL $EXTRA_ARGS" - echo "===================================" + echo "Запуск оценки для модели $MODEL" + echo "Команда: ${{ steps.set_eval_binary.outputs.EVAL_BINARY }} --model $MODEL $EXTRA_ARGS" - echo "📊 Системная информация до запуска:" + echo "Системная информация до запуска:" echo "--- Память ---" free -h echo "--- CPU ---" @@ -470,28 +468,22 @@ jobs: echo "--- Диск ---" df -h docs/ImageNet/test/ - # Запускаем процесс с мониторингом TIMESTAMP=$(date +%s) MONITOR_LOG="monitor_$MODEL.log" - # Функция мониторинга monitor_process() { local pid=$1 local log_file=$2 - echo "📊 Мониторинг процесса $pid..." + echo "Мониторинг процесса $pid..." while kill -0 $pid 2>/dev/null; do - # Время NOW=$(date '+%H:%M:%S') - # Память процесса MEM=$(ps -o rss= -p $pid 2>/dev/null | awk '{print $1/1024 " MB"}' || echo "N/A") - # CPU процесса CPU=$(ps -o pcpu= -p $pid 2>/dev/null || echo "N/A") - # Общая память системы TOTAL_MEM=$(free -m | awk 'NR==2{print $3 " MB / " $2 " MB"}') echo "[$NOW] PID: $pid | MEM: $MEM | CPU: $CPU% | System MEM: $TOTAL_MEM" >> $log_file @@ -499,69 +491,55 @@ jobs: sleep 2 done - echo "📊 Процесс $pid завершен" >> $log_file + echo "Процесс $pid завершен" >> $log_file } - # Запускаем основную программу в фоне - echo "▶️ Запуск ACC в фоновом режиме..." + echo "Запуск ACC в фоновом режиме..." "${{ steps.set_eval_binary.outputs.EVAL_BINARY }}" \ --model $MODEL \ $EXTRA_ARGS > accuracy_$MODEL.txt 2>&1 & ACC_PID=$! - echo "📊 PID процесса: $ACC_PID" + echo "PID процесса: $ACC_PID" - # Запускаем мониторинг в фоне monitor_process $ACC_PID $MONITOR_LOG & MONITOR_PID=$! - # Ждем завершения ACC wait $ACC_PID EXIT_CODE=$? 
- # Убиваем мониторинг kill $MONITOR_PID 2>/dev/null || true - - echo "===================================" - echo "📊 Результаты мониторинга:" + + echo "Результаты мониторинга:" if [ -f "$MONITOR_LOG" ]; then cat "$MONITOR_LOG" else echo "Лог мониторинга не создан" fi - echo "===================================" - echo "📊 Системная информация после запуска:" + echo "Системная информация после запуска:" free -h - echo "===================================" - echo "📊 Код завершения: $EXIT_CODE" + echo "Код завершения: $EXIT_CODE" if [ $EXIT_CODE -eq 143 ]; then - echo "❌ Ошибка 143 (SIGTERM) - процесс убит системой" - echo "Возможные причины:" - echo " - Нехватка памяти (OOM Killer)" - echo " - Превышение лимитов CPU" - echo " - Timeout от GitHub Actions" + echo "Ошибка 143 (SIGTERM) - процесс убит системой" - # Проверка OOM Killer if sudo dmesg | tail -20 | grep -i "killed process" | grep -q "$ACC_PID"; then - echo "✅ Подтверждено: процесс убит OOM Killer" + echo "Подтверждено: процесс убит OOM Killer" sudo dmesg | tail -20 | grep -i "killed process" | tail -5 else - echo "❌ Не найдено подтверждение OOM Killer в логах" + echo "Не найдено подтверждение OOM Killer в логах" sudo dmesg | tail -20 fi elif [ $EXIT_CODE -ne 0 ]; then - echo "❌ Ошибка при оценке модели $MODEL (код: $EXIT_CODE)" + echo "Ошибка при оценке модели $MODEL (код: $EXIT_CODE)" else - echo "✅ Оценка успешно завершена" + echo "Оценка успешно завершена" fi - # Всегда показываем лог ACC - echo "===================================" - echo "📋 Лог ACC:" + echo "Лог ACC:" if [ -f "accuracy_$MODEL.txt" ]; then cat "accuracy_$MODEL.txt" else From 4e6a9bc233f280812015d7cb8e9909945a86a6bf Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Sat, 7 Mar 2026 11:32:52 +0300 Subject: [PATCH 28/38] static --- app/Graph/acc_check.cpp | 59 +++++++++++++++++++---------------------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index 9b4c0ee5..1517b56e 100644 
--- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -13,7 +13,7 @@ using namespace it_lab_ai; int main(int argc, char* argv[]) { std::string model_name = "alexnet_mnist"; RuntimeOptions options; - size_t numPhoto = 1000; + size_t num_photo = 1000; for (int i = 1; i < argc; ++i) { if (std::string(argv[i]) == "--model" && i + 1 < argc) { @@ -51,15 +51,15 @@ int main(int argc, char* argv[]) { options.threads = std::stoi(argv[++i]); } else { try { - numPhoto = std::stoi(argv[i]); + num_photo = std::stoi(argv[i]); - if (numPhoto < 1 || numPhoto > 50000) { - std::cerr << "Warning: numPhoto should be between 1 and 10000 " - << "Using value: " << numPhoto << std::endl; + if (num_photo < 1 || num_photo > 50000) { + std::cerr << "Warning: num_photo should be between 1 and 10000 " + << "Using value: " << num_photo << '\n'; } } catch (const std::exception& e) { std::cerr << "Error: Invalid numeric argument: " << argv[i] - << ". Using default value: 1000" << e.what() << std::endl; + << ". 
Using default value: 1000" << e.what() << '\n'; } } } @@ -83,7 +83,6 @@ int main(int argc, char* argv[]) { size_t sum = std::accumulate(counts.begin(), counts.end(), size_t{0}); int count_pic = static_cast(sum) + 10; std::vector res(count_pic * 28 * 28); - Tensor input; Shape sh1({1, 5, 5, 3}); std::vector vec; vec.reserve(75); @@ -111,14 +110,14 @@ int main(int argc, char* argv[]) { for (int j = 0; j < 28; ++j) { size_t a = ind; for (size_t n = 0; n < name; n++) a += counts[n] + 1; - res[(a) * 28 * 28 + i * 28 + j] = channels[0].at(j, i); + res[(a)*28 * 28 + i * 28 + j] = channels[0].at(j, i); } } } } Shape sh({static_cast(count_pic), 1, 28, 28}); Tensor t = make_tensor(res, sh); - input = t; + Tensor input = t; Graph graph; build_graph_linear(graph, input, output, options, false); graph.inference(options); @@ -147,14 +146,13 @@ int main(int argc, char* argv[]) { << "%" << '\n'; return 0; } - std::vector counts; + + std::vector counts(1000, 0); std::vector image_paths; std::vector true_labels; std::vector all_image_data; size_t total_images = 0; - counts.resize(1000, 0); - for (int class_id = 0; class_id < 1000; ++class_id) { std::ostringstream folder_oss; folder_oss << std::setw(5) << std::setfill('0') << class_id; @@ -171,17 +169,17 @@ int main(int argc, char* argv[]) { } } - size_t images_per_class_base = numPhoto / 1000; - size_t remaining = numPhoto % 1000; + size_t images_per_class_base = num_photo / 1000; + size_t remaining = num_photo % 1000; int channels = input_shape[1]; int height = input_shape[2]; int width = input_shape[3]; size_t image_size = channels * height * width; - all_image_data.reserve(numPhoto * image_size); - image_paths.reserve(numPhoto); - true_labels.reserve(numPhoto); + all_image_data.reserve(num_photo * image_size); + image_paths.reserve(num_photo); + true_labels.reserve(num_photo); total_images = 0; @@ -230,25 +228,24 @@ int main(int argc, char* argv[]) { if (taken < need_from_class) { std::cout << "Warning: Class " << class_id << " 
has only " << taken - << " images (needed " << need_from_class << ")" << std::endl; + << " images (needed " << need_from_class << ")" << '\n'; } } - if (total_images != numPhoto) { - std::cout << "Warning: Requested " << numPhoto << " images but loaded " - << total_images << " due to insufficient data" << std::endl; - numPhoto = total_images; + if (total_images != num_photo) { + std::cout << "Warning: Requested " << num_photo << " images but loaded " + << total_images << " due to insufficient data" << '\n'; + num_photo = total_images; } it_lab_ai::Shape input_shape_imagenet( - {static_cast(numPhoto), static_cast(channels), - static_cast(height), static_cast(width)}); + {num_photo, static_cast(channels), static_cast(height), + static_cast(width)}); it_lab_ai::Tensor input = it_lab_ai::make_tensor(all_image_data, input_shape_imagenet); size_t output_classes = 1000; - it_lab_ai::Shape output_shape( - {static_cast(numPhoto), output_classes}); + it_lab_ai::Shape output_shape({num_photo, output_classes}); it_lab_ai::Tensor output = it_lab_ai::Tensor(output_shape, it_lab_ai::Type::kFloat); @@ -260,7 +257,7 @@ int main(int argc, char* argv[]) { std::vector> processed_outputs; const std::vector& raw_output = *output.as(); - for (size_t i = 0; i < static_cast(numPhoto); ++i) { + for (size_t i = 0; i < num_photo; ++i) { std::vector single_output( raw_output.begin() + i * output_classes, raw_output.begin() + (i + 1) * output_classes); @@ -300,14 +297,14 @@ int main(int argc, char* argv[]) { } double final_accuracy_top1 = - (static_cast(correct_predictions_top1) / numPhoto) * 100; + (static_cast(correct_predictions_top1) / num_photo) * 100; double final_accuracy_top5 = - (static_cast(correct_predictions_top5) / numPhoto) * 100; + (static_cast(correct_predictions_top5) / num_photo) * 100; std::cout << "\nFinal Results:" << '\n'; std::cout << "Model: " << model_name << '\n'; std::cout << "Dataset: " << dataset_path << '\n'; - std::cout << "Total images: " << numPhoto << '\n'; + 
std::cout << "Total images: " << num_photo << '\n'; std::cout << "Correct predictions (Top-1): " << correct_predictions_top1 << '\n'; std::cout << "Correct predictions (Top-5): " << correct_predictions_top5 @@ -318,4 +315,4 @@ int main(int argc, char* argv[]) { << final_accuracy_top5 << "%" << '\n'; return 0; -} +} \ No newline at end of file From 4eb3771e0dd672af1247a861785834a1f4b63238 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Sat, 7 Mar 2026 11:34:01 +0300 Subject: [PATCH 29/38] cl --- app/Graph/acc_check.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index 1517b56e..ddf0a893 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -110,7 +110,7 @@ int main(int argc, char* argv[]) { for (int j = 0; j < 28; ++j) { size_t a = ind; for (size_t n = 0; n < name; n++) a += counts[n] + 1; - res[(a)*28 * 28 + i * 28 + j] = channels[0].at(j, i); + res[(a) * 28 * 28 + i * 28 + j] = channels[0].at(j, i); } } } From 040794f9d3667da2134c3ed5e349ef4cb56059d0 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Wed, 11 Mar 2026 11:47:58 +0300 Subject: [PATCH 30/38] batch 32 for accuracy --- app/Graph/acc_check.cpp | 278 ++++++++++++++++++++++++++++++++-------- app/Graph/build.cpp | 13 +- app/Graph/build.hpp | 2 +- include/graph/graph.hpp | 13 ++ 4 files changed, 245 insertions(+), 61 deletions(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index ddf0a893..40cba480 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -1,4 +1,14 @@ -#include + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN +#endif +#include +#include +#pragma comment(lib, "psapi.lib") +#include +#include +#include +#include #include #include #include @@ -7,17 +17,82 @@ #include "build.hpp" +class MemoryLogger { + private: + std::chrono::steady_clock::time_point start_time; + size_t peak_memory = 0; + size_t initial_memory = 0; + + size_t getProcessMemory() { + HANDLE hProcess = 
GetCurrentProcess(); + PROCESS_MEMORY_COUNTERS pmc; + pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS); + + if (GetProcessMemoryInfo(hProcess, &pmc, sizeof(pmc))) { + return pmc.WorkingSetSize / (1024 * 1024); + } + return 0; + } + + public: + MemoryLogger() { + start_time = std::chrono::steady_clock::now(); + initial_memory = getProcessMemory(); + log("START"); + } + + void log(const char* stage) { + auto now = std::chrono::steady_clock::now(); + auto elapsed = + std::chrono::duration_cast(now - start_time) + .count(); + + size_t current = getProcessMemory(); + if (current > peak_memory) peak_memory = current; + + std::cout << "[" << std::setw(4) << elapsed << "s] " << std::setw(30) + << stage << " | " + << "PROCESS MEM: " << std::setw(6) << current << " MB" + << " (PEAK: " << std::setw(6) << peak_memory << " MB)" + << " (DELTA: " << std::setw(4) << (current - initial_memory) + << " MB)\n"; + } + + ~MemoryLogger() { + log("END"); + std::cout << "====================================\n"; + std::cout << "PEAK PROCESS MEMORY: " << peak_memory << " MB\n"; + std::cout << "INITIAL PROCESS MEMORY: " << initial_memory << " MB\n"; + std::cout << "FINAL PROCESS MEMORY: " << getProcessMemory() << " MB\n"; + if (getProcessMemory() > initial_memory + 10) { + std::cout << "WARNING: Process memory growth! 
(+" + << (getProcessMemory() - initial_memory) << " MB)\n"; + } else { + std::cout << "OK: No significant process memory growth\n"; + } + } +}; + +MemoryLogger g_memLogger; + +#define LOG_MEM(stage) g_memLogger.log(stage) + namespace fs = std::filesystem; using namespace it_lab_ai; int main(int argc, char* argv[]) { + LOG_MEM("Program start"); + std::string model_name = "alexnet_mnist"; RuntimeOptions options; size_t num_photo = 1000; + size_t batch_size = 32; for (int i = 1; i < argc; ++i) { if (std::string(argv[i]) == "--model" && i + 1 < argc) { model_name = argv[++i]; + } else if (std::string(argv[i]) == "--batch" && i + 1 < argc) { + batch_size = std::stoi(argv[++i]); } else if (std::string(argv[i]) == "--onednn") { options.backend = Backend::kOneDnn; if (options.par_backend != ParBackend::kSeq) { @@ -64,6 +139,8 @@ int main(int argc, char* argv[]) { } } + LOG_MEM("After args parsing"); + std::string dataset_path; if (model_name == "alexnet_mnist") { dataset_path = MNIST_PATH; @@ -75,8 +152,10 @@ int main(int argc, char* argv[]) { std::vector input_shape = get_input_shape_from_json(json_path); std::cout << '\n'; - + int batch_count = 0; if (model_name == "alexnet_mnist") { + LOG_MEM("MNIST start"); + std::vector counts = {979, 1134, 1031, 1009, 981, 891, 957, 1027, 973, 1008}; int stat = 0; @@ -144,15 +223,20 @@ int main(int argc, char* argv[]) { (static_cast(stat) / static_cast(sum + 10)) * 100; std::cout << "Stat: " << std::fixed << std::setprecision(2) << percentage << "%" << '\n'; + + LOG_MEM("MNIST end"); return 0; } + LOG_MEM("ImageNet start"); + std::vector counts(1000, 0); std::vector image_paths; std::vector true_labels; std::vector all_image_data; size_t total_images = 0; + LOG_MEM("Counting classes"); for (int class_id = 0; class_id < 1000; ++class_id) { std::ostringstream folder_oss; folder_oss << std::setw(5) << std::setfill('0') << class_id; @@ -176,13 +260,16 @@ int main(int argc, char* argv[]) { int height = input_shape[2]; int width = 
input_shape[3]; size_t image_size = channels * height * width; + size_t output_classes = 1000; + LOG_MEM("Reserving memory"); all_image_data.reserve(num_photo * image_size); image_paths.reserve(num_photo); true_labels.reserve(num_photo); total_images = 0; + LOG_MEM("Loading images start"); for (int class_id = 0; class_id < 1000; ++class_id) { size_t need_from_class = images_per_class_base; if (remaining > 0) { @@ -230,72 +317,148 @@ int main(int argc, char* argv[]) { std::cout << "Warning: Class " << class_id << " has only " << taken << " images (needed " << need_from_class << ")" << '\n'; } + + if (class_id % 100 == 0 && class_id > 0) { + char buf[50]; + sprintf(buf, "Class %d", class_id); + LOG_MEM(buf); + } } + LOG_MEM("Images loaded"); + if (total_images != num_photo) { std::cout << "Warning: Requested " << num_photo << " images but loaded " << total_images << " due to insufficient data" << '\n'; num_photo = total_images; } - it_lab_ai::Shape input_shape_imagenet( - {num_photo, static_cast(channels), static_cast(height), - static_cast(width)}); - it_lab_ai::Tensor input = - it_lab_ai::make_tensor(all_image_data, input_shape_imagenet); - - size_t output_classes = 1000; - it_lab_ai::Shape output_shape({num_photo, output_classes}); - it_lab_ai::Tensor output = - it_lab_ai::Tensor(output_shape, it_lab_ai::Type::kFloat); - - Graph graph; - build_graph(graph, input, output, json_path, options, false); - graph.inference(options); - print_time_stats(graph); - - std::vector> processed_outputs; - const std::vector& raw_output = *output.as(); - - for (size_t i = 0; i < num_photo; ++i) { - std::vector single_output( - raw_output.begin() + i * output_classes, - raw_output.begin() + (i + 1) * output_classes); - std::vector processed_output = - process_model_output(single_output, model_name); - processed_outputs.push_back(processed_output); - } - int correct_predictions_top1 = 0; int correct_predictions_top5 = 0; - for (size_t i = 0; i < processed_outputs.size(); ++i) { - int 
true_label = true_labels[i]; - const std::vector& probabilities = processed_outputs[i]; - - std::vector indices(probabilities.size()); - std::iota(indices.begin(), indices.end(), 0); - std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) { - return probabilities[a] > probabilities[b]; - }); - - size_t predicted_class_top1 = indices[0]; - if (predicted_class_top1 == static_cast(true_label)) { - correct_predictions_top1++; - } - bool found_in_top5 = false; - for (int top_k = 0; top_k < std::min(5, static_cast(indices.size())); - ++top_k) { - if (indices[top_k] == static_cast(true_label)) { - found_in_top5 = true; - break; + LOG_MEM("Starting batch processing"); + auto total_start_time = std::chrono::high_resolution_clock::now(); + int total_inference_time = 0; + + for (size_t batch_start = 0; batch_start < num_photo; + batch_start += batch_size) { + size_t batch_end = std::min(batch_start + batch_size, num_photo); + size_t current_batch_size = batch_end - batch_start; + + char batch_log[100]; + sprintf(batch_log, "Batch %zu/%zu (size %zu)", batch_start / batch_size + 1, + (num_photo + batch_size - 1) / batch_size, current_batch_size); + LOG_MEM(batch_log); + + std::vector batch_data; + batch_data.reserve(current_batch_size * image_size); + + size_t batch_offset = batch_start * image_size; + batch_data.insert(batch_data.end(), all_image_data.begin() + batch_offset, + all_image_data.begin() + batch_offset + + current_batch_size * image_size); + + it_lab_ai::Shape batch_input_shape( + {current_batch_size, static_cast(channels), + static_cast(height), static_cast(width)}); + it_lab_ai::Tensor batch_input = make_tensor(batch_data, batch_input_shape); + + it_lab_ai::Shape batch_output_shape({current_batch_size, output_classes}); + it_lab_ai::Tensor batch_output(batch_output_shape, it_lab_ai::Type::kFloat); + + Graph graph; + build_graph(graph, batch_input, batch_output, json_path, options, false); + + LOG_MEM("Batch inference"); + // auto batch_start_time = + 
// std::chrono::high_resolution_clock::now(); + graph.inference(options); + total_inference_time += print_time_stats(graph); + // auto batch_end_time = std::chrono::high_resolution_clock::now(); + // int batch_time = + // static_cast(std::chrono::duration_cast( + // batch_end_time - batch_start_time) + // .count()); // ← Добавлен static_cast + // total_inference_time += batch_time; + // batch_count++; + + // #ifdef ENABLE_STATISTIC_TIME + // std::vector elps_time = graph.getTime(); + // int batch_time = std::accumulate(elps_time.begin(), + // elps_time.end(), 0); total_inference_time += batch_time; + // batch_count++; + // + // char time_log[100]; + // sprintf(time_log, "Batch %d time: %d ms", batch_count, + // batch_time); LOG_MEM(time_log); + // #endif + + const std::vector& raw_batch_output = *batch_output.as(); + + for (size_t i = 0; i < current_batch_size; ++i) { + size_t global_idx = batch_start + i; + + std::vector single_output( + raw_batch_output.begin() + i * output_classes, + raw_batch_output.begin() + (i + 1) * output_classes); + + float max_val = + *std::max_element(single_output.begin(), single_output.end()); + float sum = 0.0f; + for (float& val : single_output) { + val = exp(val - max_val); + sum += val; + } + for (float& val : single_output) { + val /= sum; + } + + std::vector indices(single_output.size()); + std::iota(indices.begin(), indices.end(), 0); + std::sort(indices.begin(), indices.end(), [&](size_t a, size_t b) { + return single_output[a] > single_output[b]; + }); + + if (indices[0] == static_cast(true_labels[global_idx])) { + correct_predictions_top1++; + } + + for (int top_k = 0; top_k < std::min(5, static_cast(indices.size())); + ++top_k) { + if (indices[top_k] == static_cast(true_labels[global_idx])) { + correct_predictions_top5++; + break; + } } } - if (found_in_top5) { - correct_predictions_top5++; - } + + batch_data.clear(); + batch_data.shrink_to_fit(); } + auto total_end_time = std::chrono::high_resolution_clock::now(); + int 
total_time = + static_cast(std::chrono::duration_cast( + total_end_time - total_start_time) + .count()); + + std::cout << "\n!INFERENCE TIME INFO START!" << '\n'; + std::cout << "Total inference time (sum of batches): " << total_inference_time + << " ms\n"; + std::cout << "Total wall-clock time for all batches: " << total_time + << " ms\n"; + std::cout << "Number of batches: " << batch_count << '\n'; + std::cout << "Average time per batch: " + << (batch_count > 0 ? total_inference_time / batch_count : 0) + << " ms\n"; + std::cout << "!INFERENCE TIME INFO END!" << '\n'; + /*std::cout << "\n!INFERENCE TIME INFO START!" << '\n'; + std::cout << "Total inference time for all batches: " << total_inference_time + << " ms\n"; + std::cout << "Number of batches: " << batch_count << '\n'; + std::cout << "!INFERENCE TIME INFO END!" << '\n'; + LOG_MEM("All batches processed");*/ + double final_accuracy_top1 = (static_cast(correct_predictions_top1) / num_photo) * 100; double final_accuracy_top5 = @@ -305,6 +468,7 @@ int main(int argc, char* argv[]) { std::cout << "Model: " << model_name << '\n'; std::cout << "Dataset: " << dataset_path << '\n'; std::cout << "Total images: " << num_photo << '\n'; + std::cout << "Batch size: " << batch_size << '\n'; std::cout << "Correct predictions (Top-1): " << correct_predictions_top1 << '\n'; std::cout << "Correct predictions (Top-5): " << correct_predictions_top5 @@ -314,5 +478,13 @@ int main(int argc, char* argv[]) { std::cout << "Top-5 Accuracy: " << std::fixed << std::setprecision(2) << final_accuracy_top5 << "%" << '\n'; + all_image_data.clear(); + all_image_data.shrink_to_fit(); + image_paths.clear(); + image_paths.shrink_to_fit(); + true_labels.clear(); + true_labels.shrink_to_fit(); + + LOG_MEM("Program end"); return 0; } \ No newline at end of file diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp index 381e8fcd..d904c1a5 100644 --- a/app/Graph/build.cpp +++ b/app/Graph/build.cpp @@ -1129,18 +1129,17 @@ it_lab_ai::Tensor 
prepare_mnist_image(const cv::Mat& image) { return it_lab_ai::make_tensor(res, sh); } -void print_time_stats(Graph& graph) { +int print_time_stats(Graph& graph) { #ifdef ENABLE_STATISTIC_TIME std::vector times = graph.getTimeInfo(); - std::cout << "!INFERENCE TIME INFO START!" << '\n'; - for (size_t i = 0; i < times.size(); i++) { + // std::cout << "!INFERENCE TIME INFO START!" << '\n'; + /*for (size_t i = 0; i < times.size(); i++) { std::cout << times[i] << '\n'; - } + }*/ std::vector elps_time = graph.getTime(); int sum = std::accumulate(elps_time.begin(), elps_time.end(), 0); - std::cout << "Elapsed inference time:" << sum << '\n'; - std::cout << "!INFERENCE TIME INFO END!" << '\n'; - graph.printLayerStats(); + // graph.printLayerStats(); + return sum; #else (void)graph; #endif diff --git a/app/Graph/build.hpp b/app/Graph/build.hpp index 5628e9d1..4c5920dd 100644 --- a/app/Graph/build.hpp +++ b/app/Graph/build.hpp @@ -76,7 +76,7 @@ it_lab_ai::Tensor prepare_image(const cv::Mat& image, const std::string& model_name = ""); it_lab_ai::Tensor prepare_mnist_image(const cv::Mat& image); -void print_time_stats(it_lab_ai::Graph& graph); +int print_time_stats(it_lab_ai::Graph& graph); namespace it_lab_ai { class LayerFactory { public: diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp index 4743d996..6d7fdaa2 100644 --- a/include/graph/graph.hpp +++ b/include/graph/graph.hpp @@ -318,9 +318,11 @@ class Graph { for (size_t i = 0; i < traversal.size(); ++i) { int current_layer = traversal[i]; + #ifdef ENABLE_STATISTIC_TIME auto start = std::chrono::high_resolution_clock::now(); #endif + if (i != 0) { inten_.clear(); @@ -343,15 +345,19 @@ class Graph { } it->second.count_used_ten--; + + // - if (it->second.count_used_ten < 1) { branch_map_.erase(it); } } } } + if (outten_.empty()) { outten_.resize(1); } + layers_[current_layer]->run(inten_, outten_, options); #ifdef ENABLE_STATISTIC_TENSORS @@ -399,7 +405,9 @@ class Graph { } new_branch.distribution = dis; } + 
branch_map_[current_layer] = std::move(new_branch); + if (outtenres_ && current_layer == end_ && !branch_map_[current_layer].give_for_all.empty() && countinout[current_layer].second == 0) { @@ -433,6 +441,11 @@ class Graph { } #endif } + + // + branch_map_.clear(); + inten_.clear(); + outten_.clear(); } void setOutput(const std::shared_ptr& layer, Tensor& vec) { From d75a28d56278ea962fe7eda32a65e1697d924efc Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Wed, 11 Mar 2026 11:56:24 +0300 Subject: [PATCH 31/38] fix batch 32 --- app/Graph/acc_check.cpp | 5 ++--- app/Graph/build.cpp | 1 + include/graph/graph.hpp | 13 ------------- 3 files changed, 3 insertions(+), 16 deletions(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index 40cba480..e3a9b166 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -1,9 +1,8 @@ - -#ifndef WIN32_LEAN_AND_MEAN +#ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif -#include #include +#include #pragma comment(lib, "psapi.lib") #include #include diff --git a/app/Graph/build.cpp b/app/Graph/build.cpp index d904c1a5..cc61ff29 100644 --- a/app/Graph/build.cpp +++ b/app/Graph/build.cpp @@ -1142,5 +1142,6 @@ int print_time_stats(Graph& graph) { return sum; #else (void)graph; + return 0; #endif } diff --git a/include/graph/graph.hpp b/include/graph/graph.hpp index 6d7fdaa2..4743d996 100644 --- a/include/graph/graph.hpp +++ b/include/graph/graph.hpp @@ -318,11 +318,9 @@ class Graph { for (size_t i = 0; i < traversal.size(); ++i) { int current_layer = traversal[i]; - #ifdef ENABLE_STATISTIC_TIME auto start = std::chrono::high_resolution_clock::now(); #endif - if (i != 0) { inten_.clear(); @@ -345,19 +343,15 @@ class Graph { } it->second.count_used_ten--; - - // - if (it->second.count_used_ten < 1) { branch_map_.erase(it); } } } } - if (outten_.empty()) { outten_.resize(1); } - layers_[current_layer]->run(inten_, outten_, options); #ifdef ENABLE_STATISTIC_TENSORS @@ -405,9 +399,7 @@ class Graph { } 
new_branch.distribution = dis; } - branch_map_[current_layer] = std::move(new_branch); - if (outtenres_ && current_layer == end_ && !branch_map_[current_layer].give_for_all.empty() && countinout[current_layer].second == 0) { @@ -441,11 +433,6 @@ class Graph { } #endif } - - // - branch_map_.clear(); - inten_.clear(); - outten_.clear(); } void setOutput(const std::shared_ptr& layer, Tensor& vec) { From 510e764ffc692641bd8e407646578b7efea351b3 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Wed, 11 Mar 2026 12:53:53 +0300 Subject: [PATCH 32/38] one graph && batching --- app/Graph/acc_check.cpp | 84 ++++++++++++++++++++++++----------------- 1 file changed, 50 insertions(+), 34 deletions(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index e3a9b166..ed552f00 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -1,8 +1,9 @@ -#ifndef WIN32_LEAN_AND_MEAN + +#ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif -#include #include +#include #pragma comment(lib, "psapi.lib") #include #include @@ -85,7 +86,7 @@ int main(int argc, char* argv[]) { std::string model_name = "alexnet_mnist"; RuntimeOptions options; size_t num_photo = 1000; - size_t batch_size = 32; + size_t batch_size = 50; for (int i = 1; i < argc; ++i) { if (std::string(argv[i]) == "--model" && i + 1 < argc) { @@ -151,7 +152,7 @@ int main(int argc, char* argv[]) { std::vector input_shape = get_input_shape_from_json(json_path); std::cout << '\n'; - int batch_count = 0; + if (model_name == "alexnet_mnist") { LOG_MEM("MNIST start"); @@ -188,7 +189,7 @@ int main(int argc, char* argv[]) { for (int j = 0; j < 28; ++j) { size_t a = ind; for (size_t n = 0; n < name; n++) a += counts[n] + 1; - res[(a) * 28 * 28 + i * 28 + j] = channels[0].at(j, i); + res[(a)*28 * 28 + i * 28 + j] = channels[0].at(j, i); } } } @@ -335,9 +336,42 @@ int main(int argc, char* argv[]) { int correct_predictions_top1 = 0; int correct_predictions_top5 = 0; + LOG_MEM("Building master graph"); + + 
it_lab_ai::Shape full_shape({num_photo, static_cast(channels), + static_cast(height), + static_cast(width)}); + it_lab_ai::Tensor dummy_input = make_tensor(all_image_data, full_shape); + + it_lab_ai::Shape full_output_shape({num_photo, output_classes}); + it_lab_ai::Tensor dummy_output(full_output_shape, it_lab_ai::Type::kFloat); + + Graph graph; + build_graph(graph, dummy_input, dummy_output, json_path, options, false); + LOG_MEM("Master graph built"); + + std::shared_ptr input_layer = nullptr; + std::shared_ptr output_layer = nullptr; + + for (int i = 0; i < graph.getLayersCount(); ++i) { + auto layer = graph.getLayerFromID(i); + if (layer->getName() == kInput) { + input_layer = layer; + } + if (i == graph.getLayersCount() - 1) { + output_layer = layer; + } + } + + if (!input_layer || !output_layer) { + std::cerr << "Error: Could not find input/output layers" << '\n'; + return 1; + } + LOG_MEM("Starting batch processing"); auto total_start_time = std::chrono::high_resolution_clock::now(); int total_inference_time = 0; + int batch_count = 0; for (size_t batch_start = 0; batch_start < num_photo; batch_start += batch_size) { @@ -365,32 +399,20 @@ int main(int argc, char* argv[]) { it_lab_ai::Shape batch_output_shape({current_batch_size, output_classes}); it_lab_ai::Tensor batch_output(batch_output_shape, it_lab_ai::Type::kFloat); - Graph graph; - build_graph(graph, batch_input, batch_output, json_path, options, false); + graph.setInput(input_layer, batch_input); + graph.setOutput(output_layer, batch_output); LOG_MEM("Batch inference"); - // auto batch_start_time = - // std::chrono::high_resolution_clock::now(); + auto batch_start_time = std::chrono::high_resolution_clock::now(); graph.inference(options); - total_inference_time += print_time_stats(graph); - // auto batch_end_time = std::chrono::high_resolution_clock::now(); - // int batch_time = - // static_cast(std::chrono::duration_cast( - // batch_end_time - batch_start_time) - // .count()); // ← Добавлен 
static_cast - // total_inference_time += batch_time; - // batch_count++; - - // #ifdef ENABLE_STATISTIC_TIME - // std::vector elps_time = graph.getTime(); - // int batch_time = std::accumulate(elps_time.begin(), - // elps_time.end(), 0); total_inference_time += batch_time; - // batch_count++; - // - // char time_log[100]; - // sprintf(time_log, "Batch %d time: %d ms", batch_count, - // batch_time); LOG_MEM(time_log); - // #endif + auto batch_end_time = std::chrono::high_resolution_clock::now(); + + int batch_time = + static_cast(std::chrono::duration_cast( + batch_end_time - batch_start_time) + .count()); + total_inference_time += batch_time; + batch_count++; const std::vector& raw_batch_output = *batch_output.as(); @@ -451,12 +473,6 @@ int main(int argc, char* argv[]) { << (batch_count > 0 ? total_inference_time / batch_count : 0) << " ms\n"; std::cout << "!INFERENCE TIME INFO END!" << '\n'; - /*std::cout << "\n!INFERENCE TIME INFO START!" << '\n'; - std::cout << "Total inference time for all batches: " << total_inference_time - << " ms\n"; - std::cout << "Number of batches: " << batch_count << '\n'; - std::cout << "!INFERENCE TIME INFO END!" 
<< '\n'; - LOG_MEM("All batches processed");*/ double final_accuracy_top1 = (static_cast(correct_predictions_top1) / num_photo) * 100; From aeeb3c64263c7c551ed9574bc7d5b8a4bb83ac86 Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Wed, 11 Mar 2026 13:07:43 +0300 Subject: [PATCH 33/38] clean --- app/Graph/acc_check.cpp | 111 +--------------------------------------- 1 file changed, 2 insertions(+), 109 deletions(-) diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index ed552f00..c7c74700 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -1,12 +1,4 @@ - -#ifndef WIN32_LEAN_AND_MEAN -#define WIN32_LEAN_AND_MEAN -#endif -#include -#include -#pragma comment(lib, "psapi.lib") -#include -#include +#include #include #include #include @@ -17,72 +9,10 @@ #include "build.hpp" -class MemoryLogger { - private: - std::chrono::steady_clock::time_point start_time; - size_t peak_memory = 0; - size_t initial_memory = 0; - - size_t getProcessMemory() { - HANDLE hProcess = GetCurrentProcess(); - PROCESS_MEMORY_COUNTERS pmc; - pmc.cb = sizeof(PROCESS_MEMORY_COUNTERS); - - if (GetProcessMemoryInfo(hProcess, &pmc, sizeof(pmc))) { - return pmc.WorkingSetSize / (1024 * 1024); - } - return 0; - } - - public: - MemoryLogger() { - start_time = std::chrono::steady_clock::now(); - initial_memory = getProcessMemory(); - log("START"); - } - - void log(const char* stage) { - auto now = std::chrono::steady_clock::now(); - auto elapsed = - std::chrono::duration_cast(now - start_time) - .count(); - - size_t current = getProcessMemory(); - if (current > peak_memory) peak_memory = current; - - std::cout << "[" << std::setw(4) << elapsed << "s] " << std::setw(30) - << stage << " | " - << "PROCESS MEM: " << std::setw(6) << current << " MB" - << " (PEAK: " << std::setw(6) << peak_memory << " MB)" - << " (DELTA: " << std::setw(4) << (current - initial_memory) - << " MB)\n"; - } - - ~MemoryLogger() { - log("END"); - std::cout << "====================================\n"; - 
std::cout << "PEAK PROCESS MEMORY: " << peak_memory << " MB\n"; - std::cout << "INITIAL PROCESS MEMORY: " << initial_memory << " MB\n"; - std::cout << "FINAL PROCESS MEMORY: " << getProcessMemory() << " MB\n"; - if (getProcessMemory() > initial_memory + 10) { - std::cout << "WARNING: Process memory growth! (+" - << (getProcessMemory() - initial_memory) << " MB)\n"; - } else { - std::cout << "OK: No significant process memory growth\n"; - } - } -}; - -MemoryLogger g_memLogger; - -#define LOG_MEM(stage) g_memLogger.log(stage) - namespace fs = std::filesystem; using namespace it_lab_ai; int main(int argc, char* argv[]) { - LOG_MEM("Program start"); - std::string model_name = "alexnet_mnist"; RuntimeOptions options; size_t num_photo = 1000; @@ -139,8 +69,6 @@ int main(int argc, char* argv[]) { } } - LOG_MEM("After args parsing"); - std::string dataset_path; if (model_name == "alexnet_mnist") { dataset_path = MNIST_PATH; @@ -154,8 +82,6 @@ int main(int argc, char* argv[]) { std::cout << '\n'; if (model_name == "alexnet_mnist") { - LOG_MEM("MNIST start"); - std::vector counts = {979, 1134, 1031, 1009, 981, 891, 957, 1027, 973, 1008}; int stat = 0; @@ -189,7 +115,7 @@ int main(int argc, char* argv[]) { for (int j = 0; j < 28; ++j) { size_t a = ind; for (size_t n = 0; n < name; n++) a += counts[n] + 1; - res[(a)*28 * 28 + i * 28 + j] = channels[0].at(j, i); + res[(a) * 28 * 28 + i * 28 + j] = channels[0].at(j, i); } } } @@ -223,20 +149,15 @@ int main(int argc, char* argv[]) { (static_cast(stat) / static_cast(sum + 10)) * 100; std::cout << "Stat: " << std::fixed << std::setprecision(2) << percentage << "%" << '\n'; - - LOG_MEM("MNIST end"); return 0; } - LOG_MEM("ImageNet start"); - std::vector counts(1000, 0); std::vector image_paths; std::vector true_labels; std::vector all_image_data; size_t total_images = 0; - LOG_MEM("Counting classes"); for (int class_id = 0; class_id < 1000; ++class_id) { std::ostringstream folder_oss; folder_oss << std::setw(5) << std::setfill('0') 
<< class_id; @@ -262,14 +183,12 @@ int main(int argc, char* argv[]) { size_t image_size = channels * height * width; size_t output_classes = 1000; - LOG_MEM("Reserving memory"); all_image_data.reserve(num_photo * image_size); image_paths.reserve(num_photo); true_labels.reserve(num_photo); total_images = 0; - LOG_MEM("Loading images start"); for (int class_id = 0; class_id < 1000; ++class_id) { size_t need_from_class = images_per_class_base; if (remaining > 0) { @@ -317,16 +236,8 @@ int main(int argc, char* argv[]) { std::cout << "Warning: Class " << class_id << " has only " << taken << " images (needed " << need_from_class << ")" << '\n'; } - - if (class_id % 100 == 0 && class_id > 0) { - char buf[50]; - sprintf(buf, "Class %d", class_id); - LOG_MEM(buf); - } } - LOG_MEM("Images loaded"); - if (total_images != num_photo) { std::cout << "Warning: Requested " << num_photo << " images but loaded " << total_images << " due to insufficient data" << '\n'; @@ -336,8 +247,6 @@ int main(int argc, char* argv[]) { int correct_predictions_top1 = 0; int correct_predictions_top5 = 0; - LOG_MEM("Building master graph"); - it_lab_ai::Shape full_shape({num_photo, static_cast(channels), static_cast(height), static_cast(width)}); @@ -348,7 +257,6 @@ int main(int argc, char* argv[]) { Graph graph; build_graph(graph, dummy_input, dummy_output, json_path, options, false); - LOG_MEM("Master graph built"); std::shared_ptr input_layer = nullptr; std::shared_ptr output_layer = nullptr; @@ -368,7 +276,6 @@ int main(int argc, char* argv[]) { return 1; } - LOG_MEM("Starting batch processing"); auto total_start_time = std::chrono::high_resolution_clock::now(); int total_inference_time = 0; int batch_count = 0; @@ -378,11 +285,6 @@ int main(int argc, char* argv[]) { size_t batch_end = std::min(batch_start + batch_size, num_photo); size_t current_batch_size = batch_end - batch_start; - char batch_log[100]; - sprintf(batch_log, "Batch %zu/%zu (size %zu)", batch_start / batch_size + 1, - (num_photo 
+ batch_size - 1) / batch_size, current_batch_size); - LOG_MEM(batch_log); - std::vector batch_data; batch_data.reserve(current_batch_size * image_size); @@ -402,7 +304,6 @@ int main(int argc, char* argv[]) { graph.setInput(input_layer, batch_input); graph.setOutput(output_layer, batch_output); - LOG_MEM("Batch inference"); auto batch_start_time = std::chrono::high_resolution_clock::now(); graph.inference(options); auto batch_end_time = std::chrono::high_resolution_clock::now(); @@ -493,13 +394,5 @@ int main(int argc, char* argv[]) { std::cout << "Top-5 Accuracy: " << std::fixed << std::setprecision(2) << final_accuracy_top5 << "%" << '\n'; - all_image_data.clear(); - all_image_data.shrink_to_fit(); - image_paths.clear(); - image_paths.shrink_to_fit(); - true_labels.clear(); - true_labels.shrink_to_fit(); - - LOG_MEM("Program end"); return 0; } \ No newline at end of file From dab3fdbf30693867fcec9dd38f0403b803ef4ffb Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Wed, 11 Mar 2026 20:40:45 +0300 Subject: [PATCH 34/38] up to 1000 --- .github/workflows/ci.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6f5b0b4c..aeafe460 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -340,17 +340,17 @@ jobs: parser: parser_onnx.py model_file: densenet121_Opset16.onnx model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/densenet121_Opset16_timm/densenet121_Opset16.onnx?download= - extra_args: "--onednn 100" + extra_args: "--onednn 1000" - model: resnet parser: parser_onnx.py model_file: resnest101e_Opset16.onnx model_url: https://github.com/onnx/models/raw/refs/heads/main/Computer_Vision/resnest101e_Opset16_timm/resnest101e_Opset16.onnx?download= - extra_args: "--onednn 100" + extra_args: "--onednn 1000" - model: yolo parser: parser_onnx.py model_file: yolo11x-cls.pt model_url: 
https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo11x-cls.pt - extra_args: "--onednn 100" + extra_args: "--onednn 1000" steps: - uses: actions/checkout@v4 @@ -579,8 +579,8 @@ jobs: echo -e "\n## ${{ matrix.model }} Accuracy\nAccuracy: ${ACCURACY}% (updated: ${DATE})\n" >> README.md fi - - name: Commit and push changes (master only) - if: github.ref == 'refs/heads/master' + - name: Commit and push changes (main only) + if: github.ref == 'refs/heads/main' run: | git config --global user.name "GitHub Actions" git config --global user.email "actions@github.com" From 2fe1f5a19c38611e9c0391725dc2ca83392528ad Mon Sep 17 00:00:00 2001 From: Semyon1104 Date: Thu, 12 Mar 2026 18:58:40 +0300 Subject: [PATCH 35/38] batch32 --- app/Converters/parser_onnx.py | 2 +- app/Graph/acc_check.cpp | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/app/Converters/parser_onnx.py b/app/Converters/parser_onnx.py index 0c0b931a..8fcecbcb 100644 --- a/app/Converters/parser_onnx.py +++ b/app/Converters/parser_onnx.py @@ -171,7 +171,7 @@ def default(self, obj): parser = argparse.ArgumentParser(description='Конвертация моделей в JSON формат') parser.add_argument('model_name', type=str, - choices=['googlenet', 'densenet', 'resnet', 'yolo', 'alexnet'], + choices=['googlenet', 'densenet', 'resnet', 'yolo'], help='Имя модели для обработки') args = parser.parse_args() diff --git a/app/Graph/acc_check.cpp b/app/Graph/acc_check.cpp index c7c74700..d37d4f89 100644 --- a/app/Graph/acc_check.cpp +++ b/app/Graph/acc_check.cpp @@ -16,13 +16,11 @@ int main(int argc, char* argv[]) { std::string model_name = "alexnet_mnist"; RuntimeOptions options; size_t num_photo = 1000; - size_t batch_size = 50; + size_t batch_size = 32; for (int i = 1; i < argc; ++i) { if (std::string(argv[i]) == "--model" && i + 1 < argc) { model_name = argv[++i]; - } else if (std::string(argv[i]) == "--batch" && i + 1 < argc) { - batch_size = std::stoi(argv[++i]); } else if (std::string(argv[i]) == 
"--onednn") { options.backend = Backend::kOneDnn; if (options.par_backend != ParBackend::kSeq) { From b235670755c319595b8c3aeaf5d84e1c2c13f5ec Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Thu, 12 Mar 2026 19:01:14 +0300 Subject: [PATCH 36/38] placeholders --- README.md | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 853a75c7..c428b2dd 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,21 @@ # AlexNet-MNIST-Inference ## Model Performance -Accuracy: Stat: 98.01% (updated: 2025-04-28) +### AlexNet-MNIST Accuracy + + +### GoogLeNet Accuracy + + +### DenseNet Accuracy + + +### ResNet Accuracy + + +### YOLO Accuracy + + ## Short description A lightweight C++ library for performing high-performance inference on classification tasks. Designed for efficiency and educational purposes, this project demonstrates how classic CNNs can be optimized for small-scale tasks in native environments. 
### Key Features: From 5440df4be1d85c70189539b6517ae6c9c1bcabaa Mon Sep 17 00:00:00 2001 From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com> Date: Thu, 12 Mar 2026 19:01:44 +0300 Subject: [PATCH 37/38] Update ci.yml --- .github/workflows/ci.yml | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index aeafe460..9307dd3e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -335,7 +335,7 @@ jobs: model_file: GoogLeNet.onnx model_path: docs/models/GoogLeNet.onnx model_url: '' - extra_args: "--onednn 1000" + extra_args: "--onednn 10000" - model: densenet parser: parser_onnx.py model_file: densenet121_Opset16.onnx @@ -568,15 +568,26 @@ jobs: - name: Update README for model (master only) if: github.ref == 'refs/heads/master' run: | - ACCURACY=$(cat accuracy_value_${{ matrix.model }}.txt | sed 's/%//g') + TOP1_ACC=$(grep -oE 'Top-1 Accuracy: [0-9]+\.?[0-9]*%' accuracy_${{ matrix.model }}.txt | grep -oE '[0-9]+\.?[0-9]*') + TOP5_ACC=$(grep -oE 'Top-5 Accuracy: [0-9]+\.?[0-9]*%' accuracy_${{ matrix.model }}.txt | grep -oE '[0-9]+\.?[0-9]*') DATE=$(date '+%Y-%m-%d') - + + if [ -z "$TOP1_ACC" ] || [ -z "$TOP5_ACC" ]; then + echo "Ошибка: Не удалось извлечь точность из файла accuracy_${{ matrix.model }}.txt" + cat accuracy_${{ matrix.model }}.txt + exit 1 + fi + + UPDATE_TEXT="Accuracy: Top-1: ${TOP1_ACC}% | Top-5: ${TOP5_ACC}% (updated: ${DATE})" + if grep -q "" README.md; then - sed -i "s/.*/Accuracy: ${ACCURACY}% (updated: ${DATE})/" README.md + sed -i "s|.*|${UPDATE_TEXT}|" README.md echo "Обновлена точность для ${{ matrix.model }} в README" else - echo "Плейсхолдер для ${{ matrix.model }} не найден в README, добавляем в конец" - echo -e "\n## ${{ matrix.model }} Accuracy\nAccuracy: ${ACCURACY}% (updated: ${DATE})\n" >> README.md + echo "Ошибка: Плейсхолдер не найден в README.md" + echo "Содержимое README.md:" + cat README.md + exit 1 fi - name: Commit 
and push changes (main only)

From 2d0c1f2946fbe89f466cc722e139601b7c19bf45 Mon Sep 17 00:00:00 2001
From: Semyon1104 <129722895+Semyon1104@users.noreply.github.com>
Date: Thu, 12 Mar 2026 20:32:07 +0300
Subject: [PATCH 38/38] fix cache

---
 .github/workflows/ci.yml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 9307dd3e..f45a5214 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -416,9 +416,8 @@ jobs:
         uses: actions/cache/restore@v4
         with:
           path: docs/ImageNet
-          key: imagenet-paste-v1-${{ hashFiles('app/Converters/download_imagenet.py') }}
+          key: imagenet-paste-${{ github.run_id }}-${{ hashFiles('app/Converters/download_imagenet.py') }}
           restore-keys: |
-            imagenet-paste-v1-
             imagenet-paste-
 
       - name: Download ImageNet-Paste dataset