70 changes: 70 additions & 0 deletions .github/workflows/benchmark.yml
@@ -0,0 +1,70 @@
name: snmalloc Benchmarking CI

# The following should ensure that the workflow only runs a single set of actions
# for each PR. But it will not apply this to pushes to the main branch.
concurrency:
  group: benchmarking${{ github.ref }}
  cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}

# Controls when the workflow will run
on:
  # Triggers the workflow on push or pull request events but only for the main branch
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

jobs:
  benchmark:
    runs-on: [self-hosted, 1ES.Pool=snmalloc-perf]

    steps:
      # Checks out your repository under $GITHUB_WORKSPACE, so your job can access it
      - name: Checkout
        uses: actions/checkout@v3

      # Set up Docker Buildx
      - name: Setup Docker Buildx
        uses: docker/setup-buildx-action@v2

      - name: Build and Push Docker Image
        id: docker_build
        uses: docker/build-push-action@v4
        with:
          context: .
          file: benchmark/Dockerfile
          push: false
          load: true
          tags: snmalloc-bench
          build-args: |
            benchs=cfrac
            repeats=1
          cache-from: type=gha
          cache-to: type=gha,mode=max

      # Extracts the benchmark results from the Docker container
      # (see the expanded equivalent after this file)
      - name: Extract Benchmark Results
        run: |
          docker cp `docker run -d ${{ steps.docker_build.outputs.imageid }}`:/results.json .

      # Uploads the benchmark results as an artifact
      - name: Upload Benchmark Results
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: results.json

      # Upload to graphing service
      - uses: bencherdev/bencher@main
      - name: Upload benchmark results to Bencher
        run: |
          bencher run \
            --project snmalloc \
            --token '${{ secrets.BENCHER_DEV_API_TOKEN }}' \
            --branch ${{ github.ref_name }} \
            --adapter json \
            --err \
            --file results.json
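The extraction step above chains two Docker commands into one line. A rough step-by-step equivalent, assuming the image was loaded under the snmalloc-bench tag used in the build step (the cid variable name is only for illustration):

# Start a container from the benchmark image; docker run -d prints the new container's ID.
cid=$(docker run -d snmalloc-bench)
# Copy the results file out of the container; this works even after the container has exited.
docker cp "$cid":/results.json .
# Remove the container once the file has been retrieved.
docker rm -f "$cid"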
44 changes: 44 additions & 0 deletions benchmark/Dockerfile
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
FROM ubuntu:24.04

# Pull mimalloc-bench
RUN apt-get update && apt-get install -y --no-install-recommends git gpg ca-certificates python3-numpy
RUN git clone https://github.com/daanx/mimalloc-bench && \
    cd mimalloc-bench && \
    git reset --hard ffa530dbbe046532dfcb4a1b58ffc06e144aee60

WORKDIR /mimalloc-bench
# Install dependencies
RUN ./build-bench-env.sh packages

# Tidy up apt cache
RUN apt-get clean && rm -rf /var/lib/apt/lists/*

# Build benchmarks
RUN ./build-bench-env.sh bench

RUN ./build-bench-env.sh redis

RUN ./build-bench-env.sh rocksdb \
    && find /mimalloc-bench/extern/rocksdb-8.1.1 -name "*.o" -delete

RUN ./build-bench-env.sh lean \
    && find /mimalloc-bench/extern/lean -name "*.o" -delete

RUN echo "sn /snmalloc/build/libsnmallocshim.so" > /allocs.txt

# Build allocator
RUN mkdir -p /snmalloc
COPY . /snmalloc

RUN mkdir -p /snmalloc/build
WORKDIR /snmalloc/build
RUN cmake -G Ninja -DCMAKE_BUILD_TYPE=Release ..
RUN ninja libsnmallocshim.so

# Run benchmarks
ARG benchs=allt
ARG repeats=1
WORKDIR /mimalloc-bench/out/bench
RUN ../../bench.sh --external=/allocs.txt $benchs -r=$repeats

RUN python3 /snmalloc/benchmark/bencher.dev.py /mimalloc-bench/out/bench/benchres.csv > /results.json
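To reproduce the CI run locally, the image can be built from the repository root with the same build arguments the workflow passes. A sketch, assuming a local Docker installation and reusing the snmalloc-bench tag from the workflow:

# Build the benchmark image, running only the cfrac benchmark once, as the CI job does.
docker build -f benchmark/Dockerfile \
  --build-arg benchs=cfrac \
  --build-arg repeats=1 \
  -t snmalloc-bench .

Because the benchmarks run during the build, /results.json already exists in the finished image and can be copied out exactly as in the workflow's extraction step.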
75 changes: 75 additions & 0 deletions benchmark/bencher.dev.py
@@ -0,0 +1,75 @@
# This script is adapted from the mimalloc-bench project.
# It converts the benchmark outputs to the format required by bencher.dev.

import re
import sys
import collections
try:
    import numpy as np
except ImportError:
    print('You need to install numpy.')
    sys.exit(1)

if len(sys.argv) != 2:
    print('Usage: %s benchres.csv' % sys.argv[0])
    print('Where benchres.csv is the output of the benchmark script, i.e.')
    print('  mimalloc-bench/out/bench/benchres.csv')
    print()
    print('The script generates a single file for submission to bencher.dev.')
    sys.exit(1)

parse_line = re.compile('^([^ ]+) +([^ ]+) +([0-9:.]+) +([0-9]+)')
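# Each matched line of benchres.csv is expected to have the form
#   <benchmark> <allocator> <elapsed-time> <memory>
# where the elapsed time is either seconds or minutes:seconds and memory is an integer.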
data = []
test_names = set()

# read in the data
with open(sys.argv[1]) as f:
    for l in f.readlines():
        match = parse_line.search(l)
        if not match:
            continue
        test_name, alloc_name, time_string, memory = match.groups()
        time_split = time_string.split(':')
        time_taken = 0
        test_names.add(test_name)
        if len(time_split) == 2:
            time_taken = int(time_split[0]) * 60 + float(time_split[1])
        else:
            time_taken = float(time_split[0])
        data.append({"Benchmark": test_name, "Allocator": alloc_name, "Time": time_taken, "Memory": int(memory)})

# Output data as JSON of the form
#
# {
#   "<Benchmark>": {
#     "memory": {
#       "value": <memory-mean>,
#       "high-value": <memory-high>,
#       "low-value": <memory-low>
#     },
#     "time": {
#       "value": <time-mean>,
#       "high-value": <time-high>,
#       "low-value": <time-low>
#     }
#   }
# }

import json

output = {}
for test_name in test_names:
    output[test_name] = {
        "memory": {
            "value": float(np.mean([d["Memory"] for d in data if d["Benchmark"] == test_name])),
            "high-value": float(np.max([d["Memory"] for d in data if d["Benchmark"] == test_name])),
            "low-value": float(np.min([d["Memory"] for d in data if d["Benchmark"] == test_name])),
        },
        "time": {
            "value": float(np.mean([d["Time"] for d in data if d["Benchmark"] == test_name])),
            "high-value": float(np.max([d["Time"] for d in data if d["Benchmark"] == test_name])),
            "low-value": float(np.min([d["Time"] for d in data if d["Benchmark"] == test_name])),
        }
    }

print(json.dumps(output, indent=2))
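For reference, the emitted JSON has the shape sketched in the comment block above; a purely hypothetical example for a single benchmark (the numbers are made up and only the structure matters):

{
  "cfrac": {
    "memory": {
      "value": 3182.0,
      "high-value": 3208.0,
      "low-value": 3156.0
    },
    "time": {
      "value": 6.37,
      "high-value": 6.52,
      "low-value": 6.21
    }
  }
}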