Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 5 additions & 1 deletion pallets/subtensor/src/coinbase/root.rs
Original file line number Diff line number Diff line change
Expand Up @@ -587,7 +587,11 @@ impl<T: Config> Pallet<T> {
LastRateLimitedBlock::<T>::get(rate_limit_key)
}
/// Records the last block at which a rate-limited action occurred for `rate_limit_key`.
///
/// A `block` of 0 is the storage default, so instead of inserting an explicit
/// zero (which would bloat state with default-valued entries), the entry is
/// removed — `LastRateLimitedBlock::get` yields 0 for missing keys anyway.
pub fn set_rate_limited_last_block(rate_limit_key: &RateLimitKey<T::AccountId>, block: u64) {
    if block == 0 {
        LastRateLimitedBlock::<T>::remove(rate_limit_key);
    } else {
        LastRateLimitedBlock::<T>::insert(rate_limit_key, block);
    }
}
pub fn remove_rate_limited_last_block(rate_limit_key: &RateLimitKey<T::AccountId>) {
LastRateLimitedBlock::<T>::remove(rate_limit_key);
Expand Down
13 changes: 9 additions & 4 deletions pallets/subtensor/src/coinbase/run_coinbase.rs
Original file line number Diff line number Diff line change
Expand Up @@ -598,13 +598,18 @@ impl<T: Config> Pallet<T> {
log::debug!("hotkey: {hotkey:?} alpha_divs: {alpha_divs:?}");
Self::increase_stake_for_hotkey_on_subnet(&hotkey, netuid, tou64!(alpha_divs).into());
// Record dividends for this hotkey.
AlphaDividendsPerSubnet::<T>::mutate(netuid, &hotkey, |divs| {
*divs = divs.saturating_add(tou64!(alpha_divs).into());
});
let alpha_divs_u64: u64 = tou64!(alpha_divs);
if alpha_divs_u64 != 0 {
AlphaDividendsPerSubnet::<T>::mutate(netuid, &hotkey, |divs| {
*divs = divs.saturating_add(alpha_divs_u64.into());
});
}
// Record total hotkey alpha based on which this value of AlphaDividendsPerSubnet
// was calculated
let total_hotkey_alpha = TotalHotkeyAlpha::<T>::get(&hotkey, netuid);
TotalHotkeyAlphaLastEpoch::<T>::insert(hotkey, netuid, total_hotkey_alpha);
if !total_hotkey_alpha.is_zero() {
TotalHotkeyAlphaLastEpoch::<T>::insert(hotkey, netuid, total_hotkey_alpha);
}
}

// Distribute root alpha divs.
Expand Down
5 changes: 5 additions & 0 deletions pallets/subtensor/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2441,6 +2441,11 @@ pub mod pallet {
#[pallet::storage]
pub type HasMigrationRun<T: Config> = StorageMap<_, Identity, Vec<u8>, bool, ValueQuery>;

/// --- Tracks the current phase of the zero-alpha multi-block cleanup.
/// 0 = inactive/complete, 1-4 = active phases (Alpha, TotalHotkeyShares, etc.)
#[pallet::storage]
pub type ZeroAlphaCleanupPhase<T: Config> = StorageValue<_, u8, ValueQuery>;

/// Default value for pending childkey cooldown (settable by root).
/// Uses the same value as DefaultPendingCooldown for consistency.
#[pallet::type_value]
Expand Down
9 changes: 8 additions & 1 deletion pallets/subtensor/src/macros/hooks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,11 @@ mod hooks {
}
}

// ---- Called when the block has leftover weight. Used for multi-block migrations.
fn on_idle(_block_number: BlockNumberFor<T>, remaining_weight: Weight) -> Weight {
migrations::migrate_remove_zero_alpha::on_idle_remove_zero_alpha::<T>(remaining_weight)
}

// ---- Called on the finalization of this pallet. The code weight must be taken into account prior to the execution of this macro.
//
// # Args:
Expand Down Expand Up @@ -166,7 +171,9 @@ mod hooks {
// Fix staking hot keys
.saturating_add(migrations::migrate_fix_staking_hot_keys::migrate_fix_staking_hot_keys::<T>())
// Migrate coldkey swap scheduled to announcements
.saturating_add(migrations::migrate_coldkey_swap_scheduled_to_announcements::migrate_coldkey_swap_scheduled_to_announcements::<T>());
.saturating_add(migrations::migrate_coldkey_swap_scheduled_to_announcements::migrate_coldkey_swap_scheduled_to_announcements::<T>())
// Remove zero-valued entries from Alpha and related storage maps
.saturating_add(migrations::migrate_remove_zero_alpha::migrate_remove_zero_alpha::<T>());
weight
}

Expand Down
216 changes: 216 additions & 0 deletions pallets/subtensor/src/migrations/migrate_remove_zero_alpha.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,216 @@
use super::*;
use frame_support::{traits::Get, weights::Weight};
use log;
use scale_info::prelude::string::String;

/// The migration name used for the `HasMigrationRun` guard.
const MIGRATION_NAME: &[u8] = b"migrate_remove_zero_alpha_v2";

/// Called from `on_runtime_upgrade`. Schedules the cleanup by setting phase = 1
/// if the migration hasn't run yet. This is O(1) — no iteration.
pub fn migrate_remove_zero_alpha<T: Config>() -> Weight {
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since the actual cleanup is executed in `on_idle`, I wonder whether we really need the migration part at all.
For instance, we could set the initial storage value of `ZeroAlphaCleanupPhase` to 1,
and then have `on_idle` handle it until the value reaches 0.
Alternatively, use distinct values for each step to make the flow clearer.
What's your opinion?

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good point. We could simplify by setting the default to 1, but the issue is: after cleanup completes and sets phase=0, a node restart or fresh sync would read the default (1) again and re-trigger cleanup. The current approach uses HasMigrationRun as a permanent guard to prevent this.

Copy link
Copy Markdown
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Good point. We could simplify by setting the default to 1, but the issue is: after cleanup completes and sets phase=0, a node restart or fresh sync would read the default (1) again and re-trigger cleanup. The current approach uses HasMigrationRun as a permanent guard to prevent this.

@open-junius, what do you think about this? Should I still update it?

let migration_name = MIGRATION_NAME.to_vec();
let mut weight = T::DbWeight::get().reads(1);

if HasMigrationRun::<T>::get(&migration_name) {
log::info!(
"Migration '{}' already completed. Skipping.",
String::from_utf8_lossy(&migration_name)
);
return weight;
}

// Schedule the cleanup to run in on_idle by setting phase to 1
ZeroAlphaCleanupPhase::<T>::put(1u8);
weight = weight.saturating_add(T::DbWeight::get().writes(1));

log::info!(
"Migration '{}' scheduled. Will clean up zero entries via on_idle.",
String::from_utf8_lossy(&migration_name)
);

weight
}

/// Called from `on_idle` each block. Uses `remaining_weight` to dynamically
/// bound how many entries to process. Stays on the same phase until all entries
/// in that map are cleaned, then advances to the next phase.
///
/// Phases:
/// 0 = inactive/complete
/// 1 = cleaning Alpha
/// 2 = cleaning TotalHotkeyShares
/// 3 = cleaning TotalHotkeyAlphaLastEpoch
/// 4 = cleaning AlphaDividendsPerSubnet
///
/// Returns the weight actually consumed this block (zero when inactive or
/// when `remaining_weight` is too small to make progress).
pub fn on_idle_remove_zero_alpha<T: Config>(remaining_weight: Weight) -> Weight {
    let phase = ZeroAlphaCleanupPhase::<T>::get();

    // Phase 0 means not active or already completed.
    if phase == 0 {
        return Weight::zero();
    }

    // Minimum weight needed: 1 read (phase) + at least one iteration (read + write).
    // Compare BOTH weight components (ref_time and proof_size) — checking only
    // ref_time could overrun the proof-size budget.
    let min_weight = T::DbWeight::get().reads_writes(2, 1);
    if !remaining_weight.all_gte(min_weight) {
        return Weight::zero();
    }

    let mut weight = T::DbWeight::get().reads(1); // reading phase

    // Budget for batch work = remaining_weight minus overhead (phase read + phase write).
    let overhead = T::DbWeight::get().reads_writes(1, 1);
    let budget = remaining_weight.saturating_sub(overhead);

    // Dispatch to the phase-specific batch cleaner; all arms share the same
    // bookkeeping, so only the map differs per phase.
    let (map_name, (consumed, removed, done)) = match phase {
        1 => ("Alpha", clean_alpha_batch::<T>(budget)),
        2 => ("TotalHotkeyShares", clean_total_hotkey_shares_batch::<T>(budget)),
        3 => (
            "TotalHotkeyAlphaLastEpoch",
            clean_total_hotkey_alpha_last_epoch_batch::<T>(budget),
        ),
        4 => (
            "AlphaDividendsPerSubnet",
            clean_alpha_dividends_per_subnet_batch::<T>(budget),
        ),
        _ => {
            // Unknown phase, reset to inactive.
            ZeroAlphaCleanupPhase::<T>::put(0u8);
            return weight.saturating_add(T::DbWeight::get().writes(1));
        }
    };

    weight = weight.saturating_add(consumed);
    log::info!(
        "Zero-alpha cleanup phase {phase} ({map_name}): removed {removed} zero entries this batch. Done: {done}"
    );

    if done {
        if phase == 4 {
            // All phases complete — mark migration as done and deactivate.
            HasMigrationRun::<T>::insert(MIGRATION_NAME.to_vec(), true);
            ZeroAlphaCleanupPhase::<T>::put(0u8);
            weight = weight.saturating_add(T::DbWeight::get().writes(2));
            log::info!("Zero-alpha cleanup: All phases complete. Migration marked as done.");
        } else {
            // This map is clean — advance to the next phase for the next block.
            ZeroAlphaCleanupPhase::<T>::put(phase.saturating_add(1));
            weight = weight.saturating_add(T::DbWeight::get().writes(1));
        }
    }

    weight
}

/// Remove zero-valued entries from `Alpha`, bounded by the weight budget.
/// Returns `(weight_consumed, entries_removed, is_done)`, where `is_done` is
/// true only when the whole map was scanned in this call.
///
/// NOTE(review): each call creates a fresh iterator, so entries inspected in
/// earlier batches are re-read from the start of the map. If a run of
/// non-zero entries at the front of the map exceeds one batch budget, this
/// phase cannot progress past it — confirm budgets are sufficient, or
/// consider persisting a cursor and resuming with `iter_from`.
fn clean_alpha_batch<T: Config>(budget: Weight) -> (Weight, u64, bool) {
    let db_weight = T::DbWeight::get();
    let read_cost = db_weight.reads(1);
    let write_cost = db_weight.writes(1);
    // Worst case per entry: one read plus one removal write.
    let worst_case = read_cost.saturating_add(write_cost);

    let mut consumed = Weight::zero();
    let mut removed_count = 0u64;

    for (keys, value) in Alpha::<T>::iter() {
        // Bail out before starting an entry that might not fit in the budget.
        if consumed.saturating_add(worst_case).any_gt(budget) {
            return (consumed, removed_count, false);
        }
        consumed = consumed.saturating_add(read_cost);
        if value == 0 {
            Alpha::<T>::remove(keys);
            consumed = consumed.saturating_add(write_cost);
            removed_count = removed_count.saturating_add(1);
        }
    }

    // Iterator exhausted — this phase is done.
    (consumed, removed_count, true)
}

/// Remove zero-valued entries from `TotalHotkeyShares`, bounded by the weight
/// budget. Returns `(weight_consumed, entries_removed, is_done)`.
///
/// NOTE(review): iteration restarts from the first key on every call; see the
/// matching note on `clean_alpha_batch` about long non-zero prefixes.
fn clean_total_hotkey_shares_batch<T: Config>(budget: Weight) -> (Weight, u64, bool) {
    let db_weight = T::DbWeight::get();
    let read_cost = db_weight.reads(1);
    let write_cost = db_weight.writes(1);
    let worst_case = read_cost.saturating_add(write_cost);

    let mut consumed = Weight::zero();
    let mut removed_count = 0u64;
    // Assume we will finish; flip to false if the budget runs out first.
    let mut exhausted = true;

    for (hotkey, netuid, shares) in TotalHotkeyShares::<T>::iter() {
        if consumed.saturating_add(worst_case).any_gt(budget) {
            exhausted = false;
            break;
        }
        consumed = consumed.saturating_add(read_cost);
        if shares == 0 {
            TotalHotkeyShares::<T>::remove(hotkey, netuid);
            consumed = consumed.saturating_add(write_cost);
            removed_count = removed_count.saturating_add(1);
        }
    }

    (consumed, removed_count, exhausted)
}

/// Remove zero-valued entries from `TotalHotkeyAlphaLastEpoch`, bounded by the
/// weight budget. Returns `(weight_consumed, entries_removed, is_done)`.
///
/// NOTE(review): iteration restarts from the first key on every call; see the
/// matching note on `clean_alpha_batch` about long non-zero prefixes.
fn clean_total_hotkey_alpha_last_epoch_batch<T: Config>(budget: Weight) -> (Weight, u64, bool) {
    let db_weight = T::DbWeight::get();
    let read_cost = db_weight.reads(1);
    let write_cost = db_weight.writes(1);
    // Each entry may cost at most a read plus a removal write.
    let worst_case = read_cost.saturating_add(write_cost);

    let mut consumed = Weight::zero();
    let mut removed_count = 0u64;

    for (hotkey, netuid, alpha) in TotalHotkeyAlphaLastEpoch::<T>::iter() {
        // Stop before an entry whose worst-case cost would exceed the budget.
        if consumed.saturating_add(worst_case).any_gt(budget) {
            return (consumed, removed_count, false);
        }
        consumed = consumed.saturating_add(read_cost);
        if alpha.is_zero() {
            TotalHotkeyAlphaLastEpoch::<T>::remove(hotkey, netuid);
            consumed = consumed.saturating_add(write_cost);
            removed_count = removed_count.saturating_add(1);
        }
    }

    // Full scan completed this call.
    (consumed, removed_count, true)
}

/// Remove zero-valued entries from `AlphaDividendsPerSubnet`, bounded by the
/// weight budget. Returns `(weight_consumed, entries_removed, is_done)`.
///
/// NOTE(review): iteration restarts from the first key on every call; see the
/// matching note on `clean_alpha_batch` about long non-zero prefixes.
fn clean_alpha_dividends_per_subnet_batch<T: Config>(budget: Weight) -> (Weight, u64, bool) {
    let db_weight = T::DbWeight::get();
    let read_cost = db_weight.reads(1);
    let write_cost = db_weight.writes(1);
    let worst_case = read_cost.saturating_add(write_cost);

    let mut consumed = Weight::zero();
    let mut removed_count = 0u64;
    let mut exhausted = true;

    for (netuid, hotkey, dividends) in AlphaDividendsPerSubnet::<T>::iter() {
        // Budget guard: never start an entry we might not be able to afford.
        if consumed.saturating_add(worst_case).any_gt(budget) {
            exhausted = false;
            break;
        }
        consumed = consumed.saturating_add(read_cost);
        if dividends.is_zero() {
            AlphaDividendsPerSubnet::<T>::remove(netuid, hotkey);
            consumed = consumed.saturating_add(write_cost);
            removed_count = removed_count.saturating_add(1);
        }
    }

    (consumed, removed_count, exhausted)
}
1 change: 1 addition & 0 deletions pallets/subtensor/src/migrations/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@ pub mod migrate_remove_tao_dividends;
pub mod migrate_remove_total_hotkey_coldkey_stakes_this_interval;
pub mod migrate_remove_unknown_neuron_axon_cert_prom;
pub mod migrate_remove_unused_maps_and_values;
pub mod migrate_remove_zero_alpha;
pub mod migrate_remove_zero_total_hotkey_alpha;
pub mod migrate_reset_bonds_moving_average;
pub mod migrate_reset_max_burn;
Expand Down
16 changes: 12 additions & 4 deletions pallets/subtensor/src/staking/claim_root.rs
Original file line number Diff line number Diff line change
Expand Up @@ -263,8 +263,12 @@ impl<T: Config> Pallet<T> {
.saturating_to_num(),
);

// Set the new root claimed value.
RootClaimed::<T>::insert((netuid, hotkey, coldkey), new_root_claimed);
// Set the new root claimed value, or remove if zero to avoid storage bloat.
if new_root_claimed != 0 {
RootClaimed::<T>::insert((netuid, hotkey, coldkey), new_root_claimed);
} else {
RootClaimed::<T>::remove((netuid, hotkey, coldkey));
}
}
}

Expand All @@ -290,8 +294,12 @@ impl<T: Config> Pallet<T> {
.saturating_to_num(),
);

// Set the new root_claimed value.
RootClaimed::<T>::insert((netuid, hotkey, coldkey), new_root_claimed);
// Set the new root_claimed value, removing if zero to avoid storage bloat.
if new_root_claimed != 0 {
RootClaimed::<T>::insert((netuid, hotkey, coldkey), new_root_claimed);
} else {
RootClaimed::<T>::remove((netuid, hotkey, coldkey));
}
}
}

Expand Down
8 changes: 2 additions & 6 deletions pallets/subtensor/src/staking/set_children.rs
Original file line number Diff line number Diff line change
Expand Up @@ -412,15 +412,11 @@ impl<T: Config> Pallet<T> {
for (parent, _) in relations.parents().iter() {
let mut ck = ChildKeys::<T>::get(parent.clone(), netuid);
PCRelations::<T>::remove_edge(&mut ck, old_hotkey);
ChildKeys::<T>::insert(parent.clone(), netuid, ck);
Self::set_childkeys(parent.clone(), netuid, ck);
weight.saturating_accrue(T::DbWeight::get().reads_writes(1, 1));
}
// 2c) Clear direct maps of old_hotkey
ChildKeys::<T>::insert(
old_hotkey.clone(),
netuid,
Vec::<(u64, T::AccountId)>::new(),
);
ChildKeys::<T>::remove(old_hotkey.clone(), netuid);
Self::set_parentkeys(
old_hotkey.clone(),
netuid,
Expand Down
8 changes: 6 additions & 2 deletions pallets/subtensor/src/swap/swap_coldkey.rs
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,9 @@ impl<T: Config> Pallet<T> {
}

StakingHotkeys::<T>::remove(old_coldkey);
StakingHotkeys::<T>::insert(new_coldkey, new_staking_hotkeys);
if !new_staking_hotkeys.is_empty() {
StakingHotkeys::<T>::insert(new_coldkey, new_staking_hotkeys);
}
}

/// Transfer the ownership of the hotkeys owned by the old coldkey to the new coldkey.
Expand All @@ -178,6 +180,8 @@ impl<T: Config> Pallet<T> {
}
}
OwnedHotkeys::<T>::remove(old_coldkey);
OwnedHotkeys::<T>::insert(new_coldkey, new_owned_hotkeys);
if !new_owned_hotkeys.is_empty() {
OwnedHotkeys::<T>::insert(new_coldkey, new_owned_hotkeys);
}
}
}
Loading
Loading