From 36b4254d67c637306e6ca2a3b5a6b624f4e137fa Mon Sep 17 00:00:00 2001 From: Luca Toniolo <10792599+grandixximo@users.noreply.github.com> Date: Sat, 9 May 2026 19:55:23 +0800 Subject: [PATCH] feat(hal,rtapi): add initf one-shot init functs New hal_init_funct_to_thread() registers a funct that runs exactly once in RT context before the thread's cyclic list, on the first cycle after start_threads. After the init pass the thread loop calls new rtapi_task_self_resync() to re-anchor the periodic schedule, so a long init does not trip rtapi_wait()'s "unexpected realtime delay" catch-up loop and the next cyclic cycle starts on a clean period boundary. Primary use is EtherCAT master activation: ecrt_master_activate() must run in the RT thread immediately before cyclic comms, but the call itself takes far longer than a period. Surfaced as halcmd verb 'initf' (same +N/-N position semantics as addf). Late initf calls return -EALREADY so config order does not depend on whether start_threads has run yet. Posix and Xenomai backends resync by clock_gettime(CLOCK_MONOTONIC, &task->nextstart); Xenomai EVL uses evl_read_clock(EVL_CLOCK_MONOTONIC, ...). RTAI backend is a warn-once stub: per-task period storage is not currently kept and the primary consumer runs on uspace. 
Co-authored-by: Hannes Diethelm --- src/hal/hal.h | 19 ++++ src/hal/hal_lib.c | 161 ++++++++++++++++++++++++++++++- src/hal/hal_priv.h | 2 + src/hal/utils/halcmd.c | 1 + src/hal/utils/halcmd_commands.cc | 25 +++++ src/hal/utils/halcmd_commands.h | 1 + src/rtapi/rtai_rtapi.c | 18 ++++ src/rtapi/rtapi.h | 12 +++ src/rtapi/uspace_posix.cc | 10 ++ src/rtapi/uspace_rtai.cc | 10 ++ src/rtapi/uspace_rtapi_app.hh | 1 + src/rtapi/uspace_rtapi_main.cc | 4 + src/rtapi/uspace_xenomai.cc | 10 ++ src/rtapi/uspace_xenomai_evl.cc | 10 ++ 14 files changed, 283 insertions(+), 1 deletion(-) diff --git a/src/hal/hal.h b/src/hal/hal.h index 96656c2ed57..c247dcd4137 100644 --- a/src/hal/hal.h +++ b/src/hal/hal.h @@ -799,6 +799,25 @@ extern int hal_thread_delete(const char *name); extern int hal_add_funct_to_thread(const char *funct_name, const char *thread_name, int position); +/** hal_init_funct_to_thread() registers a function to run exactly once, + in the realtime context of 'thread_name', before the thread executes + any cyclic (addf-registered) function. The init list is invoked in a + dedicated "special cycle" the first time the thread observes + threads_running == 1; the cyclic funct list is skipped during that + cycle. After the init list returns, the thread's period is re-anchored + so the next cyclic cycle wakes one full period later, which both + avoids the spurious "unexpected realtime delay" warning that would + otherwise follow a long init and gives the cyclic pass a clean + starting boundary. + 'position' uses the same semantics as hal_add_funct_to_thread(): + positive values count from the start of the init list (+1 runs first), + negative values count from the end (-1 runs last); 0 is illegal. + Calls made after the init cycle has already run return -EALREADY and + have no effect. + Returns 0, -EALREADY, or a negative error code. Call only from user + space or init code, not from realtime code. 
*/ +extern int hal_init_funct_to_thread(const char *funct_name, const char *thread_name, int position); + /** hal_del_funct_from_thread() removes a function from a thread. 'funct_name' is the name of the function, as specified in a call to hal_export_funct(). diff --git a/src/hal/hal_lib.c b/src/hal/hal_lib.c index af073f1de0a..ab19fe3c4fe 100644 --- a/src/hal/hal_lib.c +++ b/src/hal/hal_lib.c @@ -2386,6 +2386,119 @@ int hal_add_funct_to_thread(const char *funct_name, const char *thread_name, int return 0; } +int hal_init_funct_to_thread(const char *funct_name, const char *thread_name, int position) +{ + hal_thread_t *thread; + hal_funct_t *funct; + hal_list_t *list_root, *list_entry; + int n; + hal_funct_entry_t *funct_entry; + + if (hal_data == 0) { + rtapi_print_msg(RTAPI_MSG_ERR, + "HAL: ERROR: init_funct called before init\n"); + return -EINVAL; + } + + if (hal_data->lock & HAL_LOCK_CONFIG) { + rtapi_print_msg(RTAPI_MSG_ERR, + "HAL: ERROR: init_funct_to_thread called while HAL is locked\n"); + return -EPERM; + } + + if (position == 0) { + rtapi_print_msg(RTAPI_MSG_ERR, "HAL: ERROR: bad position: 0\n"); + return -EINVAL; + } + + if (funct_name == 0 || thread_name == 0) { + rtapi_print_msg(RTAPI_MSG_ERR, + "HAL: ERROR: missing function or thread name\n"); + return -EINVAL; + } + + rtapi_print_msg(RTAPI_MSG_DBG, + "HAL: adding init function '%s' to thread '%s'\n", + funct_name, thread_name); + + rtapi_mutex_get(&(hal_data->mutex)); + + funct = halpr_find_funct_by_name(funct_name); + if (funct == 0) { + rtapi_mutex_give(&(hal_data->mutex)); + rtapi_print_msg(RTAPI_MSG_ERR, + "HAL: ERROR: function '%s' not found\n", funct_name); + return -EINVAL; + } + + thread = halpr_find_thread_by_name(thread_name); + if (thread == 0) { + rtapi_mutex_give(&(hal_data->mutex)); + rtapi_print_msg(RTAPI_MSG_ERR, + "HAL: ERROR: thread '%s' not found\n", thread_name); + return -EINVAL; + } + + /* once the special init cycle has executed, further initf calls are a + no-op so config 
order doesn't depend on whether start_threads has been + issued. Surface this with -EALREADY so halcmd can warn loudly. */ + if (thread->init_done) { + rtapi_mutex_give(&(hal_data->mutex)); + rtapi_print_msg(RTAPI_MSG_WARN, + "HAL: WARNING: thread '%s' init cycle already ran; '%s' will not be invoked\n", + thread_name, funct_name); + return -EALREADY; + } + + /* find insertion point in init list (same semantics as + hal_add_funct_to_thread: +N from head, -N from tail) */ + list_root = &(thread->init_funct_list); + list_entry = list_root; + n = 0; + if (position > 0) { + while (++n < position) { + list_entry = list_next(list_entry); + if (list_entry == list_root) { + rtapi_mutex_give(&(hal_data->mutex)); + rtapi_print_msg(RTAPI_MSG_ERR, + "HAL: ERROR: position '%d' is too high\n", position); + return -EINVAL; + } + } + } else { + while (--n > position) { + list_entry = list_prev(list_entry); + if (list_entry == list_root) { + rtapi_mutex_give(&(hal_data->mutex)); + rtapi_print_msg(RTAPI_MSG_ERR, + "HAL: ERROR: position '%d' is too low\n", position); + return -EINVAL; + } + } + list_entry = list_prev(list_entry); + } + + /* allow the same funct to be on funct_list and init_funct_list, and to be + referenced multiple times in the init list itself (no users-cap check) */ + funct_entry = alloc_funct_entry_struct(); + if (funct_entry == 0) { + rtapi_mutex_give(&(hal_data->mutex)); + rtapi_print_msg(RTAPI_MSG_ERR, + "HAL: ERROR: insufficient memory for thread->init function link\n"); + return -ENOMEM; + } + funct_entry->funct_ptr = SHMOFF(funct); + funct_entry->arg = funct->arg; + funct_entry->funct = funct->funct; + + list_add_after((hal_list_t *) funct_entry, list_entry); + + funct->users++; + + rtapi_mutex_give(&(hal_data->mutex)); + return 0; +} + int hal_del_funct_from_thread(const char *funct_name, const char *thread_name) { hal_thread_t *thread; @@ -3007,7 +3120,36 @@ static void thread_task(void *arg) thread = arg; while (1) { - if (hal_data->threads_running > 0) 
{ + if (hal_data->threads_running > 0 && !thread->init_done) { + /* special init cycle: run init_funct_list once with no + timing measurement, re-anchor the period so the long + init does not poison maxtime, does not trip the + "unexpected realtime delay" catch-up loop, and lands + the next wakeup at a clean period boundary (used to + keep EtherCAT send clear of SYNC0). The cyclic + funct_list is intentionally NOT executed in this + cycle -- the next cycle is the first clean cyclic + pass. After init_done is latched, drain the list back + to the free pool: every entry has already run, the + list serves no further purpose, and a halcmd 'initf' + arriving later is rejected by the init_done check. */ + funct_root = (hal_funct_entry_t *) & (thread->init_funct_list); + funct_entry = SHMPTR(funct_root->links.next); + while (funct_entry != funct_root) { + funct_entry->funct(funct_entry->arg, thread->period); + funct_entry = SHMPTR(funct_entry->links.next); + } + rtapi_task_self_resync(); + thread->init_done = 1; + /* drain the list now that the init pass is complete */ + funct_entry = SHMPTR(funct_root->links.next); + while (funct_entry != funct_root) { + hal_funct_entry_t *next_entry = SHMPTR(funct_entry->links.next); + list_remove_entry((hal_list_t *) funct_entry); + free_funct_entry_struct(funct_entry); + funct_entry = next_entry; + } + } else if (hal_data->threads_running > 0) { /* point at first function on function list */ funct_root = (hal_funct_entry_t *) & (thread->funct_list); funct_entry = SHMPTR(funct_root->links.next); @@ -3391,6 +3533,8 @@ static hal_thread_t *alloc_thread_struct(void) p->priority = 0; p->task_id = 0; list_init_entry(&(p->funct_list)); + list_init_entry(&(p->init_funct_list)); + p->init_done = 0; p->name[0] = '\0'; } return p; @@ -3637,6 +3781,20 @@ static void free_funct_struct(hal_funct_t * funct) list_entry = list_next(list_entry); } } + /* also sweep the init function list so unloaded comps don't + leave dangling init entries that would 
crash on the next + start_threads */ + list_root = &(thread->init_funct_list); + list_entry = list_next(list_root); + while (list_entry != list_root) { + funct_entry = (hal_funct_entry_t *) list_entry; + if (SHMPTR(funct_entry->funct_ptr) == funct) { + list_entry = list_remove_entry(list_entry); + free_funct_entry_struct(funct_entry); + } else { + list_entry = list_next(list_entry); + } + } /* move on to the next thread */ next_thread = thread->next_ptr; } @@ -4488,6 +4646,7 @@ EXPORT_SYMBOL(hal_export_functf); EXPORT_SYMBOL(hal_create_thread); EXPORT_SYMBOL(hal_add_funct_to_thread); +EXPORT_SYMBOL(hal_init_funct_to_thread); EXPORT_SYMBOL(hal_del_funct_from_thread); EXPORT_SYMBOL(hal_start_threads); diff --git a/src/hal/hal_priv.h b/src/hal/hal_priv.h index 0cf88db6815..33efadb303f 100644 --- a/src/hal/hal_priv.h +++ b/src/hal/hal_priv.h @@ -395,6 +395,8 @@ struct hal_thread_t { hal_s32_t* runtime; /* (pin) duration of last run, in CPU cycles */ hal_s32_t maxtime; /* (param) duration of longest run, in CPU cycles */ hal_list_t funct_list; /* list of functions to run */ + hal_list_t init_funct_list; /* list of init functions, run once before first cyclic cycle */ + int init_done; /* 0 = init pending, 1 = init cycle has executed */ char name[HAL_NAME_LEN + 1]; /* thread name */ int comp_id; }; diff --git a/src/hal/utils/halcmd.c b/src/hal/utils/halcmd.c index 2899e62992a..c440bd323bd 100644 --- a/src/hal/utils/halcmd.c +++ b/src/hal/utils/halcmd.c @@ -135,6 +135,7 @@ void halcmd_shutdown(void) { struct halcmd_command halcmd_commands[] = { {"addf", FUNCT(do_addf_cmd, cp_cp_cpp), A_TWO | A_PLUS }, + {"initf", FUNCT(do_initf_cmd, cp_cp_cpp), A_TWO | A_PLUS }, {"alias", FUNCT(do_alias_cmd, cp_cp_cp), A_THREE }, {"delf", FUNCT(do_delf_cmd, cp_cp), A_TWO | A_OPTIONAL }, {"delsig", FUNCT(do_delsig_cmd, cp), A_ONE }, diff --git a/src/hal/utils/halcmd_commands.cc b/src/hal/utils/halcmd_commands.cc index f6aa8fe4f99..5a7eb70f9f5 100644 --- a/src/hal/utils/halcmd_commands.cc 
+++ b/src/hal/utils/halcmd_commands.cc @@ -338,6 +338,31 @@ int do_addf_cmd(char *func, char *thread, char **opt) { return retval; } +int do_initf_cmd(char *func, char *thread, char **opt) { + /* usage: initf <funct> <thread> [position] + position has the same meaning as in addf: +N from start of the init + list (+1 = run first), -N from end (-1 = run last, default), 0 illegal. + The function runs once in realtime context in a dedicated cycle before + the cyclic funct list; next cyclic cycle wakes one period later. */ + char *position_str = opt ? opt[0] : NULL; + int position = -1; + int retval; + + if(position_str && *position_str) position = atoi(position_str); + + retval = hal_init_funct_to_thread(func, thread, position); + if(retval == 0) { + halcmd_info("Init function '%s' registered on thread '%s'\n", + func, thread); + } else if(retval == -EALREADY) { + halcmd_error("initf: thread '%s' init cycle already executed; " + "'%s' was NOT registered\n", thread, func); + } else { + halcmd_error("initf failed\n"); + } + return retval; +} + int do_alias_cmd(char *pinparam, char *name, char *alias) { int retval; diff --git a/src/hal/utils/halcmd_commands.h b/src/hal/utils/halcmd_commands.h index 63fbf44890d..49629b7d1e5 100644 --- a/src/hal/utils/halcmd_commands.h +++ b/src/hal/utils/halcmd_commands.h @@ -46,6 +46,7 @@ RTAPI_BEGIN_DECLS extern int do_addf_cmd(char *funct, char *thread, char *tokens[]); +extern int do_initf_cmd(char *funct, char *thread, char *tokens[]); extern int do_alias_cmd(char *pinparam, char *name, char *alias); extern int do_unalias_cmd(char *pinparam, char *name); extern int do_delf_cmd(char *funct, char *thread); diff --git a/src/rtapi/rtai_rtapi.c b/src/rtapi/rtai_rtapi.c index 9c1f9d961d0..999bd0aade0 100644 --- a/src/rtapi/rtai_rtapi.c +++ b/src/rtapi/rtai_rtapi.c @@ -918,6 +918,23 @@ int rtapi_task_pause(int task_id) return 0; } +void rtapi_task_self_resync(void) +{ + /* RTAI backend stub: re-anchoring the period from inside an RTAI task + requires 
per-task period storage that is not currently kept. The + primary consumer (EtherCAT init via initf) runs on the uspace + backend. If RTAI support is needed, store period_counts per task in + rtapi_task_start() and call + rt_task_make_periodic(rt_whoami(), rt_get_time() + period_counts, + period_counts) here. */ + static int warned = 0; + if (!warned) { + rtapi_print_msg(RTAPI_MSG_WARN, + "RTAPI: rtapi_task_self_resync() is a no-op on the RTAI backend\n"); + warned = 1; + } +} + int rtapi_task_self(void) { RT_TASK *ptr; @@ -1728,6 +1745,7 @@ EXPORT_SYMBOL(rtapi_wait); EXPORT_SYMBOL(rtapi_task_resume); EXPORT_SYMBOL(rtapi_task_pause); EXPORT_SYMBOL(rtapi_task_self); +EXPORT_SYMBOL(rtapi_task_self_resync); EXPORT_SYMBOL(rtapi_shmem_new); EXPORT_SYMBOL(rtapi_shmem_delete); EXPORT_SYMBOL(rtapi_shmem_getptr); diff --git a/src/rtapi/rtapi.h b/src/rtapi/rtapi.h index 2f07296061e..b42cc77e14e 100644 --- a/src/rtapi/rtapi.h +++ b/src/rtapi/rtapi.h @@ -502,6 +502,18 @@ RTAPI_BEGIN_DECLS */ extern int rtapi_task_self(void); +/** + * @brief Re-anchor the periodic schedule of the calling task. + * + * Sets the task's next wakeup to one full period from now, discarding any + * accumulated lag. Used after a long one-shot init sequence so the catch-up + * loop in rtapi_wait() does not fire "unexpected realtime delay" warnings + * and the next cyclic pass starts on a clean period boundary. + * @note Call only from within the realtime task whose schedule is to be + * re-anchored. No-op if the task is not periodic. 
+ */ +extern void rtapi_task_self_resync(void); + #if defined(RTAPI_USPACE) || defined(USPACE) #define RTAPI_TASK_PLL_SUPPORT diff --git a/src/rtapi/uspace_posix.cc b/src/rtapi/uspace_posix.cc index a6490fdcf22..b2e3e619f3e 100644 --- a/src/rtapi/uspace_posix.cc +++ b/src/rtapi/uspace_posix.cc @@ -201,6 +201,16 @@ struct PosixApp : RtapiApp { return task->id; } + void task_self_resync() { + RtapiTask *task = reinterpret_cast<RtapiTask *>(pthread_getspecific(key)); + if (!task) + return; + /* Set nextstart = now. The next rtapi_wait() advances it by + (period + pll_correction) and sleeps until then, giving exactly one + fresh period from this point. */ + clock_gettime(CLOCK_MONOTONIC, &task->nextstart); + } + bool do_thread_lock; static pthread_once_t key_once; diff --git a/src/rtapi/uspace_rtai.cc b/src/rtapi/uspace_rtai.cc index ad3adf2dd21..d2776769fb2 100644 --- a/src/rtapi/uspace_rtai.cc +++ b/src/rtapi/uspace_rtai.cc @@ -182,6 +182,16 @@ struct RtaiApp : RtapiApp { return task->id; } + void task_self_resync() { + /* RTAI uspace stub: per-task period storage is not kept here. 
*/ + static int warned = 0; + if (!warned) { + rtapi_print_msg(RTAPI_MSG_WARN, + "RTAPI: rtapi_task_self_resync() is a no-op on the RTAI uspace backend\n"); + warned = 1; + } + } + static pthread_once_t key_once; static pthread_key_t key; static void init_key(void) { diff --git a/src/rtapi/uspace_rtapi_app.hh b/src/rtapi/uspace_rtapi_app.hh index 5a33423a689..9504dec2de6 100644 --- a/src/rtapi/uspace_rtapi_app.hh +++ b/src/rtapi/uspace_rtapi_app.hh @@ -103,6 +103,7 @@ struct RtapiApp { virtual int task_pause(int task_id) = 0; virtual int task_resume(int task_id) = 0; virtual int task_self() = 0; + virtual void task_self_resync() = 0; virtual long long task_pll_get_reference(void) = 0; virtual int task_pll_set_correction(long value) = 0; virtual void wait() = 0; diff --git a/src/rtapi/uspace_rtapi_main.cc b/src/rtapi/uspace_rtapi_main.cc index 216d5db9a19..3a73c5e294e 100644 --- a/src/rtapi/uspace_rtapi_main.cc +++ b/src/rtapi/uspace_rtapi_main.cc @@ -1062,6 +1062,10 @@ int rtapi_task_self() { return App().task_self(); } +void rtapi_task_self_resync(void) { + App().task_self_resync(); +} + long long rtapi_task_pll_get_reference(void) { return App().task_pll_get_reference(); } diff --git a/src/rtapi/uspace_xenomai.cc b/src/rtapi/uspace_xenomai.cc index 4c7ff05c5a2..c9d887f9e35 100644 --- a/src/rtapi/uspace_xenomai.cc +++ b/src/rtapi/uspace_xenomai.cc @@ -195,6 +195,16 @@ struct XenomaiApp : RtapiApp { return task->id; } + void task_self_resync() { + RtapiTask *task = reinterpret_cast<RtapiTask *>(pthread_getspecific(key)); + if (!task) + return; + /* Set nextstart = now. The next rtapi_wait() advances it by + (period + pll_correction) and sleeps until then, giving exactly one + fresh period from this point. 
*/ + clock_gettime(CLOCK_MONOTONIC, &task->nextstart); + } + static pthread_once_t key_once; static pthread_key_t key; static void init_key(void) { diff --git a/src/rtapi/uspace_xenomai_evl.cc b/src/rtapi/uspace_xenomai_evl.cc index 6c2d46b6142..6f1988337b0 100644 --- a/src/rtapi/uspace_xenomai_evl.cc +++ b/src/rtapi/uspace_xenomai_evl.cc @@ -216,6 +216,16 @@ struct EvlApp : RtapiApp { return task->id; } + void task_self_resync() { + RtapiTask *task = reinterpret_cast<RtapiTask *>(pthread_getspecific(key)); + if (!task) + return; + /* Set nextstart = now. The next rtapi_wait() advances it by + (period + pll_correction) and sleeps until then, giving exactly one + fresh period from this point. */ + evl_read_clock(EVL_CLOCK_MONOTONIC, &task->nextstart); + } + static pthread_once_t key_once; static pthread_key_t key; static void init_key(void) {