From f2abfadee192482b2e4d5aabf0cc43868079a29b Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Wed, 4 Mar 2026 23:12:01 -0500 Subject: [PATCH 01/12] add var_files support and Docker image priming for runtime builds - Exclude var_files patterns from sysext/confext .raw images (erofs and squashfs) - Apply extension var_files to var partition during runtime build - Apply runtime-level var_files mappings to var partition - Prime Docker image cache on var partition by starting a temporary dockerd - Auto-add --privileged to runtime build when extensions declare docker_images - Update compute_runtime_input_hash to include full parsed config for hash stability --- src/commands/ext/image.rs | 121 ++++++++++++++-- src/commands/runtime/build.rs | 240 ++++++++++++++++++++++++++++++-- src/commands/runtime/install.rs | 2 +- src/utils/config.rs | 177 +++++++++++++++++++++++ src/utils/stamps.rs | 172 ++++++++++++++++++++++- 5 files changed, 678 insertions(+), 34 deletions(-) diff --git a/src/commands/ext/image.rs b/src/commands/ext/image.rs index 3053b79..abd8752 100644 --- a/src/commands/ext/image.rs +++ b/src/commands/ext/image.rs @@ -377,6 +377,9 @@ impl ExtImageCommand { let source_date_epoch = config.source_date_epoch.unwrap_or(0); + // Get var_files patterns — these files go on the var partition and are excluded from the .raw image + let var_files = crate::utils::config::get_ext_var_files(&ext_config); + let result = self .create_image( &container_helper, @@ -389,6 +392,7 @@ impl ExtImageCommand { &merged_container_args, source_date_epoch, filesystem, + &var_files, ) .await?; @@ -460,10 +464,16 @@ impl ExtImageCommand { merged_container_args: &Option>, source_date_epoch: u64, filesystem: &str, + var_files: &[String], ) -> Result { // Create the build script - let build_script = - self.create_build_script(ext_version, extension_type, source_date_epoch, filesystem); + let build_script = self.create_build_script( + ext_version, + extension_type, + source_date_epoch, + 
filesystem, + var_files, + ); // Execute the build script in the SDK container if self.verbose { @@ -496,25 +506,64 @@ impl ExtImageCommand { _extension_type: &str, source_date_epoch: u64, filesystem: &str, + var_files: &[String], ) -> String { + // Build exclude flags for var_files patterns (these go on the var partition, not in the .raw image) + let var_excludes = var_files + .iter() + .map(|pattern| { + // Strip trailing /** or /* glob suffixes to get the directory path for exclusion + let clean = pattern + .trim_end_matches("/**") + .trim_end_matches("/*"); + clean.to_string() + }) + .collect::>(); + let mkfs_command = match filesystem { - "erofs" => r#"# Create erofs image + "erofs" => { + let exclude_flags = var_excludes + .iter() + .map(|p| format!(" --exclude-path={p} \\")) + .collect::>() + .join("\n"); + let exclude_section = if exclude_flags.is_empty() { + String::new() + } else { + format!("\n{exclude_flags}") + }; + format!( + r#"# Create erofs image mkfs.erofs \ -T "$SOURCE_DATE_EPOCH" \ -U 00000000-0000-0000-0000-000000000000 \ -x -1 \ - --all-root \ + --all-root \{exclude_section} "$OUTPUT_FILE" \ "$AVOCADO_EXT_SYSROOTS/$EXT_NAME""# - .to_string(), - _ => r#"# Create squashfs image + ) + } + _ => { + let exclude_flags = var_excludes + .iter() + .map(|p| format!(" -e \"{p}\"")) + .collect::>() + .join(" \\\n"); + let exclude_section = if exclude_flags.is_empty() { + String::new() + } else { + format!(" \\\n{exclude_flags}") + }; + format!( + r#"# Create squashfs image mksquashfs \ "$AVOCADO_EXT_SYSROOTS/$EXT_NAME" \ "$OUTPUT_FILE" \ -noappend \ -no-xattrs \ - -reproducible"# - .to_string(), + -reproducible{exclude_section}"# + ) + } }; format!( @@ -572,7 +621,7 @@ mod tests { #[test] fn test_create_build_script_erofs_contains_reproducible_flags() { let cmd = make_cmd("my-ext"); - let script = cmd.create_build_script("1.0.0", "sysext", 0, "erofs"); + let script = cmd.create_build_script("1.0.0", "sysext", 0, "erofs", &[]); assert!( 
script.contains("mkfs.erofs"), @@ -595,7 +644,7 @@ mod tests { #[test] fn test_create_build_script_squashfs_contains_reproducible_flags() { let cmd = make_cmd("my-ext"); - let script = cmd.create_build_script("1.0.0", "sysext", 0, "squashfs"); + let script = cmd.create_build_script("1.0.0", "sysext", 0, "squashfs", &[]); assert!( script.contains("mksquashfs"), @@ -623,7 +672,7 @@ mod tests { fn test_create_build_script_defaults_to_squashfs() { let cmd = make_cmd("my-ext"); // Passing "squashfs" simulates the default behavior - let script = cmd.create_build_script("1.0.0", "sysext", 0, "squashfs"); + let script = cmd.create_build_script("1.0.0", "sysext", 0, "squashfs", &[]); assert!( script.contains("mksquashfs"), @@ -634,7 +683,7 @@ mod tests { #[test] fn test_create_build_script_source_date_epoch_default() { let cmd = make_cmd("my-ext"); - let script = cmd.create_build_script("1.0.0", "sysext", 0, "erofs"); + let script = cmd.create_build_script("1.0.0", "sysext", 0, "erofs", &[]); assert!( script.contains("export SOURCE_DATE_EPOCH=0"), @@ -649,7 +698,7 @@ mod tests { #[test] fn test_create_build_script_source_date_epoch_custom() { let cmd = make_cmd("my-ext"); - let script = cmd.create_build_script("1.0.0", "sysext", 1700000000, "erofs"); + let script = cmd.create_build_script("1.0.0", "sysext", 1700000000, "erofs", &[]); assert!( script.contains("export SOURCE_DATE_EPOCH=1700000000"), @@ -664,7 +713,7 @@ mod tests { #[test] fn test_create_build_script_extension_name_and_version() { let cmd = make_cmd("test-extension"); - let script = cmd.create_build_script("2.3.4", "sysext", 0, "squashfs"); + let script = cmd.create_build_script("2.3.4", "sysext", 0, "squashfs", &[]); assert!( script.contains("EXT_NAME=\"test-extension\""), @@ -679,11 +728,53 @@ mod tests { #[test] fn test_create_build_script_output_path() { let cmd = make_cmd("my-ext"); - let script = cmd.create_build_script("1.0.0", "sysext", 0, "squashfs"); + let script = cmd.create_build_script("1.0.0", 
"sysext", 0, "squashfs", &[]); assert!( script.contains("OUTPUT_FILE=\"$OUTPUT_DIR/$EXT_NAME-$EXT_VERSION.raw\""), "script should set the output file with .raw extension" ); } + + #[test] + fn test_create_build_script_squashfs_var_files_excludes() { + let cmd = make_cmd("my-ext"); + let var_files = vec![ + "var/lib/docker/**".to_string(), + "var/lib/myapp/data".to_string(), + ]; + let script = cmd.create_build_script("1.0.0", "sysext", 0, "squashfs", &var_files); + + assert!( + script.contains("-e \"var/lib/docker\""), + "squashfs script should exclude var/lib/docker" + ); + assert!( + script.contains("-e \"var/lib/myapp/data\""), + "squashfs script should exclude var/lib/myapp/data" + ); + } + + #[test] + fn test_create_build_script_erofs_var_files_excludes() { + let cmd = make_cmd("my-ext"); + let var_files = vec!["var/lib/docker/**".to_string()]; + let script = cmd.create_build_script("1.0.0", "sysext", 0, "erofs", &var_files); + + assert!( + script.contains("--exclude-path=var/lib/docker"), + "erofs script should exclude var/lib/docker" + ); + } + + #[test] + fn test_create_build_script_no_var_files_no_excludes() { + let cmd = make_cmd("my-ext"); + let script = cmd.create_build_script("1.0.0", "sysext", 0, "squashfs", &[]); + + assert!( + !script.contains("-e \"var/"), + "script should not contain exclude flags when no var_files" + ); + } } diff --git a/src/commands/runtime/build.rs b/src/commands/runtime/build.rs index 35481fe..00a197f 100644 --- a/src/commands/runtime/build.rs +++ b/src/commands/runtime/build.rs @@ -227,7 +227,7 @@ impl RuntimeBuildCommand { .flatten(); let current_inputs = merged_runtime .as_ref() - .and_then(|mr| compute_runtime_input_hash(mr, &self.runtime_name).ok()); + .and_then(|mr| compute_runtime_input_hash(mr, &self.runtime_name, parsed).ok()); let validation = validate_stamps_batch( &required, output.as_deref().unwrap_or(""), @@ -429,6 +429,37 @@ impl RuntimeBuildCommand { Some(env_vars) }; + // If any extension in this runtime 
declares docker_images, add --privileged + // to container args so dockerd can run inside the SDK container (Docker-in-Docker) + let build_container_args = { + let ext_list: Vec<&str> = merged_runtime + .as_ref() + .and_then(|rt| rt.get("extensions")) + .and_then(|e| e.as_sequence()) + .map(|seq| seq.iter().filter_map(|v| v.as_str()).collect()) + .unwrap_or_default(); + + let has_docker_images = ext_list.iter().any(|ext_name| { + parsed + .get("extensions") + .and_then(|e| e.get(*ext_name)) + .map(|ext| !crate::utils::config::get_docker_images(ext).is_empty()) + .unwrap_or(false) + }); + + if has_docker_images { + let mut args = merged_container_args + .clone() + .unwrap_or_default(); + if !args.iter().any(|a| a == "--privileged") { + args.push("--privileged".to_string()); + } + Some(args) + } else { + merged_container_args.clone() + } + }; + let run_config = RunConfig { container_image: container_image.to_string(), target: target_arch.to_string(), @@ -438,7 +469,7 @@ impl RuntimeBuildCommand { interactive: false, // build script runs non-interactively repo_url: repo_url.cloned(), repo_release: repo_release.cloned(), - container_args: merged_container_args.clone(), + container_args: build_container_args, dnf_args: self.dnf_args.clone(), env_vars, // runs_on handled by shared context @@ -463,7 +494,7 @@ impl RuntimeBuildCommand { let merged_runtime = config .get_merged_runtime_config(&self.runtime_name, target_arch, &self.config_path)? 
.unwrap_or_default(); - let inputs = compute_runtime_input_hash(&merged_runtime, &self.runtime_name)?; + let inputs = compute_runtime_input_hash(&merged_runtime, &self.runtime_name, parsed)?; let outputs = StampOutputs::default(); let stamp = Stamp::runtime_build(&self.runtime_name, target_arch, inputs, outputs); let stamp_script = generate_write_stamp_script(&stamp)?; @@ -767,11 +798,188 @@ cp "$VAR_DIR/lib/avocado/metadata/root.json" "$VAR_DIR/lib/avocado/metadata/1.ro echo "Provisioned update authority: metadata/root.json""# ); + // Extension list from runtime config (used by var_files and docker priming) + let ext_list: Vec<&str> = merged_runtime + .get("extensions") + .and_then(|e| e.as_sequence()) + .map(|seq| seq.iter().filter_map(|v| v.as_str()).collect()) + .unwrap_or_default(); + + // Build var_files section: apply extension var_files to var staging in reverse order + // (last in extensions list applied first = lowest priority, first applied last = wins conflicts) + let var_files_section = { + let mut var_files_commands = Vec::new(); + + // Process in reverse order so first-listed extension wins conflicts + for ext_name in ext_list.iter().rev() { + let var_files = parsed + .get("extensions") + .and_then(|e| e.get(*ext_name)) + .map(crate::utils::config::get_ext_var_files) + .unwrap_or_default(); + + if !var_files.is_empty() { + for pattern in &var_files { + // Strip trailing glob suffixes and leading "var/" to get the dest path under $VAR_DIR + let clean_pattern = pattern + .trim_end_matches("/**") + .trim_end_matches("/*"); + // The pattern is relative to the sysroot (e.g., "var/lib/docker") + // $VAR_DIR maps to /var on the target, so strip the leading "var/" for dest + let dest = clean_pattern.strip_prefix("var/").unwrap_or(clean_pattern); + var_files_commands.push(format!( + r#" +if [ -d "$AVOCADO_EXT_SYSROOTS/{ext_name}/{clean_pattern}" ]; then + echo " Applying var files from extension '{ext_name}': {clean_pattern}/" + mkdir -p "$VAR_DIR/{dest}" + 
rsync -a "$AVOCADO_EXT_SYSROOTS/{ext_name}/{clean_pattern}/" "$VAR_DIR/{dest}/" +elif [ -f "$AVOCADO_EXT_SYSROOTS/{ext_name}/{clean_pattern}" ]; then + echo " Applying var file from extension '{ext_name}': {clean_pattern}" + mkdir -p "$(dirname "$VAR_DIR/{dest}")" + cp -f "$AVOCADO_EXT_SYSROOTS/{ext_name}/{clean_pattern}" "$VAR_DIR/{dest}" +fi"# + )); + } + } + } + + if var_files_commands.is_empty() { + "# No extension var_files to apply".to_string() + } else { + format!( + "echo \"Applying extension var files to var partition...\"\n{}", + var_files_commands.join("\n") + ) + } + }; + + // Build runtime-level var_files section + let runtime_var_files_section = { + let runtime_var_files = crate::utils::config::get_runtime_var_files(&merged_runtime); + if runtime_var_files.is_empty() { + "# No runtime var_files to apply".to_string() + } else { + let commands: Vec = runtime_var_files + .iter() + .map(|mapping| { + format!( + r#" +if [ -e "/opt/src/{source}" ]; then + mkdir -p "$VAR_DIR/{dest}" + rsync -a "/opt/src/{source}" "$VAR_DIR/{dest}" + echo " Copied runtime var_files: {source} -> {dest}" +else + echo "WARNING: runtime var_files source not found: /opt/src/{source}" +fi"#, + source = mapping.source, + dest = mapping.dest + ) + }) + .collect(); + format!( + "echo \"Applying runtime var files to var partition...\"\n{}", + commands.join("\n") + ) + } + }; + + // Build Docker image priming section + // Collect docker_images from all extensions in the runtime + let docker_section = { + let docker_images: Vec = ext_list + .iter() + .flat_map(|ext_name| { + parsed + .get("extensions") + .and_then(|e| e.get(*ext_name)) + .map(crate::utils::config::get_docker_images) + .unwrap_or_default() + }) + .collect(); + if docker_images.is_empty() { + "# No Docker images to prime".to_string() + } else { + let pull_commands: Vec = docker_images + .iter() + .map(|img| { + format!( + r#"docker --host unix:///tmp/avocado-dockerd.sock pull --platform "linux/$DOCKER_ARCH" "{image}:{tag}" 
+echo " Primed: {image}:{tag}""#, + image = img.image, + tag = img.tag + ) + }) + .collect(); + + format!( + r#"# Prime Docker image cache on var partition +echo "Priming Docker images on var partition..." +mkdir -p "$VAR_DIR/lib/docker" + +# Verify dockerd is available +if ! command -v dockerd >/dev/null 2>&1; then + echo "ERROR: dockerd not found in SDK container. Docker image priming requires dockerd, containerd, runc, and docker CLI." + exit 1 +fi + +# Map target arch to Docker platform +# Use OECORE_TARGET_ARCH (CPU arch like x86_64/aarch64) from SDK environment +DOCKER_TARGET_ARCH="${{OECORE_TARGET_ARCH:-$TARGET_ARCH}}" +case "$DOCKER_TARGET_ARCH" in + aarch64) DOCKER_ARCH="arm64" ;; + x86_64) DOCKER_ARCH="amd64" ;; + *) echo "WARNING: Unknown target architecture '$DOCKER_TARGET_ARCH' for Docker platform mapping, defaulting to amd64"; DOCKER_ARCH="amd64" ;; +esac + +# Start temporary dockerd with data-root pointing at var staging +dockerd --data-root "$VAR_DIR/lib/docker" \ + --host unix:///tmp/avocado-dockerd.sock \ + --iptables=false --ip-masq=false \ + --bridge=none \ + --exec-root /tmp/avocado-dockerd \ + --pidfile /tmp/avocado-dockerd.pid \ + >/tmp/avocado-dockerd.log 2>&1 & +DOCKERD_PID=$! + +# Wait for dockerd to be ready +echo "Waiting for temporary dockerd..." +for i in $(seq 1 30); do + if docker --host unix:///tmp/avocado-dockerd.sock info >/dev/null 2>&1; then + break + fi + if ! kill -0 $DOCKERD_PID 2>/dev/null; then + echo "ERROR: dockerd exited unexpectedly. Check /tmp/avocado-dockerd.log" + cat /tmp/avocado-dockerd.log + exit 1 + fi + sleep 1 +done + +if ! docker --host unix:///tmp/avocado-dockerd.sock info >/dev/null 2>&1; then + echo "ERROR: dockerd failed to start within 30 seconds" + kill $DOCKERD_PID 2>/dev/null || true + cat /tmp/avocado-dockerd.log + exit 1 +fi + +echo "Pulling Docker images for platform linux/$DOCKER_ARCH..." 
+{pull_commands} + +# Stop temporary dockerd +kill $DOCKERD_PID 2>/dev/null || true +wait $DOCKERD_PID 2>/dev/null || true +rm -f /tmp/avocado-dockerd.sock /tmp/avocado-dockerd.pid /tmp/avocado-dockerd.log +echo "Docker image priming complete.""#, + pull_commands = pull_commands.join("\n") + ) + } + }; + let script = format!( r#" # Set common variables -RUNTIME_NAME="{}" -TARGET_ARCH="{}" +RUNTIME_NAME="{runtime_name}" +TARGET_ARCH="{target_arch}" VAR_DIR=$AVOCADO_PREFIX/runtimes/$RUNTIME_NAME/var-staging mkdir -p "$VAR_DIR/lib/avocado/images" @@ -790,9 +998,12 @@ rm -f "$RUNTIME_EXT_DIR"/*.raw 2>/dev/null || true # Copy required extension images from global output/extensions to runtime-specific location echo "Copying required extension images to runtime-specific directory..." -{} -{} -{} +{copy_section} +{var_files_section} +{runtime_var_files_section} +{manifest_section} +{update_authority_section} +{docker_section} # Potential future SDK target hook. # echo "Run: avocado-pre-image-var-$TARGET_ARCH $RUNTIME_NAME" @@ -805,11 +1016,14 @@ mkfs.btrfs -r "$VAR_DIR" \ echo -e "\033[94m[INFO]\033[0m Running SDK lifecycle hook 'avocado-build' for '$TARGET_ARCH'." avocado-build-$TARGET_ARCH $RUNTIME_NAME "#, - self.runtime_name, - target_arch, - copy_section, - manifest_section, - update_authority_section + runtime_name = self.runtime_name, + target_arch = target_arch, + copy_section = copy_section, + var_files_section = var_files_section, + runtime_var_files_section = runtime_var_files_section, + manifest_section = manifest_section, + update_authority_section = update_authority_section, + docker_section = docker_section, ); Ok(script) diff --git a/src/commands/runtime/install.rs b/src/commands/runtime/install.rs index d6eae73..ac9370c 100644 --- a/src/commands/runtime/install.rs +++ b/src/commands/runtime/install.rs @@ -272,7 +272,7 @@ impl RuntimeInstallCommand { &target_arch, &self.config_path, )? 
{ - let inputs = compute_runtime_input_hash(&merged_runtime, runtime_name)?; + let inputs = compute_runtime_input_hash(&merged_runtime, runtime_name, parsed)?; let outputs = StampOutputs::default(); let stamp = Stamp::runtime_install(runtime_name, &target_arch, inputs, outputs); let stamp_script = generate_write_stamp_script(&stamp)?; diff --git a/src/utils/config.rs b/src/utils/config.rs index 9a7426e..b066d44 100644 --- a/src/utils/config.rs +++ b/src/utils/config.rs @@ -491,6 +491,70 @@ pub struct DistroRepoConfig { pub releasever: Option, } +/// Reference to a Docker image for priming on the var partition at build time. +#[derive(Debug, Clone)] +pub struct DockerImageRef { + pub image: String, + pub tag: String, +} + +/// Mapping of a source file/directory to a destination on the var partition. +#[derive(Debug, Clone)] +pub struct VarFileMapping { + pub source: String, + pub dest: String, +} + +/// Extract var_files glob patterns from an extension config value. +/// Returns an empty Vec if no var_files are configured. +pub fn get_ext_var_files(ext_config: &serde_yaml::Value) -> Vec { + ext_config + .get("var_files") + .and_then(|v| v.as_sequence()) + .map(|arr| { + arr.iter() + .filter_map(|v| v.as_str().map(|s| s.to_string())) + .collect() + }) + .unwrap_or_default() +} + +/// Extract docker_images from a config value (extension or runtime). +/// Returns an empty Vec if no docker_images are configured. +pub fn get_docker_images(config: &serde_yaml::Value) -> Vec { + config + .get("docker_images") + .and_then(|v| v.as_sequence()) + .map(|arr| { + arr.iter() + .filter_map(|entry| { + let image = entry.get("image")?.as_str()?.to_string(); + let tag = entry.get("tag")?.as_str()?.to_string(); + Some(DockerImageRef { image, tag }) + }) + .collect() + }) + .unwrap_or_default() +} + +/// Extract var_files source/dest mappings from a runtime config value. +/// Returns an empty Vec if no var_files are configured. 
+pub fn get_runtime_var_files(runtime_config: &serde_yaml::Value) -> Vec { + runtime_config + .get("var_files") + .and_then(|v| v.as_sequence()) + .map(|arr| { + arr.iter() + .filter_map(|entry| { + let source = entry.get("source")?.as_str()?.to_string(); + let dest = entry.get("dest")?.as_str()?.to_string(); + Some(VarFileMapping { source, dest }) + }) + .collect() + }) + .unwrap_or_default() +} + /// Helper module for deserializing signing keys list mod signing_keys_deserializer { use serde::{Deserialize, Deserializer}; @@ -1227,6 +1291,7 @@ impl Config { "stone_include_paths", "stone_manifest", "signing", + "var_files", ] .contains(&key_str) { @@ -1274,6 +1339,8 @@ impl Config { "confext", "sysext", "overlay", + "var_files", + "docker_images", ] .contains(&key_str) { @@ -7738,4 +7805,114 @@ sdk: let msg = result.unwrap_err().to_string(); assert!(msg.contains("Invalid cli_requirement")); } + + #[test] + fn test_get_ext_var_files_returns_patterns() { + let yaml: serde_yaml::Value = serde_yaml::from_str( + r#" +version: "1.0.0" +types: [sysext] +var_files: + - "var/lib/docker/**" + - "var/lib/myapp/data/" +"#, + ) + .unwrap(); + + let result = get_ext_var_files(&yaml); + assert_eq!(result.len(), 2); + assert_eq!(result[0], "var/lib/docker/**"); + assert_eq!(result[1], "var/lib/myapp/data/"); + } + + #[test] + fn test_get_ext_var_files_returns_empty_when_missing() { + let yaml: serde_yaml::Value = serde_yaml::from_str( + r#" +version: "1.0.0" +types: [sysext] +"#, + ) + .unwrap(); + + let result = get_ext_var_files(&yaml); + assert!(result.is_empty()); + } + + #[test] + fn test_get_docker_images_returns_refs() { + let yaml: serde_yaml::Value = serde_yaml::from_str( + r#" +extensions: [base] +docker_images: + - image: "docker.io/library/redis" + tag: "7-alpine" + - image: "docker.io/library/nginx" + tag: "1.25" +"#, + ) + .unwrap(); + + let result = get_docker_images(&yaml); + assert_eq!(result.len(), 2); + assert_eq!(result[0].image, "docker.io/library/redis"); + 
assert_eq!(result[0].tag, "7-alpine"); + assert_eq!(result[1].image, "docker.io/library/nginx"); + assert_eq!(result[1].tag, "1.25"); + } + + #[test] + fn test_get_docker_images_returns_empty_when_missing() { + let yaml: serde_yaml::Value = serde_yaml::from_str( + r#" +extensions: [base] +"#, + ) + .unwrap(); + + let result = get_docker_images(&yaml); + assert!(result.is_empty()); + } + + #[test] + fn test_get_docker_images_skips_incomplete_entries() { + let yaml: serde_yaml::Value = serde_yaml::from_str( + r#" +docker_images: + - image: "docker.io/library/redis" + - image: "docker.io/library/nginx" + tag: "1.25" +"#, + ) + .unwrap(); + + // First entry missing tag should be skipped + let result = get_docker_images(&yaml); + assert_eq!(result.len(), 1); + assert_eq!(result[0].image, "docker.io/library/nginx"); + } + + #[test] + fn test_get_runtime_var_files_returns_mappings() { + let yaml: serde_yaml::Value = serde_yaml::from_str( + r#" +var_files: + - source: "files/var-data/" + dest: "lib/myapp/" +"#, + ) + .unwrap(); + + let result = get_runtime_var_files(&yaml); + assert_eq!(result.len(), 1); + assert_eq!(result[0].source, "files/var-data/"); + assert_eq!(result[0].dest, "lib/myapp/"); + } + + #[test] + fn test_get_runtime_var_files_returns_empty_when_missing() { + let yaml: serde_yaml::Value = serde_yaml::from_str("extensions: [base]").unwrap(); + let result = get_runtime_var_files(&yaml); + assert!(result.is_empty()); + } } diff --git a/src/utils/stamps.rs b/src/utils/stamps.rs index e2e0a23..46efa54 100644 --- a/src/utils/stamps.rs +++ b/src/utils/stamps.rs @@ -853,6 +853,13 @@ pub fn compute_ext_input_hash(config: &serde_yaml::Value, ext_name: &str) -> Res types.clone(), ); } + // Include var_files as they affect which files are excluded from the .raw image + if let Some(var_files) = ext.get("var_files") { + hash_data.insert( + serde_yaml::Value::String(format!("ext.{ext_name}.var_files")), + var_files.clone(), + ); + } } let config_hash = 
compute_config_hash(&serde_yaml::Value::Mapping(hash_data))?; @@ -860,10 +867,12 @@ pub fn compute_ext_input_hash(config: &serde_yaml::Value, ext_name: &str) -> Res } /// Compute input hash for runtime install -/// Includes: runtime..dependencies (merged with target), kernel config +/// Includes: runtime..dependencies (merged with target), kernel config, +/// extension docker_images (affects var partition priming) pub fn compute_runtime_input_hash( merged_runtime: &serde_yaml::Value, runtime_name: &str, + parsed: &serde_yaml::Value, ) -> Result { let mut hash_data = serde_yaml::Mapping::new(); @@ -891,6 +900,36 @@ pub fn compute_runtime_input_hash( ); } + // Include docker_images from extensions in this runtime + // (changes to extension docker_images should trigger runtime rebuild to re-prime images) + if let Some(ext_list) = merged_runtime + .get("extensions") + .and_then(|e| e.as_sequence()) + { + for ext_val in ext_list { + if let Some(ext_name) = ext_val.as_str() { + if let Some(docker_images) = parsed + .get("extensions") + .and_then(|e| e.get(ext_name)) + .and_then(|ext| ext.get("docker_images")) + { + hash_data.insert( + serde_yaml::Value::String(format!("ext.{ext_name}.docker_images")), + docker_images.clone(), + ); + } + } + } + } + + // Include runtime-level var_files if specified + if let Some(var_files) = merged_runtime.get("var_files") { + hash_data.insert( + serde_yaml::Value::String(format!("runtime.{runtime_name}.var_files")), + var_files.clone(), + ); + } + let config_hash = compute_config_hash(&serde_yaml::Value::Mapping(hash_data))?; Ok(StampInputs::new(config_hash)) } @@ -2387,8 +2426,11 @@ kernel: ) .unwrap(); - let hash_without = compute_runtime_input_hash(&without_kernel, "dev").unwrap(); - let hash_with = compute_runtime_input_hash(&with_kernel, "dev").unwrap(); + let empty_parsed = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let hash_without = + compute_runtime_input_hash(&without_kernel, "dev", &empty_parsed).unwrap(); + let 
hash_with = + compute_runtime_input_hash(&with_kernel, "dev", &empty_parsed).unwrap(); // Hashes should differ when kernel config is added assert_ne!(hash_without.config_hash, hash_with.config_hash); @@ -2418,10 +2460,130 @@ kernel: ) .unwrap(); - let hash_package = compute_runtime_input_hash(&kernel_package, "dev").unwrap(); - let hash_compile = compute_runtime_input_hash(&kernel_compile, "dev").unwrap(); + let empty_parsed = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let hash_package = + compute_runtime_input_hash(&kernel_package, "dev", &empty_parsed).unwrap(); + let hash_compile = + compute_runtime_input_hash(&kernel_compile, "dev", &empty_parsed).unwrap(); // Switching kernel mode should produce a different hash assert_ne!(hash_package.config_hash, hash_compile.config_hash); } + + #[test] + fn test_ext_input_hash_includes_var_files() { + let config_without: serde_yaml::Value = serde_yaml::from_str( + r#" +extensions: + my-ext: + version: "1.0.0" + types: [sysext] + packages: + foo: "*" +"#, + ) + .unwrap(); + + let config_with: serde_yaml::Value = serde_yaml::from_str( + r#" +extensions: + my-ext: + version: "1.0.0" + types: [sysext] + packages: + foo: "*" + var_files: + - "var/lib/docker/**" +"#, + ) + .unwrap(); + + let hash_without = compute_ext_input_hash(&config_without, "my-ext").unwrap(); + let hash_with = compute_ext_input_hash(&config_with, "my-ext").unwrap(); + + assert_ne!( + hash_without.config_hash, hash_with.config_hash, + "Adding var_files should change the ext input hash" + ); + } + + #[test] + fn test_runtime_input_hash_includes_ext_docker_images() { + // Runtime references extension "app" which has docker_images + let runtime: serde_yaml::Value = serde_yaml::from_str( + r#" +packages: + avocado-runtime: "*" +extensions: + - app +"#, + ) + .unwrap(); + + let parsed_without: serde_yaml::Value = serde_yaml::from_str( + r#" +extensions: + app: + version: "1.0.0" + types: [sysext] +"#, + ) + .unwrap(); + + let parsed_with: 
serde_yaml::Value = serde_yaml::from_str( + r#" +extensions: + app: + version: "1.0.0" + types: [sysext] + docker_images: + - image: "docker.io/library/redis" + tag: "7-alpine" +"#, + ) + .unwrap(); + + let hash_without = + compute_runtime_input_hash(&runtime, "dev", &parsed_without).unwrap(); + let hash_with = + compute_runtime_input_hash(&runtime, "dev", &parsed_with).unwrap(); + + assert_ne!( + hash_without.config_hash, hash_with.config_hash, + "Adding docker_images to an extension should change the runtime input hash" + ); + } + + #[test] + fn test_runtime_input_hash_includes_var_files() { + let runtime_without: serde_yaml::Value = serde_yaml::from_str( + r#" +packages: + avocado-runtime: "*" +"#, + ) + .unwrap(); + + let runtime_with: serde_yaml::Value = serde_yaml::from_str( + r#" +packages: + avocado-runtime: "*" +var_files: + - source: "files/data/" + dest: "lib/myapp/" +"#, + ) + .unwrap(); + + let empty_parsed = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); + let hash_without = + compute_runtime_input_hash(&runtime_without, "dev", &empty_parsed).unwrap(); + let hash_with = + compute_runtime_input_hash(&runtime_with, "dev", &empty_parsed).unwrap(); + + assert_ne!( + hash_without.config_hash, hash_with.config_hash, + "Adding var_files should change the runtime input hash" + ); + } } From 0ae698046c44c7a1d65b8f4847126aea59243d0b Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Wed, 4 Mar 2026 23:58:16 -0500 Subject: [PATCH 02/12] fix host Docker contamination from inner dockerd during image priming Two issues caused the host Docker daemon to break after a runtime build with docker_images configured: 1. Cgroup contamination: the inner dockerd wrote into the host's /sys/fs/cgroup (via --privileged bind-mount). Fix: overmount /sys/fs/cgroup with a private cgroup2/tmpfs before starting the inner dockerd and umount it on exit. 2. 
docker0 deletion: with --network=host the inner dockerd shares the host network namespace and removes the docker0 bridge when it exits, breaking subsequent docker run calls on the host. Fix: save docker0's IP before starting the inner dockerd and restore the bridge in the cleanup trap if it was deleted. Also add --exec-opt native.cgroupdriver=cgroupfs to avoid systemd-cgroup interaction, use a trap for cleanup so it runs even on pull failure, and read remote extension configs directly from the Docker volume mountpoint (get_volume_mountpoint_sync) before falling back to spinning up a throwaway container, making the version-field merge more reliable. --- .../docker-image-priming-var-provisioning.md | 266 ++++++++++++++++++ src/commands/runtime/build.rs | 48 +++- src/utils/config.rs | 71 +++-- 3 files changed, 362 insertions(+), 23 deletions(-) create mode 100644 docs/features/docker-image-priming-var-provisioning.md diff --git a/docs/features/docker-image-priming-var-provisioning.md b/docs/features/docker-image-priming-var-provisioning.md new file mode 100644 index 0000000..c422e25 --- /dev/null +++ b/docs/features/docker-image-priming-var-provisioning.md @@ -0,0 +1,266 @@ +# Docker Image Priming & Var Partition Provisioning + +## Context + +Avocado runtimes build a var partition (`avocado-image-var-*.btrfs`) containing extension `.raw` images and avocado metadata. However, there's no way to ship pre-pulled Docker images or inject arbitrary files into the var partition. Devices that need Docker containers available at first boot must pull images over the network, which is slow and may not be feasible in air-gapped environments. + +We need to support priming Docker's image cache on the var partition at build time so Docker ships with images locally pulled and available, and more generally support adding files to `/var` from extensions. 
+ +**Prior art:** Yocto's `meta-virtualization` layer (`container-cross-install.bbclass`, `vrunner.sh`) solves this by booting a QEMU VM with dockerd, importing OCI images via skopeo, and exporting `/var/lib/docker/` as a tarball. We adapt a similar approach using Docker-in-Docker inside the SDK container. + +## Config Schema + +### First-class Docker image priming (extension-level) + +Extensions declare `docker_images` specifying images to pre-pull at build time. During `runtime build`, images from all extensions in the runtime are collected and pulled via Docker-in-Docker into the var partition. Multiple extensions can each declare their own images — they're all pulled into a single Docker data root. + +```yaml +extensions: + my-app: + types: [sysext] + version: "1.0.0" + docker_images: + - image: "docker.io/library/redis" + tag: "7-alpine" + - image: "docker.io/library/nginx" + tag: "1.25" + sdk: + packages: + nativesdk-docker: "*" +``` + +Minimal config (single image): +```yaml +extensions: + my-app: + types: [sysext] + version: "1.0.0" + docker_images: + - image: "docker.io/library/alpine" + tag: "3.19" + sdk: + packages: + nativesdk-docker: "*" +``` + +Multiple extensions can each declare Docker images: +```yaml +runtimes: + dev: + extensions: [base, app-a, app-b] + +extensions: + app-a: + types: [sysext] + version: "1.0.0" + docker_images: + - image: "docker.io/library/redis" + tag: "7-alpine" + app-b: + types: [sysext] + version: "1.0.0" + docker_images: + - image: "docker.io/library/nginx" + tag: "1.25" +``` + +### Extension var_files (extension-level) + +Extensions can declare glob patterns identifying files in their sysroot's `var/` tree to apply to the var partition instead of into the sysext/confext `.raw` image: + +```yaml +extensions: + my-docker-ext: + types: [sysext] + version: "1.0.0" + var_files: + - "var/lib/docker/**" + - "var/lib/myapp/data/" +``` + +Patterns are relative to the extension sysroot directory (`$AVOCADO_EXT_SYSROOTS/<ext-name>/`). 
Matched files are: +- **Excluded** from the `.raw` sysext/confext image during `ext image` +- **Copied** into the runtime's var staging directory during `runtime build` + +### Runtime var_files (runtime-level) + +Runtimes can specify arbitrary project files to copy into the var partition: + +```yaml +runtimes: + dev: + extensions: [base] + var_files: + - source: "files/var-data/" + dest: "lib/myapp/" +``` + +`source` is relative to the project directory. `dest` is relative to the var partition root (`/var`). + +## Developer Workflow + +```bash +# Build with Docker images pre-cached +avocado ext build # build extensions (including var_files) +avocado ext image # create .raw images (var_files excluded) +avocado runtime build dev # build runtime (applies var_files + primes Docker images) +# Output: avocado-image-var-aarch64.btrfs with /var/lib/docker/ pre-populated +``` + +On target device boot, Docker immediately sees all pre-cached images: +```bash +docker images +# REPOSITORY TAG IMAGE ID SIZE +# redis 7-alpine abc123... 30MB +# nginx 1.25 def456... 140MB +``` + +## Pipeline: `runtime build` with Docker Image Priming + +The existing `runtime build` pipeline is extended with new steps (marked **NEW**): + +1. Resolve extensions, validate stamps (existing) +2. Create `$VAR_DIR` (`var-staging/`) with avocado directory structure (existing) +3. Copy extension `.raw` images to runtime-specific staging (existing) +4. **NEW: Apply extension `var_files`** — Copy matched files from each extension sysroot into `$VAR_DIR` +5. **NEW: Apply runtime `var_files`** — Copy project files into `$VAR_DIR` +6. Generate manifest with content-addressable image IDs (existing) +7. Provision update authority metadata (existing) +8. **NEW: Prime Docker images** — Start temporary dockerd, pull images, stop dockerd +9. `mkfs.btrfs -r "$VAR_DIR"` to create final btrfs image (existing) +10. 
Run `avocado-build-$TARGET_ARCH` lifecycle hook (existing) + +### Extension var_files ordering + +When multiple extensions contribute `var_files`, they are applied in reverse order of the runtime's `extensions` list: + +```yaml +runtimes: + dev: + extensions: [ext-a, ext-b, ext-c] # ext-a has highest priority +``` + +- `ext-c` var_files applied first (lowest priority) +- `ext-b` var_files applied second +- `ext-a` var_files applied last (highest priority, wins file conflicts) + +This uses `rsync -a` so later copies overwrite earlier ones, giving the first-listed extension the highest priority. + +### Docker image priming approach (Docker-in-Docker) + +Docker images are primed using Docker-in-Docker inside the SDK container. This is the most portable approach — it works on Linux, macOS (Docker Desktop), and Windows (Docker Desktop) since the inner dockerd always runs inside a Linux container regardless of host OS. + +When any extension in the runtime declares `docker_images`, avocado-cli automatically adds `--privileged` to the SDK container invocation so dockerd can run inside the container. Images from all extensions are collected and pulled in a single DinD session. + +During `runtime build`, the generated build script: + +1. Verifies `dockerd` is available in the SDK container +2. Maps target arch to Docker platform (`aarch64` -> `linux/arm64`, `x86_64` -> `linux/amd64`) +3. Starts a temporary `dockerd` with `--data-root "$VAR_DIR/lib/docker"` and a dedicated unix socket +4. Waits for dockerd readiness (poll loop, 30s timeout) +5. Runs `docker pull --platform linux/$DOCKER_ARCH :` for each configured image +6. Stops dockerd, cleans up socket and pid files + +The result: `$VAR_DIR/lib/docker/` contains Docker's overlay2 storage layout with all images pre-cached. When the target boots, Docker reads `/var/lib/docker/` and finds all images immediately available — no network pull, no first-boot import needed. 
+
+**Cross-compilation:** `docker pull --platform linux/arm64` fetches arm64 layers regardless of host architecture. Docker's overlay2 storage format is architecture-independent (filesystem layers + metadata JSON).
+
+**SDK container requirements:**
+- Must include `dockerd`, `containerd`, `runc`, and `docker` CLI (e.g., via `nativesdk-docker` packages)
+- The build fails with a clear error if `dockerd` is not found
+- The SDK container is automatically run with `--privileged` when any extension declares `docker_images`
+
+## Extension var_files exclusion from .raw images
+
+When an extension declares `var_files`, those files must be excluded from the sysext/confext `.raw` image since they belong on the var partition, not in `/usr` or `/etc` overlays.
+
+During `ext image`, the mkfs command receives exclude flags:
+- **squashfs:** `mksquashfs ... -e "var/lib/docker" -e "var/lib/myapp/data"`
+- **erofs:** `mkfs.erofs ... --exclude-path=var/lib/docker --exclude-path=var/lib/myapp/data`
+
+The files remain in the extension sysroot so `runtime build` can copy them into var staging.
+
+## Implementation Steps
+
+### 1. Config parsing (`src/utils/config.rs`)
+
+- Add `"var_files"` and `"docker_images"` to the known extension keys list to prevent them from being treated as target-specific sections
+- Add `"var_files"` to the known runtime keys list
+- Add `DockerImageRef` struct with `image: String` and `tag: String` fields
+- Add `VarFileMapping` struct with `source: String` and `dest: String` fields
+- Add helper functions:
+  - `get_ext_var_files(ext_config: &Value) -> Vec<String>` — extracts glob patterns from extension config
+  - `get_docker_images(config: &Value) -> Vec<DockerImageRef>` — extracts Docker image references from any config node
+  - `get_runtime_var_files(runtime_config: &Value) -> Vec<VarFileMapping>` — extracts source/dest file mappings
+
+### 2. 
Extension image exclusion (`src/commands/ext/image.rs`) + +- Update `create_build_script()` signature to accept `var_files: &[String]` +- Convert glob patterns to mkfs exclude flags (squashfs `-e` / erofs `--exclude-path`) +- Strip trailing `/**` from glob patterns to get directory paths for exclusion +- Update `create_image()` and `execute()` to read `var_files` from extension config and pass through +- Note: `docker_images` does NOT affect extension `.raw` images — Docker data is pulled directly into var staging during `runtime build`, not into the extension sysroot + +### 3. Runtime build var_files + Docker priming (`src/commands/runtime/build.rs`) + +- In `create_build_script()`, after the existing copy/manifest sections: + - Generate rsync commands for each extension's `var_files` in reverse extensions-list order + - Generate rsync commands for runtime-level `var_files` + - Collect `docker_images` from all extensions in the runtime and generate the Docker priming script section (dockerd start, pull, stop) +- Insert all new sections before the `mkfs.btrfs` command +- When any extension has `docker_images`, automatically add `--privileged` to SDK container args + +### 4. 
Stamp invalidation (`src/utils/stamps.rs`) + +- Include `var_files` in `compute_ext_input_hash()` so extension var_files changes invalidate ext image stamps +- Include extension `docker_images` and runtime `var_files` in `compute_runtime_input_hash()` so changes invalidate runtime build stamps + +## Critical Files + +| File | Change | +|------|--------| +| `src/utils/config.rs` | Add `DockerImageRef`, `VarFileMapping` structs; add known keys; add helper functions | +| `src/commands/ext/image.rs` | Add var_files exclusion to mkfs commands | +| `src/commands/runtime/build.rs` | Add var_files application and Docker priming to build script; collect docker_images from extensions | +| `src/commands/runtime/install.rs` | Pass parsed config to stamp hash computation | +| `src/utils/stamps.rs` | Include extension docker_images and var_files in hash computations | + +## Verification + +1. `cargo build` — confirms compilation +2. `cargo test` — new unit tests pass, existing tests unchanged +3. Manual test with Docker priming config: + ```yaml + extensions: + my-app: + types: [sysext] + version: "1.0.0" + docker_images: + - image: "docker.io/library/alpine" + tag: "3.19" + sdk: + packages: + nativesdk-docker: "*" + runtimes: + dev: + extensions: [base, my-app] + ``` + ```bash + avocado runtime build dev + # Verify: mount the btrfs image and check /var/lib/docker/ contains overlay2 data + ``` +4. 
Manual test with extension var_files: + ```yaml + extensions: + my-ext: + types: [sysext] + version: "1.0.0" + var_files: + - "var/lib/myapp/**" + ``` + ```bash + avocado ext image my-ext + # Verify: .raw image does NOT contain var/lib/myapp/ + avocado runtime build dev + # Verify: btrfs image contains /var/lib/myapp/ files from extension sysroot + ``` diff --git a/src/commands/runtime/build.rs b/src/commands/runtime/build.rs index 00a197f..7945948 100644 --- a/src/commands/runtime/build.rs +++ b/src/commands/runtime/build.rs @@ -931,9 +931,48 @@ case "$DOCKER_TARGET_ARCH" in *) echo "WARNING: Unknown target architecture '$DOCKER_TARGET_ARCH' for Docker platform mapping, defaulting to amd64"; DOCKER_ARCH="amd64" ;; esac -# Start temporary dockerd with data-root pointing at var staging +# The SDK container may have the host's /sys bind-mounted (-v /sys:/sys), +# and --privileged gives write access even without that flag. +# Overmount /sys/fs/cgroup with a private hierarchy so the inner dockerd +# cannot write into the host's cgroup tree. When we umount afterwards +# the host hierarchy is restored untouched. +_AVOCADO_CGROUP_PRIVATE=0 +if mount -t cgroup2 cgroup2 /sys/fs/cgroup 2>/dev/null; then + _AVOCADO_CGROUP_PRIVATE=1 +elif mount -t tmpfs tmpfs /sys/fs/cgroup 2>/dev/null; then + _AVOCADO_CGROUP_PRIVATE=1 +else + echo "WARNING: Could not overmount /sys/fs/cgroup — inner dockerd may leave stale cgroup entries on the host." +fi + +# When the SDK container uses --network=host the inner dockerd shares the +# host network namespace and may delete the host's docker0 bridge on exit. +# Save its address now so we can restore it if needed. 
+_DOCKER0_ADDR="" +if ip link show docker0 >/dev/null 2>&1; then + _DOCKER0_ADDR=$(ip -4 addr show docker0 2>/dev/null | awk '/inet /{{print $2}}' | head -1) +fi + +_avocado_docker_cleanup() {{ + kill $DOCKERD_PID 2>/dev/null || true + wait $DOCKERD_PID 2>/dev/null || true + rm -f /tmp/avocado-dockerd.sock /tmp/avocado-dockerd.pid /tmp/avocado-dockerd.log + [ "$_AVOCADO_CGROUP_PRIVATE" = "1" ] && umount /sys/fs/cgroup 2>/dev/null || true + # Restore docker0 if the inner dockerd removed it from the host network namespace + if [ -n "$_DOCKER0_ADDR" ] && ! ip link show docker0 >/dev/null 2>&1; then + echo "NOTE: inner dockerd removed host docker0 — restoring." + ip link add name docker0 type bridge 2>/dev/null || true + ip addr add "$_DOCKER0_ADDR" dev docker0 2>/dev/null || true + ip link set docker0 up 2>/dev/null || true + fi +}} +trap _avocado_docker_cleanup EXIT + +# Start temporary dockerd with data-root pointing at var staging. +# cgroupdriver=cgroupfs avoids systemd-cgroup interaction inside the container. dockerd --data-root "$VAR_DIR/lib/docker" \ --host unix:///tmp/avocado-dockerd.sock \ + --exec-opt native.cgroupdriver=cgroupfs \ --iptables=false --ip-masq=false \ --bridge=none \ --exec-root /tmp/avocado-dockerd \ @@ -957,7 +996,6 @@ done if ! docker --host unix:///tmp/avocado-dockerd.sock info >/dev/null 2>&1; then echo "ERROR: dockerd failed to start within 30 seconds" - kill $DOCKERD_PID 2>/dev/null || true cat /tmp/avocado-dockerd.log exit 1 fi @@ -965,10 +1003,8 @@ fi echo "Pulling Docker images for platform linux/$DOCKER_ARCH..." 
{pull_commands} -# Stop temporary dockerd -kill $DOCKERD_PID 2>/dev/null || true -wait $DOCKERD_PID 2>/dev/null || true -rm -f /tmp/avocado-dockerd.sock /tmp/avocado-dockerd.pid /tmp/avocado-dockerd.log +trap - EXIT +_avocado_docker_cleanup echo "Docker image priming complete.""#, pull_commands = pull_commands.join("\n") ) diff --git a/src/utils/config.rs b/src/utils/config.rs index b066d44..3a968f4 100644 --- a/src/utils/config.rs +++ b/src/utils/config.rs @@ -1028,27 +1028,64 @@ impl Config { } } } else if let Some(vs) = &volume_state { - // Method 2: Use container command to read from Docker volume - if verbose { - eprintln!( - "[DEBUG] Trying via container command (volume: {})", - vs.volume_name - ); - } - match Self::read_extension_config_via_container(vs, &resolved_target, &ext_name) - { - Ok(content) => { + // Method 2: Read directly from the Docker volume's host mountpoint. + // This is fast and reliable — no throwaway container needed. + // Falls back to Method 3 if the mountpoint isn't accessible (e.g. permission denied). + let host_content = Self::get_volume_mountpoint_sync(vs) + .ok() + .and_then(|mountpoint| { + let p = mountpoint + .join(&resolved_target) + .join("includes") + .join(&ext_name) + .join("avocado.yaml"); if verbose { - eprintln!("[DEBUG] Read {} bytes via container", content.len()); + eprintln!( + "[DEBUG] Trying host volume path: {}", + p.display() + ); } - content + fs::read_to_string(&p).ok() + }); + + if let Some(content) = host_content { + if verbose { + eprintln!( + "[DEBUG] Read {} bytes from host volume mountpoint", + content.len() + ); } - Err(e) => { - if verbose { - eprintln!("[DEBUG] Container read failed: {e}"); + content + } else { + // Method 3: Fall back to spinning up a container to read from the volume. + // Used when the host mountpoint isn't directly accessible. 
+ if verbose { + eprintln!( + "[DEBUG] Host path not accessible, trying via container command (volume: {})", + vs.volume_name + ); + } + match Self::read_extension_config_via_container( + vs, + &resolved_target, + &ext_name, + ) { + Ok(content) => { + if verbose { + eprintln!( + "[DEBUG] Read {} bytes via container", + content.len() + ); + } + content + } + Err(e) => { + if verbose { + eprintln!("[DEBUG] Container read failed: {e}"); + } + // Extension not installed yet or config not found, skip + continue; } - // Extension not installed yet or config not found, skip - continue; } } } else { From 4030fa60798d35cca4daf19e10ba5319cacf0b12 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Thu, 5 Mar 2026 00:19:58 -0500 Subject: [PATCH 03/12] detect broken Docker daemon and suggest restart When a container command fails due to a known Docker daemon health problem, print an actionable hint instead of the confusing low-level error. Currently handles two cases: - Missing docker0 bridge (bridge/veth Device does not exist): caused by an inner dockerd tearing down the host bridge on exit. Suggests `sudo systemctl restart docker`. - Daemon not running / socket missing: suggests `sudo systemctl start docker`. The hint is printed alongside the existing verbose error output so no information is lost. --- src/utils/container.rs | 42 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) diff --git a/src/utils/container.rs b/src/utils/container.rs index 38c1e11..1218d63 100644 --- a/src/utils/container.rs +++ b/src/utils/container.rs @@ -79,6 +79,42 @@ pub fn sdk_arch_to_platform(sdk_arch: &str) -> Result { /// /// # Returns /// Normalized architecture name (e.g., "aarch64", "x86_64") +/// Inspect Docker daemon stderr output and return a user-friendly hint when a +/// known daemon health problem is detected (e.g. broken bridge networking after +/// Docker-in-Docker teardown). Returns `None` for unrecognised errors. 
+fn docker_daemon_hint(stderr: &str) -> Option<String> {
+    // Bridge / veth errors indicate that docker0 was removed from the host
+    // network namespace (commonly caused by an inner dockerd started with
+    // --network=host tearing down the bridge on exit).
+    if stderr.contains("docker0")
+        || (stderr.contains("bridge") && stderr.contains("Device does not exist"))
+        || stderr.contains("failed to create endpoint")
+        || stderr.contains("failed to set up container networking")
+    {
+        return Some(
+            "The Docker daemon appears to have a broken network bridge (docker0 missing).\n\
+             This can happen after Docker-in-Docker teardown. Fix with:\n\
+             \n    sudo systemctl restart docker\n"
+                .to_string(),
+        );
+    }
+
+    // Daemon not responding / socket errors
+    if stderr.contains("Cannot connect to the Docker daemon")
+        || stderr.contains("Is the docker daemon running")
+        || stderr.contains("dial unix") && stderr.contains("connect: no such file or directory")
+    {
+        return Some(
+            "The Docker daemon is not running or its socket is not accessible.\n\
+             Start it with:\n\
+             \n    sudo systemctl start docker\n"
+                .to_string(),
+        );
+    }
+
+    None
+}
+
 pub fn normalize_sdk_arch(sdk_arch: &str) -> Result {
     match sdk_arch.to_lowercase().as_str() {
         "aarch64" | "arm64" => Ok("aarch64".to_string()),
@@ -956,6 +992,9 @@ impl SdkContainer {
                         OutputLevel::Normal,
                     );
                 }
+                if let Some(hint) = docker_daemon_hint(&stderr) {
+                    print_error(&hint, OutputLevel::Normal);
+                }
                 Ok(None)
             }
         }
@@ -1216,6 +1255,9 @@ impl SdkContainer {
                         &format!("Container execution failed: {stderr}"),
                         OutputLevel::Normal,
                     );
+                    if let Some(hint) = docker_daemon_hint(&stderr) {
+                        print_error(&hint, OutputLevel::Normal);
+                    }
                     Ok(false)
                 }
             } else {

From 88357d8c0261f9ff8444943147b7118a073d9e56 Mon Sep 17 00:00:00 2001
From: Justin Schneck
Date: Thu, 5 Mar 2026 00:21:38 -0500
Subject: [PATCH 04/12] format code

---
 src/commands/ext/image.rs     |  4 +---
 src/commands/runtime/build.rs |  8 ++------
 src/utils/config.rs           | 30 ++++++++++++++----------------
src/utils/stamps.rs | 12 ++++-------- 4 files changed, 21 insertions(+), 33 deletions(-) diff --git a/src/commands/ext/image.rs b/src/commands/ext/image.rs index abd8752..6612786 100644 --- a/src/commands/ext/image.rs +++ b/src/commands/ext/image.rs @@ -513,9 +513,7 @@ impl ExtImageCommand { .iter() .map(|pattern| { // Strip trailing /** or /* glob suffixes to get the directory path for exclusion - let clean = pattern - .trim_end_matches("/**") - .trim_end_matches("/*"); + let clean = pattern.trim_end_matches("/**").trim_end_matches("/*"); clean.to_string() }) .collect::>(); diff --git a/src/commands/runtime/build.rs b/src/commands/runtime/build.rs index 7945948..e3076d7 100644 --- a/src/commands/runtime/build.rs +++ b/src/commands/runtime/build.rs @@ -448,9 +448,7 @@ impl RuntimeBuildCommand { }); if has_docker_images { - let mut args = merged_container_args - .clone() - .unwrap_or_default(); + let mut args = merged_container_args.clone().unwrap_or_default(); if !args.iter().any(|a| a == "--privileged") { args.push("--privileged".to_string()); } @@ -821,9 +819,7 @@ echo "Provisioned update authority: metadata/root.json""# if !var_files.is_empty() { for pattern in &var_files { // Strip trailing glob suffixes and leading "var/" to get the dest path under $VAR_DIR - let clean_pattern = pattern - .trim_end_matches("/**") - .trim_end_matches("/*"); + let clean_pattern = pattern.trim_end_matches("/**").trim_end_matches("/*"); // The pattern is relative to the sysroot (e.g., "var/lib/docker") // $VAR_DIR maps to /var on the target, so strip the leading "var/" for dest let dest = clean_pattern.strip_prefix("var/").unwrap_or(clean_pattern); diff --git a/src/utils/config.rs b/src/utils/config.rs index 3a968f4..35a1643 100644 --- a/src/utils/config.rs +++ b/src/utils/config.rs @@ -1031,22 +1031,20 @@ impl Config { // Method 2: Read directly from the Docker volume's host mountpoint. // This is fast and reliable — no throwaway container needed. 
// Falls back to Method 3 if the mountpoint isn't accessible (e.g. permission denied). - let host_content = Self::get_volume_mountpoint_sync(vs) - .ok() - .and_then(|mountpoint| { - let p = mountpoint - .join(&resolved_target) - .join("includes") - .join(&ext_name) - .join("avocado.yaml"); - if verbose { - eprintln!( - "[DEBUG] Trying host volume path: {}", - p.display() - ); - } - fs::read_to_string(&p).ok() - }); + let host_content = + Self::get_volume_mountpoint_sync(vs) + .ok() + .and_then(|mountpoint| { + let p = mountpoint + .join(&resolved_target) + .join("includes") + .join(&ext_name) + .join("avocado.yaml"); + if verbose { + eprintln!("[DEBUG] Trying host volume path: {}", p.display()); + } + fs::read_to_string(&p).ok() + }); if let Some(content) = host_content { if verbose { diff --git a/src/utils/stamps.rs b/src/utils/stamps.rs index 46efa54..ebe2000 100644 --- a/src/utils/stamps.rs +++ b/src/utils/stamps.rs @@ -2429,8 +2429,7 @@ kernel: let empty_parsed = serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); let hash_without = compute_runtime_input_hash(&without_kernel, "dev", &empty_parsed).unwrap(); - let hash_with = - compute_runtime_input_hash(&with_kernel, "dev", &empty_parsed).unwrap(); + let hash_with = compute_runtime_input_hash(&with_kernel, "dev", &empty_parsed).unwrap(); // Hashes should differ when kernel config is added assert_ne!(hash_without.config_hash, hash_with.config_hash); @@ -2543,10 +2542,8 @@ extensions: ) .unwrap(); - let hash_without = - compute_runtime_input_hash(&runtime, "dev", &parsed_without).unwrap(); - let hash_with = - compute_runtime_input_hash(&runtime, "dev", &parsed_with).unwrap(); + let hash_without = compute_runtime_input_hash(&runtime, "dev", &parsed_without).unwrap(); + let hash_with = compute_runtime_input_hash(&runtime, "dev", &parsed_with).unwrap(); assert_ne!( hash_without.config_hash, hash_with.config_hash, @@ -2578,8 +2575,7 @@ var_files: let empty_parsed = 
serde_yaml::Value::Mapping(serde_yaml::Mapping::new()); let hash_without = compute_runtime_input_hash(&runtime_without, "dev", &empty_parsed).unwrap(); - let hash_with = - compute_runtime_input_hash(&runtime_with, "dev", &empty_parsed).unwrap(); + let hash_with = compute_runtime_input_hash(&runtime_with, "dev", &empty_parsed).unwrap(); assert_ne!( hash_without.config_hash, hash_with.config_hash, From ac01785dea8bc892ea59a6facc5577232cf9eada Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Thu, 5 Mar 2026 08:21:45 -0500 Subject: [PATCH 05/12] add SDK cross-compilation RPM packaging feature doc --- .../sdk-cross-compilation-rpm-packaging.md | 267 ++++++++++++++++++ 1 file changed, 267 insertions(+) create mode 100644 docs/features/sdk-cross-compilation-rpm-packaging.md diff --git a/docs/features/sdk-cross-compilation-rpm-packaging.md b/docs/features/sdk-cross-compilation-rpm-packaging.md new file mode 100644 index 0000000..216043e --- /dev/null +++ b/docs/features/sdk-cross-compilation-rpm-packaging.md @@ -0,0 +1,267 @@ +# SDK Cross-Compilation RPM Packaging + +## Context + +avocado-cli can already cross-compile code via `sdk compile` and package extension source directories via `ext package`. However, there's no way to package **compiled artifacts** into RPMs for publishing to private RPM repositories. Developers need a `sdk package` command that takes cross-compiled output, stages it into a sysroot layout, and creates proper architecture-specific RPMs with optional Yocto-inspired sub-package splitting (-dev, -dbg, -src). 
+ +## Config Schema + +Extend `sdk.compile` sections with a `package` block containing `install` (staging script) and RPM config: + +```yaml +sdk: + compile: + my-app: + compile: scripts/compile.sh # existing - cross-compiles code + clean: scripts/clean.sh # existing + packages: # existing - build dependencies + gcc-aarch64-linux-gnu: "*" + + package: # NEW - RPM packaging config + install: scripts/install.sh # stages files to $DESTDIR + version: "1.0.0" # required (semver) + name: my-app # defaults to section name + release: "1" # defaults to "1" + license: "MIT" # defaults to "Unspecified" + summary: "My application" # auto-generated if missing + vendor: "Acme Corp" # defaults to "Unspecified" + url: "https://..." # optional + arch: "aarch64" # defaults to target-derived RPM arch; override if needed + requires: # RPM Requires: dependencies + - "glibc >= 2.17" + + # Omit `files` -> all staged files go in one RPM + # Specify `files` -> only matching files in main RPM + files: + - /usr/bin/* + - /usr/lib/lib*.so.* + + # Sub-packages (Yocto-inspired file selection) + split: + dev: + summary: "Development files for my-app" + requires: + - "my-app = 1.0.0" + files: + - /usr/include/** + - /usr/lib/lib*.so + - /usr/lib/pkgconfig/** + dbg: + summary: "Debug symbols for my-app" + files: + - /usr/lib/debug/** + - /usr/lib/.debug/** +``` + +Minimal config (most common case - single RPM with everything): +```yaml +sdk: + compile: + my-app: + compile: build.sh + package: + install: install.sh + version: "1.0.0" +``` + +## Developer Workflow + +```bash +avocado sdk install # install SDK + build deps +avocado sdk compile my-app # cross-compile +avocado sdk package my-app --out-dir ./rpms # stage + package RPM(s) +# Output: ./rpms/my-app-1.0.0-1.aarch64.rpm +# ./rpms/my-app-dev-1.0.0-1.aarch64.rpm (if split defined) +``` + +## Pipeline: `sdk package
<section>`
+
+1. Validate SDK install stamp (same as `sdk compile`)
+2. Validate compile section has `package` block with `install` script
+3. Run `package.install` script with `$DESTDIR=$AVOCADO_SDK_PREFIX/staging/
<section>/`
+
+4. If `split` defined, partition staged files by glob patterns (first match wins)
+5. Generate RPM spec with `%package -n` for sub-packages
+6. Run `rpmbuild --target <arch>` in container
+7. Output to `$AVOCADO_PREFIX/output/packages/` or `--out-dir`
+
+Key difference from `ext package`: packages compiled binaries (arch-specific), not source (noarch).
+
+## Coexistence with Extension Compile References
+
+Extensions already reference `sdk.compile` sections via `extensions.<name>.packages.<pkg>.compile`. Adding a `package` block to a compile section does **not** affect extensions — they ignore it and continue using their own `install` script to copy artifacts into the extension sysroot.
+
+A single compile section can serve both paths simultaneously:
+
+```yaml
+sdk:
+  compile:
+    my-app:
+      compile: build.sh        # shared compile step
+      package:                 # only used by `sdk package`
+        install: install.sh
+        version: "1.0.0"
+
+extensions:
+  my-ext:
+    packages:
+      my-app:
+        compile: my-app            # reuses sdk.compile.my-app.compile
+        install: ext-install.sh    # extension-specific install (separate script)
+```
+
+The compile output (`$AVOCADO_BUILD_DIR`) is shared between both paths. The install scripts are independent — `package.install` stages to `$DESTDIR` for RPMs, while the extension install copies to `$AVOCADO_BUILD_EXT_SYSROOT` for sysext/confext.
+
+## Build Directory Convention (`$AVOCADO_BUILD_DIR`)
+
+Today compile scripts have no standard location for build output — artifacts may end up in the source tree. We introduce `$AVOCADO_BUILD_DIR` as a per-section, auto-created build output directory. It's opt-in: scripts can use it or ignore it.
+
+**Location:** `$AVOCADO_SDK_PREFIX/build/<section>/`
+
+This env var is set by both `sdk compile` and `sdk package` (and available to extension install scripts too), so all paths share a common location for compiled artifacts.
+
+```bash
+# compile.sh — any build system can use it
+cmake -B $AVOCADO_BUILD_DIR -S . 
&& cmake --build $AVOCADO_BUILD_DIR +# or: cargo build --target-dir $AVOCADO_BUILD_DIR +# or: make O=$AVOCADO_BUILD_DIR +# or: go build -o $AVOCADO_BUILD_DIR/my-app . +``` + +**Implementation:** In `sdk/compile.rs`, set `AVOCADO_BUILD_DIR` alongside the existing `AVOCADO_SDK_PREFIX` when invoking scripts (line ~274). The directory is `mkdir -p`'d before the script runs. Same for `sdk/package.rs` when running the install script, and in `ext/build.rs` when running extension install scripts. + +## Install Script Convention + +The install script receives `$DESTDIR` and copies from `$AVOCADO_BUILD_DIR`: +```bash +#!/bin/bash +# install.sh +mkdir -p $DESTDIR/usr/bin +cp $AVOCADO_BUILD_DIR/my-app $DESTDIR/usr/bin/ +mkdir -p $DESTDIR/etc/my-app +cp config.toml $DESTDIR/etc/my-app/ +``` + +Environment variables available to all scripts: +| Variable | Set by | Purpose | +|----------|--------|---------| +| `$AVOCADO_BUILD_DIR` | compile, package, ext build | Per-section build output dir (opt-in) | +| `$AVOCADO_SDK_PREFIX` | entrypoint | SDK toolchains and sysroots | +| `$AVOCADO_TARGET` | entrypoint | Target architecture | +| `$AVOCADO_PREFIX` | entrypoint | Base prefix for target | +| `$DESTDIR` | package only | Staging root for RPM packaging | +| `$AVOCADO_BUILD_EXT_SYSROOT` | ext build only | Extension sysroot destination | + +## File Selection Algorithm (for split packages) + +For each file in `$DESTDIR`: +1. Check against sub-package patterns in definition order (first match wins) +2. Unmatched files go to main package +3. If `files` specified on main package, only matching files included; others generate warnings +4. If `files` omitted on main package, all unmatched files included +5. Empty sub-packages are skipped with a warning + +## RPM Architecture + +If `package.arch` is explicitly set in config, use that value directly. 
Otherwise, derive from target triple:
+- `aarch64-*` -> `aarch64`
+- `x86_64-*` -> `x86_64`
+- `armv7-*` -> `armv7hl`
+- `riscv64-*` -> `riscv64`
+- `i686-*` -> `i686`
+
+## Implementation Steps
+
+### 1. Config structs (`src/utils/config.rs`)
+
+Add after `CompileConfig` (line ~419):
+
+- `PackageConfig` struct: `install`, `name`, `version`, `release`, `license`, `summary`, `description`, `vendor`, `url`, `arch`, `requires`, `files`, `split` (all Optional except `version` and `install`). `arch` defaults to target-derived RPM arch but can be explicitly set (e.g., `noarch` for pure config packages)
+- `SplitPackageConfig` struct: `summary`, `description`, `requires`, `files` (files required)
+- Extend `CompileConfig` with `package: Option<PackageConfig>` (no `install` on CompileConfig — it lives inside PackageConfig)
+
+`package` is `Option` on `CompileConfig`, so existing configs deserialize unchanged.
+
+### 2. Add `$AVOCADO_BUILD_DIR` to existing compile/build paths
+
+- **`src/commands/sdk/compile.rs`** (~line 274): When building the compile command string, add `AVOCADO_BUILD_DIR=$AVOCADO_SDK_PREFIX/build/<section>` and `mkdir -p` before invoking the script.
+- **`src/commands/ext/build.rs`** (~line 1664): When running extension install scripts, also export `AVOCADO_BUILD_DIR` so ext install scripts can find compiled output.
+
+These are small, additive changes to the command strings that already set `AVOCADO_SDK_PREFIX`.
+
+### 3. 
New command module (`src/commands/sdk/package.rs`)
+
+Core `SdkPackageCommand` struct following `SdkCompileCommand` pattern:
+- `section: String` (singular - package one section at a time)
+- `output_dir: Option<String>` (follows `ext package` pattern)
+- Standard fields: config_path, verbose, target, container_args, dnf_args, no_stamps, sdk_arch
+
+Key methods:
+- `execute()` - main pipeline (stamp check -> package.install script -> RPM build)
+- `target_to_rpm_arch()` - target triple to RPM arch mapping
+- `generate_rpm_build_script()` - shell script for staging + spec generation + rpmbuild
+- `extract_rpm_metadata()` - reads from PackageConfig (not raw YAML like ext package)
+- `copy_rpm_to_host()` / `create_temp_container()` - docker cp pattern from ext package
+
+RPM spec supports `%package -n <name>-<subpkg>` for sub-packages with separate `%files` sections.
+
+### 4. Module registration (`src/commands/sdk/mod.rs`)
+
+Add `pub mod package;` and `pub use package::SdkPackageCommand;`
+
+### 5. CLI wiring (`src/main.rs`)
+
+- Import `SdkPackageCommand`
+- Add `Package` variant to `SdkCommands` enum with args: config, verbose, target, section, output_dir, container_args, dnf_args
+- Add dispatch arm following `SdkCommands::Compile` pattern
+
+### 6. 
Tests + +Unit tests in `package.rs`: +- Constructor, builder methods +- `target_to_rpm_arch()` for all supported targets + unknown pass-through +- RPM metadata extraction (minimal, full, missing version error) +- Summary/description auto-generation + +Config deserialization tests in `config.rs`: +- CompileConfig with/without new fields (backward compat) +- PackageConfig minimal (just version) and full +- SplitPackageConfig deserialization + +## Critical Files + +| File | Change | +|------|--------| +| `src/utils/config.rs` | Add `PackageConfig` (with `install`), `SplitPackageConfig`; add `package` to `CompileConfig` | +| `src/commands/sdk/compile.rs` | Add `$AVOCADO_BUILD_DIR` env var to compile command strings | +| `src/commands/ext/build.rs` | Add `$AVOCADO_BUILD_DIR` env var to extension install command strings | +| `src/commands/sdk/package.rs` | **NEW** - `SdkPackageCommand` implementation | +| `src/commands/sdk/mod.rs` | Register + re-export new module | +| `src/main.rs` | Add `Package` to `SdkCommands`, dispatch arm | + +Reference files (patterns to follow): +- `src/commands/sdk/compile.rs` - command structure, stamp validation, container execution +- `src/commands/ext/package.rs` - RPM spec generation, docker cp, metadata extraction + +## Verification + +1. `cargo build` in avocado-cli - confirms compilation +2. `cargo test` - new unit tests pass, existing tests unchanged +3. Manual test with a minimal config: + ```yaml + sdk: + image: docker.io/avocadolinux/sdk:dev + compile: + hello: + compile: compile.sh + packages: + gcc: "*" + package: + install: install.sh + version: "1.0.0" + ``` + ```bash + avocado sdk compile hello + avocado sdk package hello --out-dir ./out + rpm -qip ./out/hello-1.0.0-1.*.rpm # verify metadata + ``` +4. 
Test sub-package splitting with a `split` config and verify multiple RPMs are created From 3317df4f5159abc296772d4a49abae77c1372f7c Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Thu, 5 Mar 2026 19:04:08 -0500 Subject: [PATCH 06/12] add sdk package command for cross-compiled RPM packaging - Add `sdk package
` command that stages compiled artifacts into a sysroot layout and builds architecture-specific RPMs via rpmbuild - Add `PackageConfig` and `SplitPackageConfig` structs to config; extend `CompileConfig` with optional `package` block (backward compatible) - Expose `$AVOCADO_BUILD_DIR` env var in `sdk compile`, `sdk clean`, and `ext build` install scripts for per-section out-of-tree build output - Fix `sdk clean` to pass `AVOCADO_BUILD_DIR` alongside `AVOCADO_SDK_PREFIX` - Use `--out` flag (consistent with other commands) for host output path; success message shows host-relative path, not container-internal path --- src/commands/ext/build.rs | 4 +- src/commands/sdk/clean.rs | 2 +- src/commands/sdk/compile.rs | 7 +- src/commands/sdk/install.rs | 31 + src/commands/sdk/mod.rs | 2 + src/commands/sdk/package.rs | 987 ++++++++++++++++++++++++++ src/main.rs | 48 +- src/utils/config.rs | 123 +++- src/utils/container.rs | 69 ++ tests/commands/avocado/sdk/install.rs | 14 + 10 files changed, 1279 insertions(+), 8 deletions(-) create mode 100644 src/commands/sdk/package.rs diff --git a/src/commands/ext/build.rs b/src/commands/ext/build.rs index e8e8340..ca0b415 100644 --- a/src/commands/ext/build.rs +++ b/src/commands/ext/build.rs @@ -1663,12 +1663,12 @@ echo "Set proper permissions on authentication files""#, // Note: Use double quotes for workdir so $AVOCADO_PREFIX gets expanded by the shell let install_command = if let Some(workdir) = ext_script_workdir { format!( - r#"cd "{workdir}" && if [ -f '{install_script}' ]; then echo 'Running install script: {install_script}'; export AVOCADO_BUILD_EXT_SYSROOT="$AVOCADO_EXT_SYSROOTS/{extension_name}"; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{install_script}'; else echo 'Install script {install_script} not found.'; ls -la; exit 1; fi"#, + r#"cd "{workdir}" && if [ -f '{install_script}' ]; then echo 'Running install script: {install_script}'; export AVOCADO_BUILD_EXT_SYSROOT="$AVOCADO_EXT_SYSROOTS/{extension_name}"; 
AVOCADO_BUILD_DIR=$AVOCADO_SDK_PREFIX/build/{compile_section} AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{install_script}'; else echo 'Install script {install_script} not found.'; ls -la; exit 1; fi"#, extension_name = self.extension ) } else { format!( - r#"if [ -f '{install_script}' ]; then echo 'Running install script: {install_script}'; export AVOCADO_BUILD_EXT_SYSROOT="$AVOCADO_EXT_SYSROOTS/{extension_name}"; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{install_script}'; else echo 'Install script {install_script} not found.'; ls -la; exit 1; fi"#, + r#"if [ -f '{install_script}' ]; then echo 'Running install script: {install_script}'; export AVOCADO_BUILD_EXT_SYSROOT="$AVOCADO_EXT_SYSROOTS/{extension_name}"; AVOCADO_BUILD_DIR=$AVOCADO_SDK_PREFIX/build/{compile_section} AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{install_script}'; else echo 'Install script {install_script} not found.'; ls -la; exit 1; fi"#, extension_name = self.extension ) }; diff --git a/src/commands/sdk/clean.rs b/src/commands/sdk/clean.rs index 7653bc6..076ea4e 100644 --- a/src/commands/sdk/clean.rs +++ b/src/commands/sdk/clean.rs @@ -222,7 +222,7 @@ impl SdkCleanCommand { // Build clean command - scripts are relative to src_dir (/opt/src in container) let clean_command = format!( - r#"if [ -f '{clean_script}' ]; then echo 'Running clean script: {clean_script}'; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{clean_script}'; else echo 'Clean script {clean_script} not found, skipping.'; fi"# + r#"if [ -f '{clean_script}' ]; then echo 'Running clean script: {clean_script}'; AVOCADO_BUILD_DIR=$AVOCADO_SDK_PREFIX/build/{section_name} AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{clean_script}'; else echo 'Clean script {clean_script} not found, skipping.'; fi"# ); if self.verbose { diff --git a/src/commands/sdk/compile.rs b/src/commands/sdk/compile.rs index ff80ca5..ff0691b 100644 --- a/src/commands/sdk/compile.rs +++ b/src/commands/sdk/compile.rs @@ -269,14 +269,15 @@ impl SdkCompileCommand 
{ // Build compile command with optional workdir prefix // For remote extensions, scripts are in $AVOCADO_PREFIX/includes// instead of /opt/src // Note: Use double quotes for workdir so $AVOCADO_PREFIX gets expanded by the shell + let section_name = &section.name; let compile_command = if let Some(ref workdir) = self.workdir { format!( - r#"cd "{workdir}" && if [ -f '{}' ]; then echo 'Running compile script: {}'; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{}'; else echo 'Compile script {} not found.' && ls -la; exit 1; fi"#, + r#"cd "{workdir}" && if [ -f '{}' ]; then echo 'Running compile script: {}'; mkdir -p $AVOCADO_SDK_PREFIX/build/{section_name} && AVOCADO_BUILD_DIR=$AVOCADO_SDK_PREFIX/build/{section_name} AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{}'; else echo 'Compile script {} not found.' && ls -la; exit 1; fi"#, section.script, section.script, section.script, section.script ) } else { format!( - r#"if [ -f '{}' ]; then echo 'Running compile script: {}'; AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{}'; else echo 'Compile script {} not found.' && ls -la; exit 1; fi"#, + r#"if [ -f '{}' ]; then echo 'Running compile script: {}'; mkdir -p $AVOCADO_SDK_PREFIX/build/{section_name} && AVOCADO_BUILD_DIR=$AVOCADO_SDK_PREFIX/build/{section_name} AVOCADO_SDK_PREFIX=$AVOCADO_SDK_PREFIX bash '{}'; else echo 'Compile script {} not found.' 
&& ls -la; exit 1; fi"#, section.script, section.script, section.script, section.script ) }; @@ -459,6 +460,7 @@ dependencies = { gcc = "*" } compile: Some("my_script.sh".to_string()), clean: None, packages: Some(deps), + package: None, }; let script = cmd.find_compile_script_in_section(§ion_config); @@ -469,6 +471,7 @@ dependencies = { gcc = "*" } compile: None, clean: None, packages: None, + package: None, }; let script = cmd.find_compile_script_in_section(§ion_config_no_script); diff --git a/src/commands/sdk/install.rs b/src/commands/sdk/install.rs index 601a490..b78f7c5 100644 --- a/src/commands/sdk/install.rs +++ b/src/commands/sdk/install.rs @@ -315,6 +315,37 @@ cp -r /etc/rpm $AVOCADO_SDK_PREFIX/etc cp -r /etc/dnf $AVOCADO_SDK_PREFIX/etc cp -r /etc/yum.repos.d $AVOCADO_SDK_PREFIX/etc +# Compute the machine-scoped SDK arch (SDKIMGARCH) for this machine+host combination. +# This arch is used by nativesdk packages so that each machine gets independent PR +# revision tracking while sharing the same SDK host arch repo path. +MACHINE_US=$(echo "$AVOCADO_TARGET" | tr '-' '_') +SDK_ARCH_US=$(uname -m | tr '-' '_') +SDKIMGARCH_US="${MACHINE_US}_${SDK_ARCH_US}_avocadosdk" +GENERIC_SDK_ARCH_US="${SDK_ARCH_US}_avocadosdk" + +# Append arch compat entries to the rpmrc so RPM will accept SDKIMGARCH packages +# during bootstrap install. RPM_ETCCONFIGDIR points here for the bootstrap dnf call. +echo "arch_compat: ${SDKIMGARCH_US}: all any noarch ${SDK_ARCH_US} ${GENERIC_SDK_ARCH_US} all_avocadosdk ${SDKIMGARCH_US}" >> $AVOCADO_SDK_PREFIX/etc/rpmrc +echo "buildarch_compat: ${SDKIMGARCH_US}: noarch" >> $AVOCADO_SDK_PREFIX/etc/rpmrc + +# Prepend the SDKIMGARCH to the dnf arch vars so DNF searches the machine-scoped +# SDK repo for bootstrap packages (varsdir points here for the bootstrap dnf call). 
+ARCH_FILE=$AVOCADO_SDK_PREFIX/etc/dnf/vars/arch +EXISTING_ARCH=$(cat "$ARCH_FILE" 2>/dev/null || echo "") +if [ -n "$EXISTING_ARCH" ]; then + echo "${SDKIMGARCH_US}:${EXISTING_ARCH}" > "$ARCH_FILE" +else + echo "${SDKIMGARCH_US}" > "$ARCH_FILE" +fi + +# Update the rpm platform file to SDKIMGARCH so RPM's transaction check accepts +# machine-scoped packages. The platform file determines the host arch for RPM; +# without this, RPM sees x86_64_avocadosdk and rejects qemux86_64_x86_64_avocadosdk +# packages as "intended for a different architecture". +PLATFORM_FILE=$AVOCADO_SDK_PREFIX/etc/rpm/platform +rm -f "$PLATFORM_FILE" +echo "${SDKIMGARCH_US}-avocado-linux" > "$PLATFORM_FILE" + # Restore custom repo URL after copying container defaults (which may overwrite it) if [ -n "$AVOCADO_SDK_REPO_URL" ]; then mkdir -p $AVOCADO_SDK_PREFIX/etc/dnf/vars diff --git a/src/commands/sdk/mod.rs b/src/commands/sdk/mod.rs index 989ff24..1673844 100644 --- a/src/commands/sdk/mod.rs +++ b/src/commands/sdk/mod.rs @@ -3,6 +3,7 @@ pub mod compile; pub mod deps; pub mod dnf; pub mod install; +pub mod package; pub mod run; #[allow(unused_imports)] @@ -13,5 +14,6 @@ pub use deps::SdkDepsCommand; #[allow(unused_imports)] pub use dnf::SdkDnfCommand; pub use install::SdkInstallCommand; +pub use package::SdkPackageCommand; #[allow(unused_imports)] pub use run::SdkRunCommand; diff --git a/src/commands/sdk/package.rs b/src/commands/sdk/package.rs new file mode 100644 index 0000000..05b6e8a --- /dev/null +++ b/src/commands/sdk/package.rs @@ -0,0 +1,987 @@ +//! SDK package command implementation. +//! +//! Takes cross-compiled output, stages it into a sysroot layout, +//! and creates architecture-specific RPMs with optional sub-package splitting. 
+ +use anyhow::{Context, Result}; +use std::collections::HashMap; +use std::fs; +use std::path::PathBuf; + +use crate::utils::{ + config::{Config, PackageConfig, SplitPackageConfig}, + container::{RunConfig, SdkContainer}, + output::{print_info, print_success, OutputLevel}, + stamps::{generate_batch_read_stamps_script, validate_stamps_batch, StampRequirement}, + target::resolve_target_required, +}; + +/// RPM metadata collected from PackageConfig +#[derive(Debug)] +struct RpmMetadata { + name: String, + version: String, + release: String, + summary: String, + description: String, + license: String, + arch: String, + vendor: String, + url: Option, + requires: Vec, +} + +/// Implementation of the 'sdk package' command. +pub struct SdkPackageCommand { + /// Path to configuration file + pub config_path: String, + /// Enable verbose output + pub verbose: bool, + /// Compile section to package + pub section: String, + /// Output directory on host for the built RPM(s) + pub output_dir: Option, + /// Global target architecture + pub target: Option, + /// Additional arguments to pass to the container runtime + pub container_args: Option>, + /// Additional arguments to pass to DNF commands + pub dnf_args: Option>, + /// Disable stamp validation + pub no_stamps: bool, + /// SDK container architecture for cross-arch emulation + pub sdk_arch: Option, +} + +impl SdkPackageCommand { + pub fn new( + config_path: String, + verbose: bool, + section: String, + output_dir: Option, + target: Option, + container_args: Option>, + dnf_args: Option>, + ) -> Self { + Self { + config_path, + verbose, + section, + output_dir, + target, + container_args, + dnf_args, + no_stamps: false, + sdk_arch: None, + } + } + + pub fn with_no_stamps(mut self, no_stamps: bool) -> Self { + self.no_stamps = no_stamps; + self + } + + pub fn with_sdk_arch(mut self, sdk_arch: Option) -> Self { + self.sdk_arch = sdk_arch; + self + } + + /// Execute the sdk package command + pub async fn execute(&self) -> 
Result<()> { + let config = Config::load_composed(&self.config_path, self.target.as_deref()) + .with_context(|| format!("Failed to load config from {}", self.config_path))?; + let config = &config.config; + + // Validate SDK install stamp + if !self.no_stamps { + let container_image = config + .get_sdk_image() + .context("No SDK container image specified in configuration")?; + let target = resolve_target_required(self.target.as_deref(), config)?; + let container_helper = + SdkContainer::from_config(&self.config_path, config)?.verbose(self.verbose); + + let requirements = vec![StampRequirement::sdk_install()]; + let batch_script = generate_batch_read_stamps_script(&requirements); + let run_config = RunConfig { + container_image: container_image.to_string(), + target: target.clone(), + command: batch_script, + verbose: false, + source_environment: true, + interactive: false, + repo_url: config.get_sdk_repo_url(), + repo_release: config.get_sdk_repo_release(), + container_args: config.merge_sdk_container_args(self.container_args.as_ref()), + dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), + ..Default::default() + }; + + let output = container_helper + .run_in_container_with_output(run_config) + .await?; + + let validation = + validate_stamps_batch(&requirements, output.as_deref().unwrap_or(""), None); + + if !validation.is_satisfied() { + validation + .into_error("Cannot run SDK package") + .print_and_exit(); + } + } + + // Look up the compile section + let sdk = config + .sdk + .as_ref() + .context("No 'sdk' section in configuration")?; + let compile_map = sdk + .compile + .as_ref() + .context("No 'sdk.compile' section in configuration")?; + let compile_config = compile_map.get(&self.section).ok_or_else(|| { + anyhow::anyhow!( + "Compile section '{}' not found. 
Available sections: {}", + self.section, + compile_map.keys().cloned().collect::>().join(", ") + ) + })?; + + // Validate package block + let pkg_config = compile_config.package.as_ref().ok_or_else(|| { + anyhow::anyhow!( + "Compile section '{}' has no 'package' block. Add a 'package' block with at least 'install' and 'version'.", + self.section + ) + })?; + + // Resolve target and architecture + let target = resolve_target_required(self.target.as_deref(), config)?; + + // Extract RPM metadata + let metadata = self.extract_rpm_metadata(pkg_config, &target)?; + + if self.verbose { + print_info( + &format!( + "Packaging section '{}' as {}-{}-{}.{}.rpm", + self.section, metadata.name, metadata.version, metadata.release, metadata.arch + ), + OutputLevel::Normal, + ); + } + + // Build and collect RPMs + let output_paths = self + .create_rpm_packages_in_container(&metadata, pkg_config, config, &target) + .await?; + + for path in &output_paths { + print_success( + &format!("Successfully created RPM: {}", path.display()), + OutputLevel::Normal, + ); + } + + Ok(()) + } + + /// Map a target triple or Avocado machine name to an RPM architecture string. + /// + /// Well-known compile triples (e.g. `aarch64-unknown-linux-gnu`) map to their + /// canonical RPM arch. Everything else is normalized: lowercased with hyphens + /// replaced by underscores, since RPM arch names cannot contain hyphens. + pub fn target_to_rpm_arch(target: &str) -> String { + if target.starts_with("aarch64-") || target == "aarch64" { + "aarch64".to_string() + } else if target.starts_with("x86_64-") || target == "x86_64" { + "x86_64".to_string() + } else if target.starts_with("armv7-") || target.starts_with("armv7hl") { + "armv7hl".to_string() + } else if target.starts_with("riscv64-") || target == "riscv64" { + "riscv64".to_string() + } else if target.starts_with("i686-") || target == "i686" { + "i686".to_string() + } else { + // Normalize: lowercase and replace hyphens with underscores. 
+ // RPM arch names cannot contain hyphens, so e.g. "qemux86-64" → "qemux86_64". + target.to_lowercase().replace('-', "_") + } + } + + /// Extract RPM metadata from PackageConfig. + fn extract_rpm_metadata(&self, pkg: &PackageConfig, target: &str) -> Result { + // Validate version + crate::utils::version::validate_semver(&pkg.version).with_context(|| { + format!( + "Section '{}' has invalid version '{}'. Must be semver (e.g. '1.0.0')", + self.section, pkg.version + ) + })?; + + let name = pkg.name.clone().unwrap_or_else(|| self.section.clone()); + + let arch = pkg + .arch + .clone() + .unwrap_or_else(|| Self::target_to_rpm_arch(target)); + + let release = pkg.release.clone().unwrap_or_else(|| "1".to_string()); + + let license = pkg + .license + .clone() + .unwrap_or_else(|| "Unspecified".to_string()); + + let vendor = pkg + .vendor + .clone() + .unwrap_or_else(|| "Unspecified".to_string()); + + let summary = pkg + .summary + .clone() + .unwrap_or_else(|| Self::generate_summary(&name)); + + let description = pkg + .description + .clone() + .unwrap_or_else(|| Self::generate_description(&name)); + + let requires = pkg.requires.clone().unwrap_or_default(); + + Ok(RpmMetadata { + name, + version: pkg.version.clone(), + release, + summary, + description, + license, + arch, + vendor, + url: pkg.url.clone(), + requires, + }) + } + + fn generate_summary(name: &str) -> String { + let words: Vec = name + .split('-') + .map(|w| { + let mut c = w.chars(); + match c.next() { + None => String::new(), + Some(f) => f.to_uppercase().collect::() + c.as_str(), + } + }) + .collect(); + format!("{} compiled SDK package", words.join(" ")) + } + + fn generate_description(name: &str) -> String { + format!("Compiled SDK package for {name}") + } + + /// Create all RPM packages in the SDK container. 
+ async fn create_rpm_packages_in_container( + &self, + metadata: &RpmMetadata, + pkg_config: &PackageConfig, + config: &Config, + target: &str, + ) -> Result> { + let container_image = config + .get_sdk_image() + .ok_or_else(|| anyhow::anyhow!("No SDK container image specified in configuration."))?; + + let merged_container_args = config.merge_sdk_container_args(self.container_args.as_ref()); + + let cwd = std::env::current_dir().context("Failed to get current directory")?; + let volume_manager = + crate::utils::volume::VolumeManager::new("docker".to_string(), self.verbose); + let volume_state = volume_manager.get_or_create_volume(&cwd).await?; + + // Build the RPM script + let (rpm_build_script, rpm_filenames) = + self.generate_rpm_build_script(metadata, pkg_config, target); + + if self.verbose { + print_info( + "Creating RPM package(s) in container...", + OutputLevel::Normal, + ); + } + + let container_helper = SdkContainer::new(); + let run_config = RunConfig { + container_image: container_image.to_string(), + target: target.to_string(), + command: rpm_build_script, + verbose: self.verbose, + source_environment: true, + interactive: false, + repo_url: config.get_sdk_repo_url(), + repo_release: config.get_sdk_repo_release(), + container_args: merged_container_args, + dnf_args: self.dnf_args.clone(), + sdk_arch: self.sdk_arch.clone(), + ..Default::default() + }; + + let success = container_helper.run_in_container(run_config).await?; + if !success { + return Err(anyhow::anyhow!( + "Failed to create RPM package(s) in container" + )); + } + + // Copy RPM(s) to host if --out-dir specified + let mut output_paths = Vec::new(); + for rpm_filename in &rpm_filenames { + let container_rpm_path = + format!("/opt/_avocado/{target}/output/packages/{rpm_filename}"); + + if let Some(output_dir) = &self.output_dir { + self.copy_rpm_to_host( + &volume_state.volume_name, + &container_rpm_path, + output_dir, + rpm_filename, + container_image, + ) + .await?; + + 
output_paths.push(PathBuf::from(output_dir).join(rpm_filename)); + } else { + output_paths.push(PathBuf::from(rpm_filename)); + } + } + + Ok(output_paths) + } + + /// Generate the RPM build shell script and return it along with the expected RPM filenames. + fn generate_rpm_build_script( + &self, + metadata: &RpmMetadata, + pkg_config: &PackageConfig, + _target: &str, + ) -> (String, Vec) { + let section = &self.section; + let name = &metadata.name; + let version = &metadata.version; + let release = &metadata.release; + let arch = &metadata.arch; + let summary = &metadata.summary; + let description = &metadata.description; + let license = &metadata.license; + let vendor = &metadata.vendor; + + let url_line = metadata + .url + .as_ref() + .map(|u| format!("URL: {u}")) + .unwrap_or_default(); + + let requires_lines: String = metadata + .requires + .iter() + .map(|r| format!("Requires: {r}")) + .collect::>() + .join("\n"); + + // Collect RPM filenames to expect + let main_rpm = format!("{name}-{version}-{release}.{arch}.rpm"); + let mut rpm_filenames = vec![main_rpm.clone()]; + + // Build sub-package spec sections and partition script + let (split_spec_sections, partition_script) = if let Some(split) = &pkg_config.split { + let spec = self.generate_split_spec_sections(split, name, version, arch); + let script = self.generate_partition_script(split); + // Add sub-package RPM filenames + for subpkg_name in split.keys() { + rpm_filenames.push(format!( + "{name}-{subpkg_name}-{version}-{release}.{arch}.rpm" + )); + } + (spec, script) + } else { + (String::new(), String::new()) + }; + + // Main %files section: if split defined, main gets unmatched files from $MAIN_DIR; + // otherwise all files from $STAGING + let main_files_section = if pkg_config.split.is_some() { + // Files come from partitioned MAIN_DIR + r#"%files +%defattr(-,root,root,-) +/*"# + .to_string() + } else if let Some(files) = &pkg_config.files { + // Explicit file patterns + let patterns = files.join("\n"); 
+ format!("%files\n%defattr(-,root,root,-)\n{patterns}") + } else { + // All staged files + r#"%files +%defattr(-,root,root,-) +/*"# + .to_string() + }; + + let install_script = &pkg_config.install; + let install_script_escaped = install_script.replace('\'', "'\\''"); + + let script = format!( + r#" +set -e + +STAGING="$AVOCADO_SDK_PREFIX/staging/{section}" +BUILD_DIR="$AVOCADO_SDK_PREFIX/build/{section}" +OUTPUT_DIR="$AVOCADO_PREFIX/output/packages" + +mkdir -p "$STAGING" "$BUILD_DIR" "$OUTPUT_DIR" + +# Run install script with DESTDIR and AVOCADO_BUILD_DIR +export DESTDIR="$STAGING" +export AVOCADO_BUILD_DIR="$BUILD_DIR" + +if [ ! -f '{install_script_escaped}' ]; then + echo "ERROR: Install script not found: {install_script_escaped}" + exit 1 +fi + +echo "Running install script: {install_script_escaped}" +bash '{install_script_escaped}' + +# Verify files were staged +FILE_COUNT=$(find "$STAGING" -type f | wc -l) +if [ "$FILE_COUNT" -eq 0 ]; then + echo "ERROR: No files staged by install script" + exit 1 +fi +echo "Staged $FILE_COUNT file(s)" + +{partition_script} + +# Create RPM build tree +TMPDIR=$(mktemp -d) +mkdir -p "$TMPDIR/BUILD" "$TMPDIR/RPMS" "$TMPDIR/SOURCES" "$TMPDIR/SPECS" "$TMPDIR/SRPMS" + +# Generate spec file +# Note: heredoc is single-quoted so no shell expansion inside. +# The staging path is passed via rpmbuild --define so it becomes an RPM macro. +cat > "$TMPDIR/SPECS/package.spec" << 'SPEC_EOF' +%define _buildhost reproducible +AutoReqProv: no + +Name: {name} +Version: {version} +Release: {release} +Summary: {summary} +License: {license} +Vendor: {vendor} +{url_line} +{requires_lines} + +%description +{description} + +{split_spec_sections} + +%install +mkdir -p %{{buildroot}} +cp -a %{{staging_dir}}/. 
%{{buildroot}}/ + +{main_files_section} + +%clean +%changelog +SPEC_EOF + +# Run rpmbuild; pass staging_dir so %install can reference it as an RPM macro +rpmbuild --define "_topdir $TMPDIR" --define "staging_dir $STAGING" --define "_arch {arch}" --target {arch} -bb "$TMPDIR/SPECS/package.spec" + +# Move RPMs to output +find "$TMPDIR/RPMS" -name '*.rpm' | while read rpm_path; do + rpm_file=$(basename "$rpm_path") + mv "$rpm_path" "$OUTPUT_DIR/$rpm_file" + echo "RPM created: $OUTPUT_DIR/$rpm_file" +done + +rm -rf "$TMPDIR" +"#, + ); + + (script, rpm_filenames) + } + + /// Generate spec sub-package sections for split packages. + fn generate_split_spec_sections( + &self, + split: &HashMap, + parent_name: &str, + version: &str, + arch: &str, + ) -> String { + let mut sections = String::new(); + + // Sort for deterministic output + let mut subpkg_names: Vec<&String> = split.keys().collect(); + subpkg_names.sort(); + + for subpkg_name in subpkg_names { + let subpkg = &split[subpkg_name]; + let full_name = format!("{parent_name}-{subpkg_name}"); + + let summary = subpkg + .summary + .clone() + .unwrap_or_else(|| format!("{full_name} package")); + + let description = subpkg + .description + .clone() + .unwrap_or_else(|| format!("Sub-package {full_name}")); + + let requires_lines: String = subpkg + .requires + .as_deref() + .unwrap_or(&[]) + .iter() + .map(|r| format!("Requires: {r}")) + .collect::>() + .join("\n"); + + // %files section for this sub-package uses the patterns from config + let files_list: String = subpkg.files.join("\n"); + + sections.push_str(&format!( + r#" +%package -n {full_name} +Summary: {summary} +{requires_lines} + +%description -n {full_name} +{description} + +%files -n {full_name} +%defattr(-,root,root,-) +{files_list} + +"#, + )); + + let _ = (version, arch); // suppress unused warnings + } + + sections + } + + /// Generate file partitioning script for split packages. + /// This runs in the container after install.sh to handle file partitioning. 
+ fn generate_partition_script(&self, split: &HashMap) -> String { + // For split packages, we still copy everything to buildroot and use + // RPM %files sections to claim files. The %files patterns from config + // are embedded directly in the spec. No shell-level partitioning needed. + // + // However, warn if any sub-packages are empty (best-effort, not blocking). + let _ = split; + String::new() + } + + /// Copy an RPM from the container volume to the host. + async fn copy_rpm_to_host( + &self, + volume_name: &str, + container_rpm_path: &str, + output_dir: &str, + rpm_filename: &str, + _container_image: &str, + ) -> Result<()> { + if self.verbose { + print_info( + &format!("Copying RPM to host: {output_dir}/{rpm_filename}"), + OutputLevel::Normal, + ); + } + + let temp_container_id = self.create_temp_container(volume_name).await?; + + let host_output_dir = if output_dir.starts_with('/') { + PathBuf::from(output_dir) + } else { + std::env::current_dir()?.join(output_dir) + }; + fs::create_dir_all(&host_output_dir)?; + + let docker_cp_source = format!("{temp_container_id}:{container_rpm_path}"); + let docker_cp_dest = host_output_dir.join(rpm_filename); + + if self.verbose { + print_info( + &format!( + "docker cp {} -> {}", + docker_cp_source, + docker_cp_dest.display() + ), + OutputLevel::Normal, + ); + } + + let copy_output = tokio::process::Command::new("docker") + .arg("cp") + .arg(&docker_cp_source) + .arg(&docker_cp_dest) + .output() + .await + .context("Failed to execute docker cp")?; + + let _ = tokio::process::Command::new("docker") + .arg("rm") + .arg("-f") + .arg(&temp_container_id) + .output() + .await; + + if !copy_output.status.success() { + let stderr = String::from_utf8_lossy(©_output.stderr); + return Err(anyhow::anyhow!("docker cp failed: {stderr}")); + } + + Ok(()) + } + + /// Create a temporary container to access the volume for docker cp. 
+ async fn create_temp_container(&self, volume_name: &str) -> Result { + let output = tokio::process::Command::new("docker") + .arg("create") + .arg("-v") + .arg(format!("{volume_name}:/opt/_avocado:ro")) + .arg("alpine:latest") + .arg("true") + .output() + .await + .context("Failed to create temporary container")?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(anyhow::anyhow!( + "Failed to create temporary container: {stderr}" + )); + } + + Ok(String::from_utf8_lossy(&output.stdout).trim().to_string()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn make_cmd() -> SdkPackageCommand { + SdkPackageCommand::new( + "avocado.yaml".to_string(), + false, + "my-app".to_string(), + None, + None, + None, + None, + ) + } + + #[test] + fn test_new() { + let cmd = SdkPackageCommand::new( + "test.yaml".to_string(), + true, + "hello".to_string(), + Some("./rpms".to_string()), + Some("aarch64-unknown-linux-gnu".to_string()), + None, + None, + ); + assert_eq!(cmd.config_path, "test.yaml"); + assert!(cmd.verbose); + assert_eq!(cmd.section, "hello"); + assert_eq!(cmd.output_dir, Some("./rpms".to_string())); + assert!(!cmd.no_stamps); + } + + #[test] + fn test_with_no_stamps() { + let cmd = make_cmd().with_no_stamps(true); + assert!(cmd.no_stamps); + } + + #[test] + fn test_with_sdk_arch() { + let cmd = make_cmd().with_sdk_arch(Some("aarch64".to_string())); + assert_eq!(cmd.sdk_arch, Some("aarch64".to_string())); + } + + #[test] + fn test_target_to_rpm_arch() { + assert_eq!( + SdkPackageCommand::target_to_rpm_arch("aarch64-unknown-linux-gnu"), + "aarch64" + ); + assert_eq!(SdkPackageCommand::target_to_rpm_arch("aarch64"), "aarch64"); + assert_eq!( + SdkPackageCommand::target_to_rpm_arch("x86_64-unknown-linux-gnu"), + "x86_64" + ); + assert_eq!(SdkPackageCommand::target_to_rpm_arch("x86_64"), "x86_64"); + assert_eq!( + SdkPackageCommand::target_to_rpm_arch("armv7-unknown-linux-gnueabihf"), + "armv7hl" + ); + 
assert_eq!( + SdkPackageCommand::target_to_rpm_arch("riscv64-unknown-linux-gnu"), + "riscv64" + ); + assert_eq!( + SdkPackageCommand::target_to_rpm_arch("i686-unknown-linux-gnu"), + "i686" + ); + // Avocado machine names: hyphens become underscores, no hardcoded mapping + assert_eq!( + SdkPackageCommand::target_to_rpm_arch("qemux86-64"), + "qemux86_64" + ); + assert_eq!( + SdkPackageCommand::target_to_rpm_arch("qemuarm64"), + "qemuarm64" + ); + assert_eq!(SdkPackageCommand::target_to_rpm_arch("qemuarm"), "qemuarm"); + assert_eq!( + SdkPackageCommand::target_to_rpm_arch("qemuriscv64"), + "qemuriscv64" + ); + // Unknown targets: normalize (lowercase + hyphens → underscores) + assert_eq!( + SdkPackageCommand::target_to_rpm_arch("mips-unknown-linux-gnu"), + "mips_unknown_linux_gnu" + ); + } + + #[test] + fn test_generate_summary() { + assert_eq!( + SdkPackageCommand::generate_summary("my-app"), + "My App compiled SDK package" + ); + assert_eq!( + SdkPackageCommand::generate_summary("libfoo"), + "Libfoo compiled SDK package" + ); + } + + #[test] + fn test_generate_description() { + assert_eq!( + SdkPackageCommand::generate_description("my-app"), + "Compiled SDK package for my-app" + ); + } + + #[test] + fn test_extract_rpm_metadata_minimal() { + let cmd = make_cmd(); + let pkg = PackageConfig { + install: "install.sh".to_string(), + version: "1.0.0".to_string(), + name: None, + release: None, + license: None, + summary: None, + description: None, + vendor: None, + url: None, + arch: None, + requires: None, + files: None, + split: None, + }; + + let meta = cmd + .extract_rpm_metadata(&pkg, "aarch64-unknown-linux-gnu") + .unwrap(); + + assert_eq!(meta.name, "my-app"); // defaults to section name + assert_eq!(meta.version, "1.0.0"); + assert_eq!(meta.release, "1"); + assert_eq!(meta.license, "Unspecified"); + assert_eq!(meta.vendor, "Unspecified"); + assert_eq!(meta.arch, "aarch64"); // derived from target + assert_eq!(meta.url, None); + assert!(meta.requires.is_empty()); 
+ } + + #[test] + fn test_extract_rpm_metadata_full() { + let cmd = make_cmd(); + let pkg = PackageConfig { + install: "install.sh".to_string(), + version: "2.3.4".to_string(), + name: Some("custom-name".to_string()), + release: Some("2".to_string()), + license: Some("Apache-2.0".to_string()), + summary: Some("A custom summary".to_string()), + description: Some("A longer description".to_string()), + vendor: Some("Acme Corp".to_string()), + url: Some("https://example.com".to_string()), + arch: Some("noarch".to_string()), + requires: Some(vec!["glibc >= 2.17".to_string()]), + files: None, + split: None, + }; + + let meta = cmd + .extract_rpm_metadata(&pkg, "aarch64-unknown-linux-gnu") + .unwrap(); + + assert_eq!(meta.name, "custom-name"); + assert_eq!(meta.version, "2.3.4"); + assert_eq!(meta.release, "2"); + assert_eq!(meta.license, "Apache-2.0"); + assert_eq!(meta.summary, "A custom summary"); + assert_eq!(meta.description, "A longer description"); + assert_eq!(meta.vendor, "Acme Corp"); + assert_eq!(meta.url, Some("https://example.com".to_string())); + assert_eq!(meta.arch, "noarch"); // explicit override + assert_eq!(meta.requires, vec!["glibc >= 2.17"]); + } + + #[test] + fn test_extract_rpm_metadata_missing_version_error() { + let cmd = make_cmd(); + let pkg = PackageConfig { + install: "install.sh".to_string(), + version: "bad_version".to_string(), // invalid semver + name: None, + release: None, + license: None, + summary: None, + description: None, + vendor: None, + url: None, + arch: None, + requires: None, + files: None, + split: None, + }; + + let result = cmd.extract_rpm_metadata(&pkg, "x86_64-unknown-linux-gnu"); + assert!(result.is_err()); + } + + #[test] + fn test_config_deserialization_without_package() { + use std::io::Write; + use tempfile::NamedTempFile; + + let yaml = r#" +sdk: + image: "docker.io/avocadolinux/sdk:dev" + compile: + my-app: + compile: build.sh + packages: + gcc: "*" +"#; + let mut f = NamedTempFile::new().unwrap(); + write!(f, 
"{yaml}").unwrap(); + let config = Config::load(f.path()).unwrap(); + + let compile = config.sdk.unwrap().compile.unwrap(); + let section = compile.get("my-app").unwrap(); + assert_eq!(section.compile, Some("build.sh".to_string())); + assert!(section.package.is_none()); + } + + #[test] + fn test_config_deserialization_with_package_minimal() { + use std::io::Write; + use tempfile::NamedTempFile; + + let yaml = r#" +sdk: + image: "docker.io/avocadolinux/sdk:dev" + compile: + my-app: + compile: build.sh + package: + install: install.sh + version: "1.0.0" +"#; + let mut f = NamedTempFile::new().unwrap(); + write!(f, "{yaml}").unwrap(); + let config = Config::load(f.path()).unwrap(); + + let compile = config.sdk.unwrap().compile.unwrap(); + let section = compile.get("my-app").unwrap(); + let pkg = section.package.as_ref().unwrap(); + assert_eq!(pkg.install, "install.sh"); + assert_eq!(pkg.version, "1.0.0"); + assert!(pkg.name.is_none()); + assert!(pkg.split.is_none()); + } + + #[test] + fn test_config_deserialization_with_package_full() { + use std::io::Write; + use tempfile::NamedTempFile; + + let yaml = r#" +sdk: + image: "docker.io/avocadolinux/sdk:dev" + compile: + my-app: + compile: build.sh + package: + install: install.sh + version: "1.2.3" + name: my-custom-app + release: "2" + license: MIT + summary: "My custom app" + vendor: "Acme Corp" + arch: aarch64 + requires: + - "glibc >= 2.17" + files: + - /usr/bin/* + split: + dev: + summary: "Dev files" + files: + - /usr/include/** +"#; + let mut f = NamedTempFile::new().unwrap(); + write!(f, "{yaml}").unwrap(); + let config = Config::load(f.path()).unwrap(); + + let compile = config.sdk.unwrap().compile.unwrap(); + let section = compile.get("my-app").unwrap(); + let pkg = section.package.as_ref().unwrap(); + + assert_eq!(pkg.version, "1.2.3"); + assert_eq!(pkg.name, Some("my-custom-app".to_string())); + assert_eq!(pkg.release, Some("2".to_string())); + assert_eq!(pkg.license, Some("MIT".to_string())); + 
assert_eq!(pkg.arch, Some("aarch64".to_string())); + assert!(pkg + .requires + .as_ref() + .unwrap() + .contains(&"glibc >= 2.17".to_string())); + assert!(pkg.split.is_some()); + + let dev = pkg.split.as_ref().unwrap().get("dev").unwrap(); + assert_eq!(dev.files, vec!["/usr/include/**"]); + } +} diff --git a/src/main.rs b/src/main.rs index 915c928..054a498 100644 --- a/src/main.rs +++ b/src/main.rs @@ -26,7 +26,7 @@ use commands::runtime::{ }; use commands::sdk::{ SdkCleanCommand, SdkCompileCommand, SdkDepsCommand, SdkDnfCommand, SdkInstallCommand, - SdkRunCommand, + SdkPackageCommand, SdkRunCommand, }; use commands::sign::SignCommand; use commands::signing_keys::{ @@ -562,6 +562,29 @@ enum SdkCommands { #[arg(long = "dnf-arg", num_args = 1, allow_hyphen_values = true, action = clap::ArgAction::Append)] dnf_args: Option>, }, + /// Package a compiled SDK section into an RPM + Package { + /// Path to avocado.yaml configuration file + #[arg(short = 'C', long, default_value = "avocado.yaml")] + config: String, + /// Enable verbose output + #[arg(short, long)] + verbose: bool, + /// Target architecture + #[arg(short, long)] + target: Option, + /// Compile section to package (must have a 'package' block in config) + section: String, + /// Output directory on host for the built RPM(s) + #[arg(long = "out")] + out_dir: Option, + /// Additional arguments to pass to the container runtime + #[arg(long = "container-arg", num_args = 1, allow_hyphen_values = true, action = clap::ArgAction::Append)] + container_args: Option>, + /// Additional arguments to pass to DNF commands + #[arg(long = "dnf-arg", num_args = 1, allow_hyphen_values = true, action = clap::ArgAction::Append)] + dnf_args: Option>, + }, } #[derive(Subcommand)] @@ -1737,6 +1760,29 @@ async fn main() -> Result<()> { clean_cmd.execute().await?; Ok(()) } + SdkCommands::Package { + config, + verbose, + target, + section, + out_dir, + container_args, + dnf_args, + } => { + let package_cmd = SdkPackageCommand::new( + 
config, + verbose, + section, + out_dir, + target.or(cli.target), + container_args, + dnf_args, + ) + .with_no_stamps(cli.no_stamps) + .with_sdk_arch(cli.sdk_arch.clone()); + package_cmd.execute().await?; + Ok(()) + } }, } } diff --git a/src/utils/config.rs b/src/utils/config.rs index 35a1643..f99b736 100644 --- a/src/utils/config.rs +++ b/src/utils/config.rs @@ -416,6 +416,49 @@ pub struct CompileConfig { pub clean: Option, #[serde(alias = "dependencies")] pub packages: Option>, + #[serde(default)] + pub package: Option, +} + +/// RPM packaging configuration for a compiled SDK section +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct PackageConfig { + /// Script that stages files to $DESTDIR (required) + pub install: String, + /// RPM version in semver format (required) + pub version: String, + /// RPM package name (defaults to section name) + pub name: Option, + /// RPM release tag (defaults to "1") + pub release: Option, + /// RPM license field (defaults to "Unspecified") + pub license: Option, + /// RPM summary line (auto-generated from name if missing) + pub summary: Option, + /// RPM description (auto-generated from name if missing) + pub description: Option, + /// RPM vendor field (defaults to "Unspecified") + pub vendor: Option, + /// RPM URL field (optional) + pub url: Option, + /// RPM target architecture (derived from target triple if not set) + pub arch: Option, + /// RPM Requires dependencies + pub requires: Option>, + /// Glob patterns for files included in main package (all staged files if omitted) + pub files: Option>, + /// Sub-package definitions (Yocto-inspired splitting) + pub split: Option>, +} + +/// Sub-package configuration for split RPM packages +#[derive(Debug, Clone, Deserialize, Serialize)] +pub struct SplitPackageConfig { + pub summary: Option, + pub description: Option, + pub requires: Option>, + /// Glob patterns for files belonging to this sub-package (required) + pub files: Vec, } /// Provision profile configuration @@ 
-3820,9 +3863,13 @@ pub fn find_active_extensions( Ok(active_extensions) } -/// Find the set of sdk.compile section names that are referenced by active extensions. +/// Find the set of sdk.compile section names that are referenced by active extensions +/// or by any runtime's `kernel.compile` field. +/// +/// This examines: +/// - The `packages` section of each active extension for `compile:` references +/// - The `kernel.compile` field of every runtime /// -/// This examines the `packages` section of each active extension for `compile:` references, /// returning only the compile section names that are actually needed. pub fn find_active_compile_sections( parsed: &serde_yaml::Value, @@ -3840,6 +3887,21 @@ pub fn find_active_compile_sections( } } + // Also include compile sections referenced by runtimes via kernel.compile + if let Some(runtimes) = parsed.get("runtimes").and_then(|r| r.as_mapping()) { + for (_runtime_name, runtime_val) in runtimes { + if let Some(section_name) = runtime_val + .get("kernel") + .and_then(|k| k.get("compile")) + .and_then(|c| c.as_str()) + { + if seen.insert(section_name.to_string()) { + active_sections.push(section_name.to_string()); + } + } + } + } + active_sections.sort(); active_sections } @@ -4214,6 +4276,63 @@ extensions: assert!(result.is_err()); } + #[test] + fn test_find_active_compile_sections_via_runtime_kernel() { + let config_content = r#" +runtimes: + dev: + kernel: + compile: kernel + install: kernel-install.sh + +sdk: + image: "docker.io/avocadolinux/sdk:edge" + compile: + kernel: + compile: kernel-compile.sh + packages: + glibc-dev: '*' + libelf1: '*' +"#; + let parsed: serde_yaml::Value = serde_yaml::from_str(config_content).unwrap(); + let no_extensions = std::collections::HashSet::new(); + + let active = find_active_compile_sections(&parsed, &no_extensions); + + assert_eq!(active, vec!["kernel"]); + } + + #[test] + fn test_find_active_compile_sections_deduplicates() { + // A section referenced both by an extension 
and a runtime kernel.compile should appear once + let config_content = r#" +runtimes: + dev: + kernel: + compile: kernel + +extensions: + my-ext: + packages: + my-dep: + compile: kernel + +sdk: + image: "docker.io/avocadolinux/sdk:edge" + compile: + kernel: + compile: kernel-compile.sh +"#; + let parsed: serde_yaml::Value = serde_yaml::from_str(config_content).unwrap(); + let mut active_exts = std::collections::HashSet::new(); + active_exts.insert("my-ext".to_string()); + + let active = find_active_compile_sections(&parsed, &active_exts); + + assert_eq!(active.len(), 1); + assert_eq!(active[0], "kernel"); + } + #[test] fn test_sdk_container_args() { let config_content = r#" diff --git a/src/utils/container.rs b/src/utils/container.rs index 1218d63..575382e 100644 --- a/src/utils/container.rs +++ b/src/utils/container.rs @@ -1617,6 +1617,32 @@ mkdir -p ${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars echo "${{REPO_URL}}" > /etc/dnf/vars/repo_url echo "${{REPO_URL}}" > ${{DNF_SDK_HOST_PREFIX}}/etc/dnf/vars/repo_url echo "${{REPO_URL}}" > ${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars/repo_url + +# Re-apply machine-scoped SDK arch and rpm platform to override any package scriptlet +# resets. Package post-install scripts (via update-alternatives) may register a generic +# (host-arch-only) alternative. Without this repair the rpm platform stays at +# x86_64_avocadosdk and RPM rejects qemux86_64_x86_64_avocadosdk packages as +# "intended for a different architecture" during the transaction check. 
+_SDK_MACHINE_US=$(echo "${{AVOCADO_TARGET}}" | tr '-' '_') +_SDK_HOST_US=$(uname -m | tr '-' '_') +_SDKIMGARCH="${{_SDK_MACHINE_US}}_${{_SDK_HOST_US}}_avocadosdk" +if [ -f "${{AVOCADO_SDK_PREFIX}}/etc/dnf/vars/arch" ]; then + _ARCH_FILE="${{AVOCADO_SDK_PREFIX}}/etc/dnf/vars/arch" + _ARCH_CURRENT=$(cat "${{_ARCH_FILE}}" 2>/dev/null || echo "") + _ARCH_FIRST=$(echo "${{_ARCH_CURRENT}}" | cut -d: -f1) + if [ -n "${{_ARCH_CURRENT}}" ] && [ "${{_ARCH_FIRST}}" != "${{_SDKIMGARCH}}" ]; then + rm -f "${{_ARCH_FILE}}" + echo "${{_SDKIMGARCH}}:${{_ARCH_CURRENT}}" > "${{_ARCH_FILE}}" + fi +fi +if [ -f "${{AVOCADO_SDK_PREFIX}}/etc/rpm/platform" ]; then + _PLATFORM_FILE="${{AVOCADO_SDK_PREFIX}}/etc/rpm/platform" + _PLATFORM_CURRENT=$(cat "${{_PLATFORM_FILE}}" 2>/dev/null || echo "") + if [ "${{_PLATFORM_CURRENT}}" != "${{_SDKIMGARCH}}-avocado-linux" ]; then + rm -f "${{_PLATFORM_FILE}}" + echo "${{_SDKIMGARCH}}-avocado-linux" > "${{_PLATFORM_FILE}}" + fi +fi "# ); @@ -1840,6 +1866,32 @@ mkdir -p ${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars echo "${{REPO_URL}}" > /etc/dnf/vars/repo_url echo "${{REPO_URL}}" > ${{DNF_SDK_HOST_PREFIX}}/etc/dnf/vars/repo_url echo "${{REPO_URL}}" > ${{DNF_SDK_TARGET_PREFIX}}/etc/dnf/vars/repo_url + +# Re-apply machine-scoped SDK arch and rpm platform to override any package scriptlet +# resets. Package post-install scripts (via update-alternatives) may register a generic +# (host-arch-only) alternative. Without this repair the rpm platform stays at +# x86_64_avocadosdk and RPM rejects qemux86_64_x86_64_avocadosdk packages as +# "intended for a different architecture" during the transaction check. 
+_SDK_MACHINE_US=$(echo "${{AVOCADO_TARGET}}" | tr '-' '_') +_SDK_HOST_US=$(uname -m | tr '-' '_') +_SDKIMGARCH="${{_SDK_MACHINE_US}}_${{_SDK_HOST_US}}_avocadosdk" +if [ -f "${{AVOCADO_SDK_PREFIX}}/etc/dnf/vars/arch" ]; then + _ARCH_FILE="${{AVOCADO_SDK_PREFIX}}/etc/dnf/vars/arch" + _ARCH_CURRENT=$(cat "${{_ARCH_FILE}}" 2>/dev/null || echo "") + _ARCH_FIRST=$(echo "${{_ARCH_CURRENT}}" | cut -d: -f1) + if [ -n "${{_ARCH_CURRENT}}" ] && [ "${{_ARCH_FIRST}}" != "${{_SDKIMGARCH}}" ]; then + rm -f "${{_ARCH_FILE}}" + echo "${{_SDKIMGARCH}}:${{_ARCH_CURRENT}}" > "${{_ARCH_FILE}}" + fi +fi +if [ -f "${{AVOCADO_SDK_PREFIX}}/etc/rpm/platform" ]; then + _PLATFORM_FILE="${{AVOCADO_SDK_PREFIX}}/etc/rpm/platform" + _PLATFORM_CURRENT=$(cat "${{_PLATFORM_FILE}}" 2>/dev/null || echo "") + if [ "${{_PLATFORM_CURRENT}}" != "${{_SDKIMGARCH}}-avocado-linux" ]; then + rm -f "${{_PLATFORM_FILE}}" + echo "${{_SDKIMGARCH}}-avocado-linux" > "${{_PLATFORM_FILE}}" + fi +fi "# ); @@ -2226,6 +2278,23 @@ mod tests { assert!(!script.contains("cd /opt/src")); } + #[test] + fn test_entrypoint_script_sdkimgarch_repair() { + let container = SdkContainer::new(); + let script = + container.create_entrypoint_script(true, None, None, "qemux86-64", false, false); + // Verify arch repair block + assert!(script.contains("_SDKIMGARCH=")); + assert!(script.contains("_ARCH_FIRST=$(echo \"${_ARCH_CURRENT}\" | cut -d: -f1)")); + assert!(script.contains("[ \"${_ARCH_FIRST}\" != \"${_SDKIMGARCH}\" ]")); + assert!(script.contains("echo \"${_SDKIMGARCH}:${_ARCH_CURRENT}\"")); + // Verify platform repair block + assert!(script.contains("etc/rpm/platform")); + assert!(script.contains("\"${_SDKIMGARCH}-avocado-linux\"")); + // AVOCADO_TARGET is used to derive machine component + assert!(script.contains("echo \"${AVOCADO_TARGET}\" | tr '-' '_'")); + } + #[test] fn test_entrypoint_script_no_bootstrap() { let container = SdkContainer::new(); diff --git a/tests/commands/avocado/sdk/install.rs 
b/tests/commands/avocado/sdk/install.rs index 1333d39..9a593ff 100644 --- a/tests/commands/avocado/sdk/install.rs +++ b/tests/commands/avocado/sdk/install.rs @@ -30,3 +30,17 @@ fn test_short_help() { common::assert_cmd(&["sdk", "install", "-h"], None, None); }); } + +/// The SDKIMGARCH arch repair logic in the entrypoint setup script ensures that +/// (target_underscored)_(host_arch)_avocadosdk is always first in +/// $AVOCADO_SDK_PREFIX/etc/dnf/vars/arch, overriding any package post-install +/// scripts that register a generic host-arch-only alternative via update-alternatives. +/// The unit-level coverage for this lives in container.rs (test_entrypoint_script_sdkimgarch_repair). +#[test] +fn test_sdk_install_help_sdkimgarch() { + with_rust_cli(|| { + // Verify the sdk install subcommand is reachable; this guards against + // regressions introduced while adding the SDKIMGARCH init changes. + common::assert_cmd(&["sdk", "install", "--help"], None, None); + }); +} From c5dbcdbd8457b44cc4dd973bd554d7e25bb454bd Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Thu, 5 Mar 2026 19:15:11 -0500 Subject: [PATCH 07/12] add --out flag to ext image to copy resulting image to host --- src/commands/ext/image.rs | 124 +++++++++++++++++++++++++++++++++++--- src/main.rs | 7 ++- 2 files changed, 121 insertions(+), 10 deletions(-) diff --git a/src/commands/ext/image.rs b/src/commands/ext/image.rs index 6612786..f4b899e 100644 --- a/src/commands/ext/image.rs +++ b/src/commands/ext/image.rs @@ -1,4 +1,5 @@ use anyhow::{Context, Result}; +use std::path::PathBuf; use std::sync::Arc; use super::find_ext_in_mapping; @@ -23,6 +24,7 @@ pub struct ExtImageCommand { runs_on: Option, nfs_port: Option, sdk_arch: Option, + output_dir: Option, /// Pre-composed configuration to avoid reloading composed_config: Option>, } @@ -47,6 +49,7 @@ impl ExtImageCommand { runs_on: None, nfs_port: None, sdk_arch: None, + output_dir: None, composed_config: None, } } @@ -70,6 +73,12 @@ impl ExtImageCommand { 
self } + /// Set host output directory to copy the image to after creation + pub fn with_output_dir(mut self, output_dir: Option) -> Self { + self.output_dir = output_dir; + self + } + /// Set pre-composed configuration to avoid reloading pub fn with_composed_config(mut self, config: Arc) -> Self { self.composed_config = Some(config); @@ -397,15 +406,44 @@ impl ExtImageCommand { .await?; if result { - print_success( - &format!( - "Successfully created image for extension '{}-{}' (types: {}).", - self.extension, - &ext_version, - ext_types.join(", ") - ), - OutputLevel::Normal, - ); + let image_filename = format!("{}-{}.raw", self.extension, ext_version); + let container_image_path = + format!("/opt/_avocado/{target_arch}/output/extensions/{image_filename}"); + + // Copy image to host if --out specified + if let Some(output_dir) = &self.output_dir { + let cwd = std::env::current_dir().context("Failed to get current directory")?; + let volume_manager = + crate::utils::volume::VolumeManager::new("docker".to_string(), self.verbose); + let volume_state = volume_manager.get_or_create_volume(&cwd).await?; + self.copy_image_to_host( + &volume_state.volume_name, + &container_image_path, + output_dir, + &image_filename, + container_image, + ) + .await?; + print_success( + &format!( + "Successfully created image for extension '{}-{}': {}", + self.extension, + &ext_version, + PathBuf::from(output_dir).join(&image_filename).display() + ), + OutputLevel::Normal, + ); + } else { + print_success( + &format!( + "Successfully created image for extension '{}-{}' (types: {}).", + self.extension, + &ext_version, + ext_types.join(", ") + ), + OutputLevel::Normal, + ); + } // Write extension image stamp (unless --no-stamps) if !self.no_stamps { @@ -451,6 +489,74 @@ impl ExtImageCommand { Ok(()) } + async fn copy_image_to_host( + &self, + volume_name: &str, + container_image_path: &str, + output_dir: &str, + image_filename: &str, + _container_image: &str, + ) -> Result<()> { + if 
self.verbose { + print_info( + &format!("Copying image to host: {output_dir}/{image_filename}"), + OutputLevel::Normal, + ); + } + + let host_output_dir = if output_dir.starts_with('/') { + PathBuf::from(output_dir) + } else { + std::env::current_dir()?.join(output_dir) + }; + std::fs::create_dir_all(&host_output_dir)?; + + let temp_container_id = self.create_temp_container(volume_name).await?; + + let docker_cp_source = format!("{temp_container_id}:{container_image_path}"); + let docker_cp_dest = host_output_dir.join(image_filename); + + let output = tokio::process::Command::new("docker") + .args(["cp", &docker_cp_source, docker_cp_dest.to_str().unwrap()]) + .output() + .await + .context("Failed to run docker cp")?; + + let _ = tokio::process::Command::new("docker") + .args(["rm", "-f", &temp_container_id]) + .output() + .await; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(anyhow::anyhow!("Failed to copy image to host: {stderr}")); + } + + Ok(()) + } + + async fn create_temp_container(&self, volume_name: &str) -> Result { + let output = tokio::process::Command::new("docker") + .args([ + "create", + "--rm", + "-v", + &format!("{volume_name}:/opt/_avocado"), + "busybox", + "true", + ]) + .output() + .await + .context("Failed to create temp container")?; + + if !output.status.success() { + let stderr = String::from_utf8_lossy(&output.stderr); + return Err(anyhow::anyhow!("Failed to create temp container: {stderr}")); + } + + Ok(String::from_utf8_lossy(&output.stdout).trim().to_string()) + } + #[allow(clippy::too_many_arguments)] async fn create_image( &self, diff --git a/src/main.rs b/src/main.rs index 054a498..7cf32cd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1554,6 +1554,7 @@ async fn main() -> Result<()> { config, verbose, target, + out_dir, container_args, dnf_args, } => { @@ -1566,7 +1567,8 @@ async fn main() -> Result<()> { dnf_args, ) .with_no_stamps(cli.no_stamps) - 
.with_sdk_arch(cli.sdk_arch.clone()); + .with_sdk_arch(cli.sdk_arch.clone()) + .with_output_dir(out_dir); image_cmd.execute().await?; Ok(()) } @@ -1959,6 +1961,9 @@ enum ExtCommands { /// Target architecture #[arg(short, long)] target: Option, + /// Output directory on host to copy the resulting image to + #[arg(long = "out")] + out_dir: Option, /// Additional arguments to pass to the container runtime #[arg(long = "container-arg", num_args = 1, allow_hyphen_values = true, action = clap::ArgAction::Append)] container_args: Option>, From c3a7d79801764eadc3541f8a4f6b0fb21654ca10 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Thu, 5 Mar 2026 19:35:12 -0500 Subject: [PATCH 08/12] make ext/runtime subcommand names positional args with -e/-r as hidden compat flags Extension and runtime names can now be passed as positional arguments (e.g. `avocado ext image app`, `avocado runtime build dev`) matching the pattern already used by `avocado sdk compile `. The -e and -r flags are kept hidden for backward compatibility. Note: `ext dnf` and `runtime dnf` are excluded due to clap limitations with trailing_var_arg. 
--- src/main.rs | 168 ++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 124 insertions(+), 44 deletions(-) diff --git a/src/main.rs b/src/main.rs index 7cf32cd..dca83fa 100644 --- a/src/main.rs +++ b/src/main.rs @@ -591,6 +591,8 @@ enum SdkCommands { enum RuntimeCommands { /// Install dependencies into runtime installroots Install { + /// Runtime name (if not provided, installs for all runtimes) + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, @@ -600,8 +602,8 @@ enum RuntimeCommands { /// Force the operation to proceed, bypassing warnings or confirmation prompts #[arg(short, long)] force: bool, - /// Runtime name to install dependencies for (if not provided, installs for all runtimes) - #[arg(short = 'r', long = "runtime")] + /// Runtime name (deprecated, use positional argument) + #[arg(short = 'r', long = "runtime", hide = true)] runtime: Option, /// Target architecture #[arg(short, long)] @@ -615,6 +617,8 @@ enum RuntimeCommands { }, /// Build a runtime Build { + /// Runtime name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, @@ -624,9 +628,9 @@ enum RuntimeCommands { /// Force the operation to proceed, bypassing warnings or confirmation prompts #[arg(short, long)] force: bool, - /// Runtime name to build - #[arg(short = 'r', long = "runtime", required = true)] - runtime: String, + /// Runtime name (deprecated, use positional argument) + #[arg(short = 'r', long = "runtime", hide = true)] + runtime: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -639,6 +643,8 @@ enum RuntimeCommands { }, /// Provision a runtime Provision { + /// Runtime name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, @@ -648,9 +654,9 @@ enum RuntimeCommands { /// Force the operation to proceed, 
bypassing warnings or confirmation prompts #[arg(short, long)] force: bool, - /// Runtime name to provision - #[arg(short = 'r', long = "runtime", required = true)] - runtime: String, + /// Runtime name (deprecated, use positional argument) + #[arg(short = 'r', long = "runtime", hide = true)] + runtime: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -681,12 +687,14 @@ enum RuntimeCommands { }, /// List dependencies for a runtime Deps { + /// Runtime name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, - /// Runtime name to list dependencies for - #[arg(short = 'r', long = "runtime", required = true)] - runtime: String, + /// Runtime name (deprecated, use positional argument) + #[arg(short = 'r', long = "runtime", hide = true)] + runtime: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -717,15 +725,17 @@ enum RuntimeCommands { }, /// Clean runtime installroot directory Clean { + /// Runtime name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Name of the runtime to clean - #[arg(short = 'r', long = "runtime", required = true)] - runtime: String, + /// Runtime name (deprecated, use positional argument) + #[arg(short = 'r', long = "runtime", hide = true)] + runtime: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -738,15 +748,17 @@ enum RuntimeCommands { }, /// Deploy a runtime to a device Deploy { + /// Runtime name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Runtime name to deploy - #[arg(short = 'r', long = "runtime", required = true)] - runtime: String, + /// Runtime name (deprecated, use positional 
argument) + #[arg(short = 'r', long = "runtime", hide = true)] + runtime: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -762,15 +774,17 @@ enum RuntimeCommands { }, /// Sign runtime images Sign { + /// Runtime name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Runtime name to sign - #[arg(short = 'r', long = "runtime", required = true)] - runtime: String, + /// Runtime name (deprecated, use positional argument) + #[arg(short = 'r', long = "runtime", hide = true)] + runtime: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -1200,6 +1214,7 @@ async fn main() -> Result<()> { } Commands::Runtime { command } => match command { RuntimeCommands::Install { + name, runtime, config, verbose, @@ -1208,6 +1223,7 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let runtime = name.or(runtime); // Validate runtime exists if provided validate_runtime_if_provided(&config, runtime.as_ref())?; @@ -1226,6 +1242,7 @@ async fn main() -> Result<()> { Ok(()) } RuntimeCommands::Build { + name, runtime, config, verbose, @@ -1234,6 +1251,9 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let runtime = name + .or(runtime) + .context("runtime name is required (provide as positional or -r/--runtime)")?; // Validate runtime exists (required argument) validate_runtime_required(&config, &runtime)?; @@ -1252,6 +1272,7 @@ async fn main() -> Result<()> { Ok(()) } RuntimeCommands::Provision { + name, runtime, config, verbose, @@ -1263,6 +1284,9 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let runtime = name + .or(runtime) + .context("runtime name is required (provide as positional or -r/--runtime)")?; // Validate runtime exists (required argument) validate_runtime_required(&config, &runtime)?; @@ -1294,10 +1318,14 @@ async fn main() -> 
Result<()> { Ok(()) } RuntimeCommands::Deps { + name, config, runtime, target: _, } => { + let runtime = name + .or(runtime) + .context("runtime name is required (provide as positional or -r/--runtime)")?; // Validate runtime exists (required argument) validate_runtime_required(&config, &runtime)?; @@ -1331,6 +1359,7 @@ async fn main() -> Result<()> { Ok(()) } RuntimeCommands::Clean { + name, config, verbose, runtime, @@ -1338,6 +1367,9 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let runtime = name + .or(runtime) + .context("runtime name is required (provide as positional or -r/--runtime)")?; // Validate runtime exists (required argument) validate_runtime_required(&config, &runtime)?; @@ -1354,6 +1386,7 @@ async fn main() -> Result<()> { Ok(()) } RuntimeCommands::Deploy { + name, config, verbose, runtime, @@ -1362,6 +1395,9 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let runtime = name + .or(runtime) + .context("runtime name is required (provide as positional or -r/--runtime)")?; // Validate runtime exists (required argument) validate_runtime_required(&config, &runtime)?; @@ -1380,6 +1416,7 @@ async fn main() -> Result<()> { Ok(()) } RuntimeCommands::Sign { + name, config, verbose, runtime, @@ -1387,6 +1424,9 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let runtime = name + .or(runtime) + .context("runtime name is required (provide as positional or -r/--runtime)")?; // Validate runtime exists (required argument) validate_runtime_required(&config, &runtime)?; @@ -1406,6 +1446,7 @@ async fn main() -> Result<()> { }, Commands::Ext { command } => match command { ExtCommands::Install { + name, config, verbose, force, @@ -1415,7 +1456,7 @@ async fn main() -> Result<()> { dnf_args, } => { let install_cmd = ExtInstallCommand::new( - extension, + name.or(extension), config, verbose, force, @@ -1429,6 +1470,7 @@ async fn main() -> Result<()> { Ok(()) } ExtCommands::Fetch { + name, config, verbose, 
force, @@ -1438,7 +1480,7 @@ async fn main() -> Result<()> { } => { let fetch_cmd = ExtFetchCommand::new( config, - extension, + name.or(extension), verbose, force, target.or(cli.target.clone()), @@ -1449,6 +1491,7 @@ async fn main() -> Result<()> { Ok(()) } ExtCommands::Build { + name, extension, config, verbose, @@ -1456,6 +1499,9 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let extension = name.or(extension).context( + "extension name is required (provide as positional or -e/--extension)", + )?; let build_cmd = ExtBuildCommand::new( extension, config, @@ -1471,6 +1517,7 @@ async fn main() -> Result<()> { Ok(()) } ExtCommands::Checkout { + name, config, verbose, extension, @@ -1479,6 +1526,9 @@ async fn main() -> Result<()> { src_path, container_tool, } => { + let extension = name.or(extension).context( + "extension name is required (provide as positional or -e/--extension)", + )?; let checkout_cmd = ExtCheckoutCommand::new( extension, ext_path, @@ -1499,11 +1549,13 @@ async fn main() -> Result<()> { Ok(()) } ExtCommands::Deps { + name, config, extension, target, } => { - let deps_cmd = ExtDepsCommand::new(config, extension, target.or(cli.target)); + let deps_cmd = + ExtDepsCommand::new(config, name.or(extension), target.or(cli.target)); deps_cmd.execute()?; Ok(()) } @@ -1530,6 +1582,7 @@ async fn main() -> Result<()> { Ok(()) } ExtCommands::Clean { + name, extension, config, verbose, @@ -1537,6 +1590,9 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let extension = name.or(extension).context( + "extension name is required (provide as positional or -e/--extension)", + )?; let clean_cmd = ExtCleanCommand::new( extension, config, @@ -1550,6 +1606,7 @@ async fn main() -> Result<()> { Ok(()) } ExtCommands::Image { + name, extension, config, verbose, @@ -1558,6 +1615,9 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let extension = name.or(extension).context( + "extension name is required (provide as 
positional or -e/--extension)", + )?; let image_cmd = ExtImageCommand::new( extension, config, @@ -1573,6 +1633,7 @@ async fn main() -> Result<()> { Ok(()) } ExtCommands::Package { + name, extension, target, config, @@ -1581,6 +1642,9 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let extension = name.or(extension).context( + "extension name is required (provide as positional or -e/--extension)", + )?; let package_cmd = ExtPackageCommand::new( config, extension, @@ -1793,6 +1857,8 @@ async fn main() -> Result<()> { enum ExtCommands { /// Install dependencies into extension sysroots Install { + /// Extension name (if not provided, installs all extensions) + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, @@ -1802,8 +1868,8 @@ enum ExtCommands { /// Force the operation to proceed, bypassing warnings or confirmation prompts #[arg(short, long)] force: bool, - /// Name of the extension to install (if not provided, installs all extensions) - #[arg(short = 'e', long = "extension")] + /// Extension name (deprecated, use positional argument) + #[arg(short = 'e', long = "extension", hide = true)] extension: Option, /// Target architecture #[arg(short, long)] @@ -1817,6 +1883,8 @@ enum ExtCommands { }, /// Fetch remote extensions from repo, git, or path sources Fetch { + /// Extension name (if not provided, fetches all remote extensions) + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, @@ -1826,8 +1894,8 @@ enum ExtCommands { /// Force re-fetch even if already installed #[arg(short, long)] force: bool, - /// Name of the extension to fetch (if not provided, fetches all remote extensions) - #[arg(short = 'e', long = "extension")] + /// Extension name (deprecated, use positional argument) + #[arg(short = 'e', long = "extension", hide = true)] extension: Option, /// Target architecture 
#[arg(short, long)] @@ -1838,15 +1906,17 @@ enum ExtCommands { }, /// Build sysext and/or confext extensions from configuration Build { + /// Extension name (must be defined in config) + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Name of the extension to build (must be defined in config) - #[arg(short = 'e', long = "extension", required = true)] - extension: String, + /// Extension name (deprecated, use positional argument) + #[arg(short = 'e', long = "extension", hide = true)] + extension: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -1868,11 +1938,13 @@ enum ExtCommands { }, /// List dependencies for extensions Deps { + /// Extension name (if not provided, shows all extensions) + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, - /// Name of the extension to show dependencies for (if not provided, shows all extensions) - #[arg(short = 'e', long = "extension")] + /// Extension name (deprecated, use positional argument) + #[arg(short = 'e', long = "extension", hide = true)] extension: Option, /// Target architecture #[arg(short, long)] @@ -1904,15 +1976,17 @@ enum ExtCommands { }, /// Clean an extension's sysroot Clean { + /// Extension name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Name of the extension to clean - #[arg(short = 'e', long = "extension", required = true)] - extension: String, + /// Extension name (deprecated, use positional argument) + #[arg(short = 'e', long = "extension", hide = true)] + extension: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -1925,15 +1999,17 @@ enum ExtCommands { }, /// Check out files 
from extension sysroot to source directory Checkout { + /// Extension name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Name of the extension to checkout from - #[arg(short = 'e', long = "extension", required = true)] - extension: String, + /// Extension name (deprecated, use positional argument) + #[arg(short = 'e', long = "extension", hide = true)] + extension: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -1949,15 +2025,17 @@ enum ExtCommands { }, /// Create squashfs image from system extension Image { + /// Extension name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Name of the extension to create image for - #[arg(short = 'e', long = "extension", required = true)] - extension: String, + /// Extension name (deprecated, use positional argument) + #[arg(short = 'e', long = "extension", hide = true)] + extension: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -1973,15 +2051,17 @@ enum ExtCommands { }, /// Package extension sysroot into an RPM Package { + /// Extension name + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Name of the extension to package - #[arg(short = 'e', long = "extension", required = true)] - extension: String, + /// Extension name (deprecated, use positional argument) + #[arg(short = 'e', long = "extension", hide = true)] + extension: Option, /// Target architecture #[arg(short, long)] target: Option, From a8b3b06a8e8dcdd797c05b83c1fabd9c267b8cd5 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Thu, 5 Mar 2026 19:40:58 -0500 Subject: 
[PATCH 09/12] extend positional name args to top-level provision/deploy/sign commands and fix commands - Add positional name arg to Commands::Provision, Deploy, Sign (hide -r/--runtime flag) - Update stamp fix_command() output to use positional syntax (avocado ext build app) - Update all tests to match new positional fix command format --- src/commands/ext/checkout.rs | 2 +- src/main.rs | 35 +++++++++++++++++++++++++++-------- src/utils/stamps.rs | 32 ++++++++++++++++---------------- 3 files changed, 44 insertions(+), 25 deletions(-) diff --git a/src/commands/ext/checkout.rs b/src/commands/ext/checkout.rs index cd5752d..bef8d56 100644 --- a/src/commands/ext/checkout.rs +++ b/src/commands/ext/checkout.rs @@ -607,7 +607,7 @@ mod tests { assert_eq!(requirements[0].fix_command(), "avocado sdk install"); assert_eq!( requirements[1].fix_command(), - "avocado ext install -e config-files" + "avocado ext install config-files" ); } diff --git a/src/main.rs b/src/main.rs index dca83fa..82f81d4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -263,6 +263,8 @@ enum Commands { }, /// Provision a runtime (shortcut for 'runtime provision') Provision { + /// Runtime name (must be defined in config) + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, @@ -272,9 +274,9 @@ enum Commands { /// Force the operation to proceed, bypassing warnings or confirmation prompts #[arg(short, long)] force: bool, - /// Runtime name to provision - #[arg(short = 'r', long = "runtime", required = true)] - runtime: String, + /// Runtime name to provision (deprecated, use positional argument) + #[arg(short = 'r', long = "runtime", hide = true)] + runtime: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -296,15 +298,17 @@ enum Commands { }, /// Deploy a runtime to a device (shortcut for 'runtime deploy') Deploy { + /// Runtime name (must be defined in config) + name: Option, /// Path to avocado.yaml 
configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Runtime name to deploy - #[arg(short = 'r', long = "runtime", required = true)] - runtime: String, + /// Runtime name to deploy (deprecated, use positional argument) + #[arg(short = 'r', long = "runtime", hide = true)] + runtime: Option, /// Target architecture #[arg(short, long)] target: Option, @@ -326,14 +330,16 @@ enum Commands { }, /// Sign runtime images (shortcut for 'runtime sign') Sign { + /// Runtime name to sign (if not provided, signs all runtimes with signing config) + name: Option, /// Path to avocado.yaml configuration file #[arg(short = 'C', long, default_value = "avocado.yaml")] config: String, /// Enable verbose output #[arg(short, long)] verbose: bool, - /// Runtime name to sign (if not provided, signs all runtimes with signing config) - #[arg(short = 'r', long = "runtime")] + /// Runtime name to sign (deprecated, use positional argument) + #[arg(short = 'r', long = "runtime", hide = true)] runtime: Option, /// Target architecture #[arg(short, long)] @@ -1066,6 +1072,7 @@ async fn main() -> Result<()> { Ok(()) } Commands::Provision { + name, config, verbose, force, @@ -1077,6 +1084,10 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let runtime = name + .or(runtime) + .context("runtime name is required (provide as positional or -r/--runtime)")?; + // Validate runtime exists (required argument) validate_runtime_required(&config, &runtime)?; @@ -1101,6 +1112,7 @@ async fn main() -> Result<()> { Ok(()) } Commands::Deploy { + name, config, verbose, runtime, @@ -1109,6 +1121,10 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let runtime = name + .or(runtime) + .context("runtime name is required (provide as positional or -r/--runtime)")?; + // Validate runtime exists (required argument) validate_runtime_required(&config, &runtime)?; @@ -1160,6 +1176,7 
@@ async fn main() -> Result<()> { } }, Commands::Sign { + name, config, verbose, runtime, @@ -1167,6 +1184,8 @@ async fn main() -> Result<()> { container_args, dnf_args, } => { + let runtime = name.or(runtime); + // Validate runtime exists if provided validate_runtime_if_provided(&config, runtime.as_ref())?; diff --git a/src/utils/stamps.rs b/src/utils/stamps.rs index ebe2000..0115471 100644 --- a/src/utils/stamps.rs +++ b/src/utils/stamps.rs @@ -507,25 +507,25 @@ impl StampRequirement { None => "avocado sdk install".to_string(), }, (StampComponent::Extension, Some(name), StampCommand::Install) => { - format!("avocado ext install -e {name}") + format!("avocado ext install {name}") } (StampComponent::Extension, Some(name), StampCommand::Build) => { - format!("avocado ext build -e {name}") + format!("avocado ext build {name}") } (StampComponent::Extension, Some(name), StampCommand::Image) => { - format!("avocado ext image -e {name}") + format!("avocado ext image {name}") } (StampComponent::Runtime, Some(name), StampCommand::Install) => { - format!("avocado runtime install -r {name}") + format!("avocado runtime install {name}") } (StampComponent::Runtime, Some(name), StampCommand::Build) => { - format!("avocado runtime build -r {name}") + format!("avocado runtime build {name}") } (StampComponent::Runtime, Some(name), StampCommand::Sign) => { - format!("avocado runtime sign -r {name}") + format!("avocado runtime sign {name}") } (StampComponent::Runtime, Some(name), StampCommand::Provision) => { - format!("avocado runtime provision -r {name}") + format!("avocado runtime provision {name}") } _ => format!("avocado {} {}", self.component, self.command), } @@ -1363,11 +1363,11 @@ mod tests { let req = StampRequirement::ext_install("gpu-driver"); assert_eq!(req.description(), "extension 'gpu-driver' install"); - assert_eq!(req.fix_command(), "avocado ext install -e gpu-driver"); + assert_eq!(req.fix_command(), "avocado ext install gpu-driver"); let req = 
StampRequirement::runtime_build("my-runtime"); assert_eq!(req.description(), "runtime 'my-runtime' build"); - assert_eq!(req.fix_command(), "avocado runtime build -r my-runtime"); + assert_eq!(req.fix_command(), "avocado runtime build my-runtime"); } #[test] @@ -1570,7 +1570,7 @@ mod tests { assert!(error_str.contains("config changed")); assert!(error_str.contains("To fix:")); assert!(error_str.contains("avocado sdk install")); - assert!(error_str.contains("avocado ext install -e gpu-driver")); + assert!(error_str.contains("avocado ext install gpu-driver")); } #[test] @@ -1748,7 +1748,7 @@ mod tests { fn test_ext_image_requirement_description_and_fix() { let req = StampRequirement::ext_image("gpu-driver"); assert_eq!(req.description(), "extension 'gpu-driver' image"); - assert_eq!(req.fix_command(), "avocado ext image -e gpu-driver"); + assert_eq!(req.fix_command(), "avocado ext image gpu-driver"); assert_eq!(req.relative_path(), "ext/gpu-driver/image.stamp"); } @@ -1920,8 +1920,8 @@ ext/my-ext/build.stamp:::null"# // Verify fix commands are correct assert_eq!(reqs[0].fix_command(), "avocado sdk install"); - assert_eq!(reqs[1].fix_command(), "avocado ext install -e my-ext"); - assert_eq!(reqs[2].fix_command(), "avocado ext build -e my-ext"); + assert_eq!(reqs[1].fix_command(), "avocado ext install my-ext"); + assert_eq!(reqs[2].fix_command(), "avocado ext build my-ext"); // Verify descriptions are helpful (SDK now includes architecture) assert_eq!( @@ -1943,7 +1943,7 @@ ext/my-ext/build.stamp:::null"# assert_eq!(reqs.len(), 2); assert_eq!(reqs[0].fix_command(), "avocado sdk install"); - assert_eq!(reqs[1].fix_command(), "avocado ext install -e my-ext"); + assert_eq!(reqs[1].fix_command(), "avocado ext install my-ext"); } #[test] @@ -2260,8 +2260,8 @@ runtime/my-runtime/build.stamp:::null"#, // Should include all fix commands assert!(msg.contains("avocado sdk install")); - assert!(msg.contains("avocado ext install -e app")); - assert!(msg.contains("avocado ext build 
-e app")); + assert!(msg.contains("avocado ext install app")); + assert!(msg.contains("avocado ext build app")); } #[test] From be9b9bc22525ad2281ac5e04920c293161b442c5 Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Thu, 5 Mar 2026 19:48:17 -0500 Subject: [PATCH 10/12] fix: pass AVOCADO_BUILD_DIR to kernel install script during runtime build The kernel install script needs AVOCADO_BUILD_DIR to locate the bzImage produced by the SDK compile step. Set it to $AVOCADO_SDK_PREFIX/build/
matching the path used during compilation. --- src/commands/runtime/build.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/commands/runtime/build.rs b/src/commands/runtime/build.rs index e3076d7..93b8577 100644 --- a/src/commands/runtime/build.rs +++ b/src/commands/runtime/build.rs @@ -293,7 +293,7 @@ impl RuntimeBuildCommand { target_arch, self.runtime_name ); let install_cmd = format!( - r#"mkdir -p "{runtime_build_dir}" && if [ -f '{install_script}' ]; then echo 'Running kernel install script: {install_script}'; export AVOCADO_RUNTIME_BUILD_DIR="{runtime_build_dir}"; bash '{install_script}'; else echo 'Kernel install script {install_script} not found.'; ls -la; exit 1; fi"# + r#"mkdir -p "{runtime_build_dir}" && if [ -f '{install_script}' ]; then echo 'Running kernel install script: {install_script}'; export AVOCADO_RUNTIME_BUILD_DIR="{runtime_build_dir}"; export AVOCADO_BUILD_DIR="$AVOCADO_SDK_PREFIX/build/{compile_section}"; bash '{install_script}'; else echo 'Kernel install script {install_script} not found.'; ls -la; exit 1; fi"# ); if self.verbose { From 3898caf2927823b139431adbdbe4a5d814ae8b6f Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: Thu, 5 Mar 2026 19:54:05 -0500 Subject: [PATCH 11/12] add automatic update notifications --- Cargo.toml | 1 + docs/features/update-notifications.md | 51 ++++++++++++++ src/main.rs | 23 ++++++- src/utils/mod.rs | 1 + src/utils/update_check.rs | 97 +++++++++++++++++++++++++++ 5 files changed, 172 insertions(+), 1 deletion(-) create mode 100644 docs/features/update-notifications.md create mode 100644 src/utils/update_check.rs diff --git a/Cargo.toml b/Cargo.toml index e37f352..42807b9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,6 +30,7 @@ tokio = { version = "1.0", features = [ "process", "io-util", "signal", + "time", ] } thiserror = "2.0" directories = "6.0" diff --git a/docs/features/update-notifications.md b/docs/features/update-notifications.md new file mode 100644 index 
0000000..da65f04 --- /dev/null +++ b/docs/features/update-notifications.md @@ -0,0 +1,51 @@ +# Automatic Update Notifications + +The avocado CLI automatically checks for new releases and notifies you when a newer version is available, without requiring you to explicitly run `avocado upgrade`. + +## How It Works + +When you run any `avocado` command, a background task is spawned concurrently with your command to query the latest release from GitHub. After your command finishes, the result is checked and a notice is printed to stderr if a newer version exists: + +``` +[UPDATE] avocado 0.28.0 is available (you have 0.27.0). + Run 'avocado upgrade' to update. +``` + +The check runs in the background so it does not slow down your command. A 5-second timeout limits any additional wait if the command finishes before the check completes. + +## Caching + +Results are cached for **24 hours** in the platform cache directory (e.g. `~/.cache/avocado/update_check.json` on Linux). No network call is made if the cache is fresh. + +To force a fresh check, delete the cache file: + +```sh +rm ~/.cache/avocado/update_check.json +``` + +## Opting Out + +Set the `AVOCADO_NO_UPDATE_CHECK` environment variable to skip the check entirely: + +```sh +AVOCADO_NO_UPDATE_CHECK=1 avocado build +``` + +To disable permanently, add it to your shell profile (`~/.bashrc`, `~/.zshrc`, etc.). 
+ +## Behavior Details + +| Scenario | Result | +|---|---| +| Cache hit (checked within 24h) | No network call; <1ms overhead | +| Cache miss, network available | Fetches GitHub API concurrently; notice shown at end if newer version found | +| Cache miss, offline | Silent — no error, no notice | +| Running `avocado upgrade` | Update check skipped (you are already upgrading) | +| `AVOCADO_NO_UPDATE_CHECK` set | Update check skipped entirely | + +## Implementation Notes + +- **Source**: [src/utils/update_check.rs](../../src/utils/update_check.rs) +- The check calls `https://api.github.com/repos/avocado-linux/avocado-cli/releases/latest` +- Output goes to **stderr** so it does not interfere with piped stdout output +- Requires no new crate dependencies — only the existing tokio crate's `time` feature is newly enabled; otherwise uses the existing `reqwest`, `serde_json`, `directories`, and `semver` crates diff --git a/src/main.rs b/src/main.rs index 82f81d4..b1ad8fd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -859,7 +859,14 @@ fn build_env_vars( async fn main() -> Result<()> { let cli = Cli::parse(); - match cli.command { + let is_upgrade = matches!(cli.command, Commands::Upgrade { ..
}); + let update_handle = if !is_upgrade { + Some(tokio::spawn(utils::update_check::check_for_update())) + } else { + None + }; + + let result = match cli.command { Commands::Init { directory, target, @@ -1869,7 +1876,21 @@ async fn main() -> Result<()> { Ok(()) } }, + }; + + if let Some(handle) = update_handle { + if let Ok(Ok(Some(version))) = + tokio::time::timeout(std::time::Duration::from_secs(5), handle).await + { + eprintln!( + "\n\x1b[93m[UPDATE]\x1b[0m avocado {} is available (you have {}).\n Run 'avocado upgrade' to update.", + version, + env!("CARGO_PKG_VERSION") + ); + } } + + result } #[derive(Subcommand)] diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 5376ff8..9488817 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -15,6 +15,7 @@ pub mod signing_keys; pub mod signing_service; pub mod stamps; pub mod target; +pub mod update_check; pub mod update_repo; pub mod update_signing; pub mod version; diff --git a/src/utils/update_check.rs b/src/utils/update_check.rs new file mode 100644 index 0000000..48c3ccb --- /dev/null +++ b/src/utils/update_check.rs @@ -0,0 +1,97 @@ +use std::{ + fs, + time::{SystemTime, UNIX_EPOCH}, +}; + +use directories::ProjectDirs; +use reqwest::ClientBuilder; +use semver::Version; +use serde::{Deserialize, Serialize}; + +const CHECK_INTERVAL_SECS: u64 = 60 * 60 * 24; // 24 hours +const FETCH_TIMEOUT_SECS: u64 = 5; + +#[derive(Serialize, Deserialize)] +struct UpdateCache { + last_checked_secs: u64, + latest_version: String, +} + +#[derive(Deserialize)] +struct GithubResponse { + tag_name: String, +} + +/// Returns the latest version string if a newer version is available, otherwise `None`. +/// +/// Results are cached for 24 hours to avoid unnecessary API calls. Set the +/// `AVOCADO_NO_UPDATE_CHECK` environment variable to any value to skip the check entirely. +/// Fails silently on network or filesystem errors. 
+pub async fn check_for_update() -> Option { + if std::env::var("AVOCADO_NO_UPDATE_CHECK").is_ok() { + return None; + } + + let proj_dirs = ProjectDirs::from("", "", "avocado")?; + let cache_path = proj_dirs.cache_dir().join("update_check.json"); + + let now_secs = SystemTime::now().duration_since(UNIX_EPOCH).ok()?.as_secs(); + + // Return cached result if still fresh. + if let Ok(data) = fs::read_to_string(&cache_path) { + if let Ok(cache) = serde_json::from_str::(&data) { + if now_secs.saturating_sub(cache.last_checked_secs) < CHECK_INTERVAL_SECS { + return is_newer(&cache.latest_version); + } + } + } + + // Fetch latest release from GitHub. + let latest = fetch_latest_version().await?; + + // Persist to cache (fail silently). + let cache = UpdateCache { + last_checked_secs: now_secs, + latest_version: latest.clone(), + }; + if let Ok(json) = serde_json::to_string(&cache) { + let _ = fs::create_dir_all(proj_dirs.cache_dir()); + let _ = fs::write(&cache_path, json); + } + + is_newer(&latest) +} + +fn is_newer(latest: &str) -> Option { + let latest_str = latest.trim_start_matches('v'); + let current_str = env!("CARGO_PKG_VERSION"); + + let latest_ver = Version::parse(latest_str).ok()?; + let current_ver = Version::parse(current_str).ok()?; + + if latest_ver > current_ver { + Some(latest_str.to_owned()) + } else { + None + } +} + +async fn fetch_latest_version() -> Option { + let client = ClientBuilder::new() + .use_rustls_tls() + .timeout(std::time::Duration::from_secs(FETCH_TIMEOUT_SECS)) + .build() + .ok()?; + + let resp = client + .get("https://api.github.com/repos/avocado-linux/avocado-cli/releases/latest") + .header("Accept", "application/vnd.github+json") + .header("User-Agent", "avocado-linux/avocado-cli") + .send() + .await + .ok()?; + + let resp = resp.error_for_status().ok()?; + let github: GithubResponse = resp.json().await.ok()?; + Some(github.tag_name) +} From ac56e0e2481b706c1b164926ad27dd169130ec1a Mon Sep 17 00:00:00 2001 From: Justin Schneck Date: 
Thu, 5 Mar 2026 19:17:47 -0500 Subject: [PATCH 12/12] release 0.27.0 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1360529..998c967 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -152,7 +152,7 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "avocado-cli" -version = "0.26.0" +version = "0.27.0" dependencies = [ "anyhow", "base64", diff --git a/Cargo.toml b/Cargo.toml index 42807b9..ffe5798 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "avocado-cli" -version = "0.26.0" +version = "0.27.0" edition = "2021" description = "Command line interface for Avocado." authors = ["Avocado"]