diff --git a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml index cb6629a7a2..a84984d193 100644 --- a/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml +++ b/imap_processing/cdf/config/imap_codice_l1a_variable_attrs.yaml @@ -162,7 +162,6 @@ priority_label: FORMAT: A2 VAR_TYPE: metadata - # Common energy labels for species for omni and sectored data energy_species_label: CATDESC: Energy Table for {species} @@ -214,6 +213,20 @@ data_quality: VALIDMAX: 1 VAR_TYPE: data +packet_version: + CATDESC: Packet version. Incremented each time the format of the packet changes. + DEPEND_0: epoch + DISPLAY_TYPE: time_series + FIELDNAM: Packet Version + FILLVAL: 65535 + FORMAT: I5 + LABLAXIS: Packet Version + SCALETYP: linear + UNITS: " " + VALIDMIN: 0 + VALIDMAX: 65535 + VAR_TYPE: data + voltage_table: CATDESC: ElectroStatic Analyzer Voltage Values DEPEND_1: esa_step @@ -242,7 +255,7 @@ nso_half_spin: CATDESC: When No Scan Operation (NSO) was activated DEPEND_0: epoch DISPLAY_TYPE: time_series - FIELDNAM: NSO Mode + FIELDNAM: NSO Half Spin FILLVAL: 255 FORMAT: I3 LABLAXIS: NSO Half Spin @@ -250,14 +263,44 @@ nso_half_spin: UNITS: half spin number VALIDMIN: 0 VALIDMAX: 255 - VAR_NOTES: Indicates the point when No Scan Operation (NSO) was activated. In NSO, the ESA voltage is set to the first step in the scan and remains fixed until the next cycle boundary. + VAR_NOTES: Indicates the half spin when No Scan Operation (NSO) was activated. In NSO, the ESA voltage is set to the first step in the scan and remains fixed until the next cycle boundary. + VAR_TYPE: data + +nso_spin_sector: + CATDESC: Spin Sector When No Scan Operation (NSO) was activated + DEPEND_0: epoch + DISPLAY_TYPE: time_series + FIELDNAM: NSO Spin Sector + FILLVAL: 255 + FORMAT: I3 + LABLAXIS: NSO Spin Sector + SCALETYP: linear + UNITS: spin sector + VALIDMIN: 0 + VALIDMAX: 255 + VAR_NOTES: Indicates the spin sector when No Scan Operation (NSO) was activated. In NSO, the ESA voltage is set to the first step in the scan and remains fixed until the next cycle boundary. + VAR_TYPE: data + +nso_esa_step: + CATDESC: Energy Step When No Scan Operation (NSO) was activated + DEPEND_0: epoch + DISPLAY_TYPE: time_series + FIELDNAM: NSO Energy Step + FILLVAL: 255 + FORMAT: I3 + LABLAXIS: NSO Energy Step + SCALETYP: linear + UNITS: energy step + VALIDMIN: 0 + VALIDMAX: 255 + VAR_NOTES: Indicates the energy step when No Scan Operation (NSO) was activated. In NSO, the ESA voltage is set to the first step in the scan and remains fixed until the next cycle boundary. VAR_TYPE: data rgfo_half_spin: - CATDESC: When Reduced Gain Factor Operation (RGFO) was activated + CATDESC: Half Spin When Reduced Gain Factor Operation (RGFO) was activated DEPEND_0: epoch DISPLAY_TYPE: time_series - FIELDNAM: RGFO Mode + FIELDNAM: RGFO Half Spin FILLVAL: 255 FORMAT: I3 LABLAXIS: RGFO Half Spin @@ -265,7 +308,37 @@ rgfo_half_spin: UNITS: half spin number VALIDMIN: 0 VALIDMAX: 255 - VAR_NOTES: Indicates the point when Reduced Gain Factor Operation (RGFO) was activated. In RGFO, the Entrance ESA voltage is reduced in order to limit the number of ions that reach the detectors. + VAR_NOTES: Indicates the half spin when Reduced Gain Factor Operation (RGFO) was activated. In RGFO, the Entrance ESA voltage is reduced in order to limit the number of ions that reach the detectors. 
+ VAR_TYPE: data + +rgfo_spin_sector: + CATDESC: Spin Sector When Reduced Gain Factor Operation (RGFO) was activated + DEPEND_0: epoch + DISPLAY_TYPE: time_series + FIELDNAM: RGFO Spin Sector + FILLVAL: 255 + FORMAT: I3 + LABLAXIS: RGFO Spin Sector + SCALETYP: linear + UNITS: spin sector + VALIDMIN: 0 + VALIDMAX: 255 + VAR_NOTES: Indicates the spin sector when Reduced Gain Factor Operation (RGFO) was activated. In RGFO, the Entrance ESA voltage is reduced in order to limit the number of ions that reach the detectors. + VAR_TYPE: data + +rgfo_esa_step: + CATDESC: Energy Step When Reduced Gain Factor Operation (RGFO) was activated + DEPEND_0: epoch + DISPLAY_TYPE: time_series + FIELDNAM: RGFO Energy Step + FILLVAL: 255 + FORMAT: I3 + LABLAXIS: RGFO Energy Step + SCALETYP: linear + UNITS: energy step + VALIDMIN: 0 + VALIDMAX: 255 + VAR_NOTES: Indicates the energy step when Reduced Gain Factor Operation (RGFO) was activated. In RGFO, the Entrance ESA voltage is reduced in order to limit the number of ions that reach the detectors. VAR_TYPE: data spin_period: diff --git a/imap_processing/codice/codice_l1a.py b/imap_processing/codice/codice_l1a.py index 5bc0a3a9a3..578159a55a 100644 --- a/imap_processing/codice/codice_l1a.py +++ b/imap_processing/codice/codice_l1a.py @@ -1,6 +1,8 @@ """CoDICE L1A processing functions.""" +import datetime import logging +import os import xarray as xr from imap_data_access import ProcessingInputCollection @@ -52,16 +54,22 @@ def process_l1a( # noqa: PLR0912 """ # Get science data which is L0 packet file science_file = dependency.get_file_paths(data_type="l0")[0] + # TODO get the exact time the FSW changed on january 29 and relabel the xml file + # On January 29, 2026, the CoDICE flight software was updated to a new version. + # This update included changes to the packet definitions. 
+ start_date = datetime.datetime.strptime( + os.path.basename(science_file).split("_")[4], "%Y%m%d" + ) # Extract the date from the filename + path = imap_module_directory / "codice/packet_definitions/" + if start_date >= datetime.datetime(2026, 1, 29): + xtce_file = path / "imap_codice_packet-definition_20260129_v001.xml" + else: + xtce_file = path / "imap_codice_packet-definition_20250101_v001.xml" - xtce_file = ( - imap_module_directory / "codice/packet_definitions/codice_packet_definition.xml" - ) - # Decom packet datasets_by_apid = packet_file_to_datasets( science_file, xtce_file, ) - datasets = [] for apid in datasets_by_apid: if apid not in [CODICEAPID.COD_LO_PHA, CODICEAPID.COD_HI_PHA]: diff --git a/imap_processing/codice/codice_l1a_de.py b/imap_processing/codice/codice_l1a_de.py index fadaea2c2f..abc8ad0303 100644 --- a/imap_processing/codice/codice_l1a_de.py +++ b/imap_processing/codice/codice_l1a_de.py @@ -56,9 +56,16 @@ def extract_initial_items_from_combined_packets( spare_1 = np.zeros(n_packets, dtype=np.uint8) st_bias_gain_mode = np.zeros(n_packets, dtype=np.uint8) sw_bias_gain_mode = np.zeros(n_packets, dtype=np.uint8) - priority = np.zeros(n_packets, dtype=np.uint8) suspect = np.zeros(n_packets, dtype=np.uint8) + priority = np.zeros(n_packets, dtype=np.uint8) compressed = np.zeros(n_packets, dtype=np.uint8) + rgfo_half_spin = np.zeros(n_packets, dtype=np.uint8) + rgfo_esa_step = np.zeros(n_packets, dtype=np.uint8) + rgfo_spin_sector = np.zeros(n_packets, dtype=np.uint8) + nso_half_spin = np.zeros(n_packets, dtype=np.uint8) + nso_spin_sector = np.zeros(n_packets, dtype=np.uint8) + nso_esa_step = np.zeros(n_packets, dtype=np.uint8) + spare_2 = np.zeros(n_packets, dtype=np.uint16) num_events = np.zeros(n_packets, dtype=np.uint32) byte_count = np.zeros(n_packets, dtype=np.uint32) @@ -89,20 +96,42 @@ def extract_initial_items_from_combined_packets( suspect[pkt_idx] = (mixed_bytes >> 1) & 0x1 # compressed: 1 bit (LSB) compressed[pkt_idx] = mixed_bytes & 0x1 - - # Remaining byte-aligned fields - num_events[pkt_idx] = int.from_bytes(event_data[12:16], byteorder="big") - byte_count[pkt_idx] = int.from_bytes(event_data[16:20], byteorder="big") - - # Remove the first 20 bytes from event_data (header fields from above) + # After packet version 1, the fields below are present in event_data + if packet_version[pkt_idx] > 1: + # All of the fields below are single byte fields + rgfo_half_spin[pkt_idx] = event_data[12] + rgfo_spin_sector[pkt_idx] = event_data[13] + rgfo_esa_step[pkt_idx] = event_data[14] + nso_half_spin[pkt_idx] = event_data[15] + nso_spin_sector[pkt_idx] = event_data[16] + nso_esa_step[pkt_idx] = event_data[17] + + # spare_2 is 16 bits + spare_2[pkt_idx] = int.from_bytes(event_data[18:20], byteorder="big") + # Remaining byte-aligned fields + num_events[pkt_idx] = int.from_bytes(event_data[20:24], byteorder="big") + byte_count[pkt_idx] = int.from_bytes(event_data[24:28], byteorder="big") + # Header is 28 bytes total for version > 1 + len_header = 28 + else: + # Remaining byte-aligned fields + num_events[pkt_idx] = int.from_bytes(event_data[12:16], byteorder="big") + byte_count[pkt_idx] = int.from_bytes(event_data[16:20], byteorder="big") + # Header is 20 bytes total for version 1 + len_header = 20 + + # Remove the first len_header bytes from event_data (header fields from above) # Then trim to the number of bytes indicated by byte_count - if byte_count[pkt_idx] > len(event_data) - 20: + if byte_count[pkt_idx] > len(event_data) - len_header: raise ValueError( f"Byte count 
{byte_count[pkt_idx]} exceeds available " - f"data length {len(event_data) - 20} for packet index {pkt_idx}." + f"data length {len(event_data) - len_header} for packet index" + f" {pkt_idx}." ) - packets.event_data.data[pkt_idx] = event_data[20 : 20 + byte_count[pkt_idx]] + packets.event_data.data[pkt_idx] = event_data[ + len_header : byte_count[pkt_idx] + len_header + ] if compressed[pkt_idx]: packets.event_data.data[pkt_idx] = decompress( packets.event_data.data[pkt_idx], @@ -120,6 +149,13 @@ def extract_initial_items_from_combined_packets( packets["priority"] = xr.DataArray(priority, dims=["epoch"]) packets["suspect"] = xr.DataArray(suspect, dims=["epoch"]) packets["compressed"] = xr.DataArray(compressed, dims=["epoch"]) + packets["rgfo_half_spin"] = xr.DataArray(rgfo_half_spin, dims=["epoch"]) + packets["rgfo_spin_sector"] = xr.DataArray(rgfo_spin_sector, dims=["epoch"]) + packets["rgfo_esa_step"] = xr.DataArray(rgfo_esa_step, dims=["epoch"]) + packets["nso_half_spin"] = xr.DataArray(nso_half_spin, dims=["epoch"]) + packets["nso_spin_sector"] = xr.DataArray(nso_spin_sector, dims=["epoch"]) + packets["nso_esa_step"] = xr.DataArray(nso_esa_step, dims=["epoch"]) + packets["spare_2"] = xr.DataArray(spare_2, dims=["epoch"]) packets["num_events"] = xr.DataArray(num_events, dims=["epoch"]) packets["byte_count"] = xr.DataArray(byte_count, dims=["epoch"]) @@ -203,6 +239,7 @@ def _create_dataset_coords( collapse_table=0, three_d_collapsed=0, view_id=0, + compression=CoDICECompression.LOSSLESS.value, # DE data is always lossless ) epochs, epochs_delta = get_codice_epoch_time( packets["acq_start_seconds"].isel(epoch=epoch_slice), @@ -316,7 +353,6 @@ def _unpack_and_store_events( n_events = int(num_events_arr[pkt_idx]) if n_events == 0: continue - # Extract and byte-reverse events for LSB unpacking pkt_bytes = np.asarray(event_data_arr[pkt_idx], dtype=np.uint8) pkt_bytes = pkt_bytes.reshape(n_events, 8)[:, ::-1] @@ -450,7 +486,16 @@ def process_de_data( # Add per-epoch metadata from first packet of each epoch epoch_slice = slice(None, None, num_priorities) - for var in ["sw_bias_gain_mode", "st_bias_gain_mode"]: + for var in [ + "sw_bias_gain_mode", + "st_bias_gain_mode", + "rgfo_esa_step", + "rgfo_half_spin", + "rgfo_spin_sector", + "nso_esa_step", + "nso_half_spin", + "nso_spin_sector", + ]: de_data[var] = xr.DataArray( packets[var].isel(epoch=epoch_slice).values, dims=["epoch"], @@ -482,7 +527,6 @@ def process_de_data( dims=["epoch", "priority"], attrs=cdf_attrs.get_variable_attributes("de_2d_attrs"), ) - # Reshape packet arrays for validation and assignment priorities_2d = packets.priority.values.reshape(num_epochs, num_priorities) num_events_2d = packets.num_events.values.reshape(num_epochs, num_priorities) @@ -534,6 +578,7 @@ def l1a_direct_event(unpacked_dataset: xr.Dataset, apid: int) -> xr.Dataset: packets = combine_segmented_packets( unpacked_dataset, binary_field_name="event_data" ) + packets = extract_initial_items_from_combined_packets(packets) # Gather the CDF attributes diff --git a/imap_processing/codice/codice_l1a_hi_counters_aggregated.py b/imap_processing/codice/codice_l1a_hi_counters_aggregated.py index cfe50c6195..ef815c347b 100644 --- a/imap_processing/codice/codice_l1a_hi_counters_aggregated.py +++ b/imap_processing/codice/codice_l1a_hi_counters_aggregated.py @@ -10,6 +10,7 @@ from imap_processing.codice import constants from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( + CoDICECompression, ViewTabInfo, get_codice_epoch_time, 
get_counters_aggregated_pattern, @@ -61,6 +62,7 @@ def l1a_hi_counters_aggregated( sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) if view_tab_obj.sensor != 1: @@ -86,7 +88,7 @@ def l1a_hi_counters_aggregated( binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values - compression_algorithm = constants.HI_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) # The decompressed data in the shape of (epoch, n). Then reshape later. decompressed_data = [ diff --git a/imap_processing/codice/codice_l1a_hi_counters_singles.py b/imap_processing/codice/codice_l1a_hi_counters_singles.py index 93e450373d..9da96b6ae2 100644 --- a/imap_processing/codice/codice_l1a_hi_counters_singles.py +++ b/imap_processing/codice/codice_l1a_hi_counters_singles.py @@ -10,6 +10,7 @@ from imap_processing.codice import constants from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( + CoDICECompression, ViewTabInfo, get_codice_epoch_time, get_collapse_pattern_shape, @@ -59,6 +60,7 @@ def l1a_hi_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) if view_tab_obj.sensor != 1: @@ -78,7 +80,7 @@ def l1a_hi_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. # spin sector size is 1. inst_az = collapse_shape[1] - compression_algorithm = constants.HI_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) # Decompress data using byte count information from decommed data binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values diff --git a/imap_processing/codice/codice_l1a_hi_omni.py b/imap_processing/codice/codice_l1a_hi_omni.py index 656380c6a0..d9dc24a6e3 100644 --- a/imap_processing/codice/codice_l1a_hi_omni.py +++ b/imap_processing/codice/codice_l1a_hi_omni.py @@ -10,6 +10,7 @@ from imap_processing.codice import constants from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( + CoDICECompression, ViewTabInfo, apply_replacements_to_attrs, get_codice_epoch_time, @@ -61,6 +62,7 @@ def l1a_hi_omni(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) if view_tab_obj.sensor != 1: @@ -71,7 +73,7 @@ def l1a_hi_omni(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species_names = species_data.keys() logical_source_id = "imap_codice_l1a_hi-omni" - compression_algorithm = constants.HI_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) # Decompress data using byte count information from decommed data binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values diff --git a/imap_processing/codice/codice_l1a_hi_priority.py b/imap_processing/codice/codice_l1a_hi_priority.py index 7de581978d..d67e17daf1 100644 --- a/imap_processing/codice/codice_l1a_hi_priority.py +++ 
b/imap_processing/codice/codice_l1a_hi_priority.py @@ -10,6 +10,7 @@ from imap_processing.codice import constants from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( + CoDICECompression, ViewTabInfo, get_codice_epoch_time, get_collapse_pattern_shape, @@ -62,6 +63,7 @@ def l1a_hi_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) if view_tab_obj.sensor != 1: @@ -80,15 +82,27 @@ def l1a_hi_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species_data = sci_lut_data["data_product_hi_tab"]["0"]["priority"] species_names = species_data.keys() logical_source_id = "imap_codice_l1a_hi-priority" - compression_algorithm = constants.HI_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] - + compression_algorithm = CoDICECompression(view_tab_obj.compression) # Decompress data using byte count information from decommed data binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values - + packet_version = unpacked_dataset["packet_version"].values[0] # The decompressed data in the shape of (epoch, n). Then reshape later. decompressed_data = [ - decompress( + np.frombuffer( + bytes( + decompress( + packet_data[:byte_count], + compression_algorithm, + ) + ), + dtype=">u4", + # '>' means big-endian, 'u4' means unsigned 4-byte integer (uint32) + ) + # For newer packet versions, the decompressed data needs to be converted to + # uint32 + if packet_version > 1 + else decompress( packet_data[:byte_count], compression_algorithm, ) diff --git a/imap_processing/codice/codice_l1a_hi_sectored.py b/imap_processing/codice/codice_l1a_hi_sectored.py index 1a147e2b40..440d780958 100644 --- a/imap_processing/codice/codice_l1a_hi_sectored.py +++ b/imap_processing/codice/codice_l1a_hi_sectored.py @@ -10,6 +10,7 @@ from imap_processing.codice import constants from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( + CoDICECompression, ViewTabInfo, apply_replacements_to_attrs, get_codice_epoch_time, @@ -63,6 +64,7 @@ def l1a_hi_sectored(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) if view_tab_obj.sensor != 1: @@ -82,7 +84,7 @@ def l1a_hi_sectored(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species_names = species_data.keys() logical_source_id = "imap_codice_l1a_hi-sectored" - compression_algorithm = constants.HI_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) # Decompress data using byte count information from decommed data binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values diff --git a/imap_processing/codice/codice_l1a_ialirt_hi.py b/imap_processing/codice/codice_l1a_ialirt_hi.py index a7a4dcd68e..09c8c07db1 100644 --- a/imap_processing/codice/codice_l1a_ialirt_hi.py +++ b/imap_processing/codice/codice_l1a_ialirt_hi.py @@ -9,6 +9,7 @@ from imap_processing.codice import constants from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( + CoDICECompression, ViewTabInfo, get_codice_epoch_time, get_collapse_pattern_shape, @@ 
-59,13 +60,14 @@ def l1a_ialirt_hi(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) species_data = sci_lut_data["data_product_hi_tab"]["0"]["ialirt"] first_species = next(iter(species_data)) centers, energy_minus, energy_plus = get_energy_info(species_data[first_species]) - compression_algorithm = constants.HI_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values diff --git a/imap_processing/codice/codice_l1a_lo_angular.py b/imap_processing/codice/codice_l1a_lo_angular.py index 1696895fbd..907b83789f 100644 --- a/imap_processing/codice/codice_l1a_lo_angular.py +++ b/imap_processing/codice/codice_l1a_lo_angular.py @@ -8,10 +8,15 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes from imap_processing.codice import constants -from imap_processing.codice.constants import HALF_SPIN_FILLVAL +from imap_processing.codice.constants import ( + HALF_SPIN_FILLVAL, + LO_NSW_ANGULAR_VARIABLE_NAMES, + LO_SW_ANGULAR_VARIABLE_NAMES, +) from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( CODICEAPID, + CoDICECompression, ViewTabInfo, calculate_acq_time_per_step, get_codice_epoch_time, @@ -59,7 +64,7 @@ def _despin_species_data( # 24 is derived by multiplying spin sector dim from collapse table by 2 spin_sector_len = constants.LO_DESPIN_SPIN_SECTORS despun_shape = (num_packets, num_species, esa_steps, spin_sector_len, inst_az_dim) - despun_data = np.full(despun_shape, 0) + despun_data = np.full(despun_shape, 0.0, dtype=np.float64) # Pixel orientation array and mapping positions pixel_orientation = np.array( sci_lut_data["lo_stepping_tab"]["pixel_orientation"]["data"] @@ -94,7 +99,7 @@ def _despin_species_data( return despun_data -def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: +def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: # noqa: PLR0912 """ L1A processing code. 
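# Illustrative sketch, separate from the changes above: for packet_version > 1 the
# decompressed counter payload is reinterpreted as big-endian unsigned 32-bit integers,
# as in the hi-priority hunk above; the byte values here are invented for the example.
import numpy as np

decompressed = [0x00, 0x00, 0x01, 0x2C, 0x00, 0x00, 0x00, 0x07]  # two counters: 300 and 7
counts = np.frombuffer(bytes(decompressed), dtype=">u4")  # ">u4" = big-endian uint32
assert counts.tolist() == [300, 7]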
@@ -134,6 +139,7 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) if view_tab_obj.sensor != 0: @@ -141,20 +147,34 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: # ========= Decompress and Reshape Data =========== # Lookup SW or NSW species based on APID + # We also need to determine if there are any species that should be backfilled + # with fill values if view_tab_obj.apid == CODICEAPID.COD_LO_SW_ANGULAR_COUNTS: - species_names = sci_lut_data["data_product_lo_tab"]["0"]["angular"]["sw"][ - "species_names" - ] + actual_species_names = sci_lut_data["data_product_lo_tab"]["0"]["angular"][ + "sw" + ]["species_names"] + desired_species_names = set( + sci_lut_data["data_product_lo_tab"]["0"]["angular"]["sw"][ + "desired_species_names" + ] + + LO_SW_ANGULAR_VARIABLE_NAMES + ) logical_source_id = "imap_codice_l1a_lo-sw-angular" elif view_tab_obj.apid == CODICEAPID.COD_LO_NSW_ANGULAR_COUNTS: - species_names = sci_lut_data["data_product_lo_tab"]["0"]["angular"]["nsw"][ - "species_names" - ] + actual_species_names = sci_lut_data["data_product_lo_tab"]["0"]["angular"][ + "nsw" + ]["species_names"] + desired_species_names = set( + sci_lut_data["data_product_lo_tab"]["0"]["angular"]["nsw"][ + "desired_species_names" + ] + + LO_NSW_ANGULAR_VARIABLE_NAMES + ) logical_source_id = "imap_codice_l1a_lo-nsw-angular" else: raise ValueError(f"Unknown apid {view_tab_obj.apid} in Lo species processing.") - compression_algorithm = constants.LO_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) # Decompress data using byte count information from decommed data binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values @@ -180,16 +200,13 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: # 24 includes despinning spin sector. Then at later steps, # we handle despinning. num_packets = len(binary_data_list) - esa_steps = constants.NUM_ESA_STEPS - num_species = len(species_names) + num_esa_steps = constants.NUM_ESA_STEPS + num_species = len(actual_species_names) + num_spin_sectors = collapsed_shape[0] species_data = np.array(decompressed_data, dtype=np.uint32).reshape( - num_packets, num_species, esa_steps, *collapsed_shape + num_packets, num_species, num_esa_steps, *collapsed_shape ) - # Despinning - # ---------------- - species_data = _despin_species_data(species_data, sci_lut_data, view_tab_obj) - # ========== Get Voltage Data from LUT =========== # Use plan id and plan step to get voltage data's table_number in ESA sweep table. 
# Voltage data is (128,) "lo_stepping" ] voltage_data = sci_lut_data["esa_sweep_tab"][f"{esa_table_number}"] - # If data size is less than 128, pad with fillval to make it 128 half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") - if len(half_spin_per_esa_step) < constants.NUM_ESA_STEPS: - pad_size = constants.NUM_ESA_STEPS - len(half_spin_per_esa_step) + if len(half_spin_per_esa_step) < num_esa_steps: + pad_size = num_esa_steps - len(half_spin_per_esa_step) half_spin_per_esa_step = np.concatenate( (np.array(half_spin_per_esa_step), np.full(pad_size, HALF_SPIN_FILLVAL)) ) @@ -221,19 +237,89 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: np.asarray(acquisition_time_per_step), (len(unpacked_dataset["acq_start_seconds"]), 1), ) + # ========== Apply NSO/RGFO Masking =========== + # After FSW changes on 20260129, the Lo L1A product contains variables that + # indicate the esa step and spin sector during which the RGFO or NSO limits are + # triggered. The spin sector variable ranges from 0-11 and is the instrument + # reported spin sector. The following algorithm defines when to assign NaN to the + # angular data product due to NSO + # operation: + # 1. For half_spin > nso_half_spin, set all data to NaN + # 2. For half_spin = nso_half_spin + # a. For spin_sector > nso_spin_sector, set all data to NaN + # b. For spin_sector = nso_spin_sector + # i. For esa_step > nso_esa_step, set all data to NaN # For every energy after nso_half_spin, set data to fill values + # For data before 20260129 (packet_version <= 1) set all data to NaN where + # half_spin > nso_half_spin + packet_versions = unpacked_dataset["packet_version"].values nso_half_spin = unpacked_dataset["nso_half_spin"].values - nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | ( - half_spin_per_esa_step == HALF_SPIN_FILLVAL - ) - species_mask = nso_mask[:, np.newaxis, :, np.newaxis, np.newaxis] - species_mask = np.broadcast_to(species_mask, species_data.shape) + # TODO handle boundary days where the FSW changed halfway through the dataset, e.g. + # some packet_version = 1 and some = 2 + if packet_versions[0] <= 1: + # For half_spin >= NSO_half_spin, set to NaN + half_spin_mask = (half_spin_per_esa_step >= nso_half_spin[:, np.newaxis]) | ( + half_spin_per_esa_step == HALF_SPIN_FILLVAL + ) + species_mask = half_spin_mask[:, np.newaxis, :, np.newaxis, np.newaxis] + species_mask = np.broadcast_to(species_mask, species_data.shape) + else: + # nso_spin_sector and nso_esa_step for comparison. Shape (epoch, 1, 1) + # to broadcast + nso_spin_sector = unpacked_dataset["nso_spin_sector"].values[ + :, np.newaxis, np.newaxis + ] + nso_esa_step = unpacked_dataset["nso_energy_step"].values[ + :, np.newaxis, np.newaxis + ] + # Create arrays for spin sectors and esa steps to compare with nso values. + # Shape (1, 1, spin_sector) and (1, esa_step, 1) + spin_sectors = np.arange(num_spin_sectors)[np.newaxis, np.newaxis, :] + esa_steps = np.arange(num_esa_steps)[np.newaxis, :, np.newaxis] + # Create a mask for half_spin > nso_half_spin. Shape (epoch, esa_step). + # This will be used below to set half_spin_per_esa_step to fillval and + # acquisition_time_per_step to NaN for those steps.
+ half_spin_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | ( + half_spin_per_esa_step == HALF_SPIN_FILLVAL + ) + # Create a mask for the boundary condition where half_spin == nso_half_spin. + at_boundary = ( + half_spin_per_esa_step[:, :, np.newaxis] + == nso_half_spin[:, np.newaxis, np.newaxis] + ) + boundary_half_spin_mask = ( + at_boundary + & + # For spin_sector > nso_spin_sector, set to NaN + ( + (spin_sectors > nso_spin_sector) + | + # For spin_sector = nso_spin_sector and esa_step > nso_esa_step, + # set to NaN + ((spin_sectors == nso_spin_sector) & (esa_steps > nso_esa_step)) + ) + ) + + # Combine masks. Shape (epoch, esa_step, spin_sector). This mask is True + # where data should be set to NaN + nso_mask = half_spin_mask[:, :, np.newaxis] | boundary_half_spin_mask + # Expand nso_mask to (epoch, 1, esa_step, spin_sector, 1) to apply to + # species_data. + species_mask = np.broadcast_to( + nso_mask[:, np.newaxis, :, :, np.newaxis], species_data.shape + ) + species_data = species_data.astype(np.float64) species_data[species_mask] = np.nan - # Set half_spin_per_esa_step to (fillval) where nso_mask is True - half_spin_per_esa_step[nso_mask] = HALF_SPIN_FILLVAL - # Set acquisition_time_per_step to nan where nso_mask is True - acquisition_time_per_step[nso_mask] = np.nan + # Set half_spin_per_esa_step to (fillval) where half_spin mask is True + half_spin_per_esa_step[half_spin_mask] = HALF_SPIN_FILLVAL + # Set acquisition_time_per_step to nan where half_spin_mask is True + acquisition_time_per_step[half_spin_mask] = np.nan + + # Despinning + # ---------------- + species_data = _despin_species_data(species_data, sci_lut_data, view_tab_obj) + # ========= Get Epoch Time Data =========== # Epoch center time and delta epoch_center, deltas = get_codice_epoch_time( @@ -355,6 +441,33 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: "acquisition_time_per_esa_step", check_schema=False ), ) + # Rename vars + unpacked_dataset = unpacked_dataset.rename( + { + k: v + for k, v in [ + ("rgfo_energy_step", "rgfo_esa_step"), + ("nso_energy_step", "nso_esa_step"), + ] + if k in unpacked_dataset + } + ) + # These variables were added to the packet definition after 20260129, so they only + # exist in the unpacked dataset if packet_version > 1 + # If they don't exist, initialize them with fill val arrays since they won't be + # used in the NSO/RGFO masking logic but should still exist in l1a for SPDF + # compliance/consistency. + l1a_additional_vars = [ + "rgfo_spin_sector", + "rgfo_esa_step", + "nso_spin_sector", + "nso_esa_step", + ] + for var in l1a_additional_vars: + if var not in unpacked_dataset: + unpacked_dataset[var] = np.full( + unpacked_dataset.sizes["epoch"], fill_value=np.nan + ) # Carry over these variables from unpacked data to l1a_dataset l1a_carryover_vars = [ @@ -362,6 +475,8 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: "st_bias_gain_mode", "rgfo_half_spin", "nso_half_spin", + "packet_version", + *l1a_additional_vars, ] # Loop through them since we need to set their attrs too for var in l1a_carryover_vars: @@ -370,12 +485,24 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes(var), ) + # Loop through the species we want in the final dataset (desired_species_names) and + # add them if they exist in the actual species names from the LUT. 
+ # This is to handle the bug in which the spacecraft was sending data down "off by + # one" and getting mislabeled. + for species in desired_species_names: + if species not in actual_species_names: + logger.warning( + f"Desired species {species} not found in actual species names from " + f"LUT. This species will be filled with fill values in the final " + f"dataset. Actual species names: {actual_species_names}" + ) + species_data_individual = np.full(species_data[:, 0, :, :, :].shape, np.nan) + else: + species_idx = actual_species_names.index(species) + species_data_individual = species_data[:, species_idx, :, :, :] - # Finally, add species data variables and their uncertainties - for species_data_idx, species in enumerate(species_names): species_attrs = cdf_attrs.get_variable_attributes("lo-angular-attrs") unc_attrs = cdf_attrs.get_variable_attributes("lo-angular-unc-attrs") - direction = ( "Sunward" if view_tab_obj.apid == CODICEAPID.COD_LO_SW_ANGULAR_COUNTS @@ -389,7 +516,7 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species=species, direction=direction ) l1a_dataset[species] = xr.DataArray( - species_data[:, species_data_idx, :, :, :], + species_data_individual, dims=("epoch", "esa_step", "spin_sector", "inst_az"), attrs=species_attrs, ) diff --git a/imap_processing/codice/codice_l1a_lo_counters_aggregated.py b/imap_processing/codice/codice_l1a_lo_counters_aggregated.py index fae1484ff0..a390d0dd4d 100644 --- a/imap_processing/codice/codice_l1a_lo_counters_aggregated.py +++ b/imap_processing/codice/codice_l1a_lo_counters_aggregated.py @@ -11,6 +11,7 @@ from imap_processing.codice.constants import HALF_SPIN_FILLVAL from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( + CoDICECompression, ViewTabInfo, calculate_acq_time_per_step, get_codice_epoch_time, @@ -63,6 +64,7 @@ def l1a_lo_counters_aggregated( sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) if view_tab_obj.sensor != 0: @@ -98,7 +100,7 @@ def l1a_lo_counters_aggregated( binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values - compression_algorithm = constants.LO_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) # The decompressed data in the shape of (epoch, n). Then reshape later. 
decompressed_data = [ @@ -140,7 +142,7 @@ def l1a_lo_counters_aggregated( ) # For every energy after nso_half_spin, set data to fill values nso_half_spin = unpacked_dataset["nso_half_spin"].values - nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | ( + nso_mask = (half_spin_per_esa_step >= nso_half_spin[:, np.newaxis]) | ( half_spin_per_esa_step == HALF_SPIN_FILLVAL ) counters_mask = nso_mask[:, :, np.newaxis, np.newaxis] @@ -255,6 +257,33 @@ def l1a_lo_counters_aggregated( "acquisition_time_per_esa_step", check_schema=False ), ) + # Rename vars + unpacked_dataset = unpacked_dataset.rename( + { + k: v + for k, v in [ + ("rgfo_energy_step", "rgfo_esa_step"), + ("nso_energy_step", "nso_esa_step"), + ] + if k in unpacked_dataset + } + ) + # These variables were added to the packet definition after 20260129, so they only + # exist in the unpacked dataset if packet_version > 1 + # If they don't exist, initialize them with fill val arrays since they won't be + # used in the NSO/RGFO masking logic but should still exist in l1a for SPDF + # compliance/consistency. + l1a_additional_vars = [ + "rgfo_spin_sector", + "rgfo_esa_step", + "nso_spin_sector", + "nso_esa_step", + ] + for var in l1a_additional_vars: + if var not in unpacked_dataset: + unpacked_dataset[var] = np.full( + unpacked_dataset.sizes["epoch"], fill_value=np.nan + ) # Carry over these variables from unpacked data to l1a_dataset l1a_carryover_vars = [ @@ -262,6 +291,7 @@ def l1a_lo_counters_aggregated( "st_bias_gain_mode", "rgfo_half_spin", "nso_half_spin", + *l1a_additional_vars, ] # Loop through them since we need to set their attrs too for var in l1a_carryover_vars: @@ -270,7 +300,6 @@ def l1a_lo_counters_aggregated( dims=("epoch",), attrs=cdf_attrs.get_variable_attributes(var), ) - # Finally, add data variables for idx, variable in enumerate(non_reserved_variables): # We don't store reserved variables in CDF diff --git a/imap_processing/codice/codice_l1a_lo_counters_singles.py b/imap_processing/codice/codice_l1a_lo_counters_singles.py index 50e7d2f552..2f8b003539 100644 --- a/imap_processing/codice/codice_l1a_lo_counters_singles.py +++ b/imap_processing/codice/codice_l1a_lo_counters_singles.py @@ -11,6 +11,7 @@ from imap_processing.codice.constants import HALF_SPIN_FILLVAL from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( + CoDICECompression, ViewTabInfo, calculate_acq_time_per_step, get_codice_epoch_time, @@ -61,6 +62,7 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) if view_tab_obj.sensor != 0: @@ -73,7 +75,6 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. "lo_stepping" ] voltage_data = sci_lut_data["esa_sweep_tab"][f"{esa_table_number}"] - # ========= Decompress and Reshape Data =========== logical_source_id = "imap_codice_l1a_lo-counters-singles" @@ -90,7 +91,7 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. 
inst_az = collapse_shape[1] esa_step = len(voltage_data) - compression_algorithm = constants.LO_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) # Decompress data using byte count information from decommed data binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values @@ -137,7 +138,7 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. ) # For every energy after nso_half_spin, set data to fill values nso_half_spin = unpacked_dataset["nso_half_spin"].values - nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | ( + nso_mask = (half_spin_per_esa_step >= nso_half_spin[:, np.newaxis]) | ( half_spin_per_esa_step == HALF_SPIN_FILLVAL ) counters_mask = nso_mask[:, :, np.newaxis, np.newaxis] @@ -264,6 +265,33 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. "acquisition_time_per_esa_step", check_schema=False ), ) + # Rename vars + unpacked_dataset = unpacked_dataset.rename( + { + k: v + for k, v in [ + ("rgfo_energy_step", "rgfo_esa_step"), + ("nso_energy_step", "nso_esa_step"), + ] + if k in unpacked_dataset + } + ) + # These variables were added to the packet definition after 20260129, so they only + # exist in the unpacked dataset if packet_version > 1 + # If they don't exist, initialize them with fill val arrays since they won't be + # used in the NSO/RGFO masking logic but should still exist in l1a for SPDF + # compliance/consistency. + l1a_additional_vars = [ + "rgfo_spin_sector", + "rgfo_esa_step", + "nso_spin_sector", + "nso_esa_step", + ] + for var in l1a_additional_vars: + if var not in unpacked_dataset: + unpacked_dataset[var] = np.full( + unpacked_dataset.sizes["epoch"], fill_value=np.nan + ) # Carry over these variables from unpacked data to l1a_dataset l1a_carryover_vars = [ @@ -271,6 +299,7 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. "st_bias_gain_mode", "rgfo_half_spin", "nso_half_spin", + *l1a_additional_vars, ] # Loop through them since we need to set their attrs too for var in l1a_carryover_vars: @@ -279,7 +308,6 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr. dims=("epoch",), attrs=cdf_attrs.get_variable_attributes(var), ) - # Finally, add species data variables and their uncertainties. # Since singles only has one variable, we can directly add it here. 
l1a_dataset["apd_singles"] = xr.DataArray( diff --git a/imap_processing/codice/codice_l1a_lo_priority.py b/imap_processing/codice/codice_l1a_lo_priority.py index d0930309a9..7a66c167fd 100644 --- a/imap_processing/codice/codice_l1a_lo_priority.py +++ b/imap_processing/codice/codice_l1a_lo_priority.py @@ -12,6 +12,7 @@ from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( CODICEAPID, + CoDICECompression, ViewTabInfo, calculate_acq_time_per_step, get_codice_epoch_time, @@ -64,6 +65,7 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) if view_tab_obj.sensor != 0: @@ -93,13 +95,13 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: "species_names" ] logical_source_id = "imap_codice_l1a_lo-sw-priority" - compression_algorithm = constants.LO_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) elif apid == CODICEAPID.COD_LO_NSW_PRIORITY_COUNTS: species_names = sci_lut_data["data_product_lo_tab"]["0"]["priority"]["nsw"][ "species_names" ] logical_source_id = "imap_codice_l1a_lo-nsw-priority" - compression_algorithm = constants.LO_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) else: raise ValueError("Unsupported APID for Lo priority processing.") @@ -107,9 +109,22 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values + packet_version = unpacked_dataset["packet_version"].values[0] # The decompressed data in the shape of (epoch, n). Then reshape later. 
decompressed_data = [ - decompress( + np.frombuffer( + bytes( + decompress( + packet_data[:byte_count], + compression_algorithm, + ) + ), + dtype=">u4", # Big endian + ) + # For newer packet versions, the decompressed data needs to be converted to + # uint32 + if packet_version > 1 + else decompress( packet_data[:byte_count], compression_algorithm, ) @@ -123,21 +138,21 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: # Reshape decompressed data to in below for loop: # (num_packets, num_species, esa_steps, collapse_shape[0](spin_sector)) num_species = len(species_names) - esa_steps = constants.NUM_ESA_STEPS + num_esa_steps = constants.NUM_ESA_STEPS collapse_shape = get_collapse_pattern_shape( sci_lut_data, view_tab_obj.sensor, view_tab_obj.collapse_table, ) - + num_spin_sectors = collapse_shape[0] species_data = np.array(decompressed_data, dtype=np.uint32).reshape( - num_packets, num_species, esa_steps, collapse_shape[0] + num_packets, num_species, num_esa_steps, num_spin_sectors ) # If data size is less than 128, pad with fillval to make it 128 half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data") - if len(half_spin_per_esa_step) < constants.NUM_ESA_STEPS: - pad_size = constants.NUM_ESA_STEPS - len(half_spin_per_esa_step) + if len(half_spin_per_esa_step) < num_esa_steps: + pad_size = num_esa_steps - len(half_spin_per_esa_step) half_spin_per_esa_step = np.concatenate( (np.array(half_spin_per_esa_step), np.full(pad_size, HALF_SPIN_FILLVAL)) ) @@ -158,19 +173,82 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: np.asarray(acquisition_time_per_step), (len(unpacked_dataset["acq_start_seconds"]), 1), ) + # ========== Apply NSO/RGFO Masking =========== + # After FSW changes on 20260129, the Lo L1A product contains variables that + # indicate the esa step and spin sector during which the RGFO or NSO limits are + # triggered. The spin sector variable ranges from 0-11 and is the instrument + # reported spin sector. The following algorithm defines when to assign NaN to the + # priority data product due to NSO + # operation: + # 1. For half_spin > nso_half_spin, set all data to NaN + # 2. For half_spin = nso_half_spin + # a. For spin_sector > nso_spin_sector, set all data to NaN + # b. For spin_sector = nso_spin_sector + # i. For esa_step > nso_esa_step, set all data to NaN # For every energy after nso_half_spin, set data to fill values + # For data before 20260129 (packet_version <= 1) set all data to NaN where + # half_spin > nso_half_spin + packet_versions = unpacked_dataset["packet_version"].values nso_half_spin = unpacked_dataset["nso_half_spin"].values - nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | ( - half_spin_per_esa_step == HALF_SPIN_FILLVAL - ) - species_mask = nso_mask[:, np.newaxis, :, np.newaxis] - species_mask = np.broadcast_to(species_mask, species_data.shape) + # TODO handle boundary days where the FSW changed halfway through the dataset, e.g. + # some packet_version = 1 and some = 2 + if packet_versions[0] <= 1: + # For half_spin >= NSO_half_spin, set to NaN + half_spin_mask = (half_spin_per_esa_step >= nso_half_spin[:, np.newaxis]) | ( + half_spin_per_esa_step == HALF_SPIN_FILLVAL + ) + species_mask = half_spin_mask[:, np.newaxis, :, np.newaxis] + species_mask = np.broadcast_to(species_mask, species_data.shape) + else: + # nso_spin_sector and nso_esa_step for comparison.
Shape (epoch, 1, 1) + # to broadcast + nso_spin_sector = unpacked_dataset["nso_spin_sector"].values[ + :, np.newaxis, np.newaxis + ] + nso_esa_step = unpacked_dataset["nso_energy_step"].values[ + :, np.newaxis, np.newaxis + ] + # Create arrays for spin sectors and esa steps to compare with nso values. + # Shape (1, 1, spin_sector) and (1, esa_step, 1) + spin_sectors = np.arange(num_spin_sectors)[np.newaxis, np.newaxis, :] + esa_steps = np.arange(num_esa_steps)[np.newaxis, :, np.newaxis] + # Create a mask for half_spin > nso_half_spin. Shape (epoch, esa_step)) + # This will be used below to set half_spin_per_esa_step to fillval and + # acquisition_time_per_step to NaN for those steps. + half_spin_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | ( + half_spin_per_esa_step == HALF_SPIN_FILLVAL + ) + # Create a mask for the boundary condition where half_spin == nso_half_spin. + at_boundary = ( + half_spin_per_esa_step[:, :, np.newaxis] + == nso_half_spin[:, np.newaxis, np.newaxis] + ) + boundary_half_spin_mask = ( + at_boundary + & + # For spin_sector > nso_spin_sector, set to NaN + ( + (spin_sectors > nso_spin_sector) + | + # For spin_sector = nso_spin_sector and esa_step > nso_esa_step, + # set to NaN + ((spin_sectors == nso_spin_sector) & (esa_steps > nso_esa_step)) + ) + ) + # Combine masks. Shape (epoch, esa_step, spin_sector). This mask is True + # where data should be set to NaN + nso_mask = half_spin_mask[:, :, np.newaxis] | boundary_half_spin_mask + # Expand nso_mask to (epoch, 1, esa_step, spin_sector) to apply to species_data. + species_mask = np.broadcast_to( + nso_mask[:, np.newaxis, :, :], species_data.shape + ) + species_data = species_data.astype(np.float64) species_data[species_mask] = np.nan # Set half_spin_per_esa_step to (fillval) where nso_mask is True - half_spin_per_esa_step[nso_mask] = HALF_SPIN_FILLVAL + half_spin_per_esa_step[half_spin_mask] = HALF_SPIN_FILLVAL # Set acquisition_time_per_step to nan where nso_mask is True - acquisition_time_per_step[nso_mask] = np.nan + acquisition_time_per_step[half_spin_mask] = np.nan # ========== Create CDF Dataset with Metadata =========== cdf_attrs = ImapCdfAttributes() @@ -272,6 +350,33 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: "acquisition_time_per_esa_step", check_schema=False ), ) + # Rename vars + unpacked_dataset = unpacked_dataset.rename( + { + k: v + for k, v in [ + ("rgfo_energy_step", "rgfo_esa_step"), + ("nso_energy_step", "nso_esa_step"), + ] + if k in unpacked_dataset + } + ) + # These variables were added to the packet definition after 20260129, so they only + # exist in the unpacked dataset if packet_version > 1 + # If they don't exist, initialize them with fill val arrays since they won't be + # used in the NSO/RGFO masking logic but should still exist in l1a for SPDF + # compliance/consistency. 
+ l1a_additional_vars = [ + "rgfo_spin_sector", + "rgfo_esa_step", + "nso_spin_sector", + "nso_esa_step", + ] + for var in l1a_additional_vars: + if var not in unpacked_dataset: + unpacked_dataset[var] = np.full( + unpacked_dataset.sizes["epoch"], fill_value=np.nan + ) # Carry over these variables from unpacked data to l1a_dataset l1a_carryover_vars = [ @@ -279,6 +384,7 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: "st_bias_gain_mode", "rgfo_half_spin", "nso_half_spin", + *l1a_additional_vars, ] # Loop through them since we need to set their attrs too for var in l1a_carryover_vars: @@ -287,7 +393,6 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes(var), ) - # Finally, add species data variables and their uncertainties for idx, species in enumerate(species_names): l1a_dataset[species] = xr.DataArray( diff --git a/imap_processing/codice/codice_l1a_lo_species.py b/imap_processing/codice/codice_l1a_lo_species.py index aebad5ad27..93e1c2e6ca 100644 --- a/imap_processing/codice/codice_l1a_lo_species.py +++ b/imap_processing/codice/codice_l1a_lo_species.py @@ -8,10 +8,16 @@ from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes from imap_processing.codice import constants -from imap_processing.codice.constants import HALF_SPIN_FILLVAL +from imap_processing.codice.constants import ( + HALF_SPIN_FILLVAL, + LO_IALIRT_VARIABLE_NAMES, + LO_NSW_SPECIES_VARIABLE_NAMES, + LO_SW_SPECIES_VARIABLE_NAMES, +) from imap_processing.codice.decompress import decompress from imap_processing.codice.utils import ( CODICEAPID, + CoDICECompression, ViewTabInfo, calculate_acq_time_per_step, get_codice_epoch_time, @@ -24,7 +30,7 @@ logger = logging.getLogger(__name__) -def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: +def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: # noqa: PLR0912 """ L1A processing code. @@ -64,33 +70,62 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: sensor=view_tab_info["sensor"], three_d_collapsed=view_tab_info["3d_collapse"], collapse_table=view_tab_info["collapse_table"], + compression=view_tab_info["compression"], ) - if view_tab_obj.sensor != 0: raise ValueError("Unsupported sensor ID for Lo species processing.") # ========= Decompress and Reshape Data =========== # Lookup SW or NSW species based on APID if view_tab_obj.apid == CODICEAPID.COD_LO_SW_SPECIES_COUNTS: - species_names = sci_lut_data["data_product_lo_tab"]["0"]["species"]["sw"][ - "species_names" - ] + actual_species_names = sci_lut_data["data_product_lo_tab"]["0"]["species"][ + "sw" + ]["species_names"] + desired_species_names = set( + sci_lut_data["data_product_lo_tab"]["0"]["species"]["sw"][ + "desired_species_names" + ] + + LO_SW_SPECIES_VARIABLE_NAMES + ) logical_source_id = "imap_codice_l1a_lo-sw-species" elif view_tab_obj.apid == CODICEAPID.COD_LO_NSW_SPECIES_COUNTS: - species_names = sci_lut_data["data_product_lo_tab"]["0"]["species"]["nsw"][ - "species_names" - ] + actual_species_names = sci_lut_data["data_product_lo_tab"]["0"]["species"][ + "nsw" + ]["species_names"] + desired_species_names = set( + sci_lut_data["data_product_lo_tab"]["0"]["species"]["nsw"][ + "desired_species_names" + ] + + LO_NSW_SPECIES_VARIABLE_NAMES + ) logical_source_id = "imap_codice_l1a_lo-nsw-species" + # Rename "cnoplus" to "junk" if we are processing NSW angular data. 
Although + cnoplus is in the desired and actual species names in the LUT, it is referencing + different "cnoplus" data. This is to handle the bug in which the spacecraft was + sending data down "off by one" and getting mislabeled. The cnoplus data we + are referencing is actually data that we want to toss out and fill with + fill vals. This only affects data before the LUT was updated + (table_id 3978152295). + if table_id <= 3978152295: + actual_species_names = [ + "junk" if name == "cnoplus" else name for name in actual_species_names + ] elif view_tab_obj.apid == CODICEAPID.COD_LO_IAL: - species_names = sci_lut_data["data_product_lo_tab"]["0"]["ialirt"]["sw"][ + actual_species_names = sci_lut_data["data_product_lo_tab"]["0"]["ialirt"]["sw"][ "species_names" ] + desired_species_names = set( + sci_lut_data["data_product_lo_tab"]["0"]["ialirt"]["sw"][ + "desired_species_names" + ] + + LO_IALIRT_VARIABLE_NAMES + ) # Note: ialirt does not produce a cdf for l1a so this is arbitrary. logical_source_id = "imap_codice_l1a_lo-sw-species" else: raise ValueError(f"Unknown apid {view_tab_obj.apid} in Lo species processing.") - compression_algorithm = constants.LO_COMPRESSION_ID_LOOKUP[view_tab_obj.view_id] + compression_algorithm = CoDICECompression(view_tab_obj.compression) # Decompress data using byte count information from decommed data binary_data_list = unpacked_dataset["data"].values byte_count_list = unpacked_dataset["byte_count"].values @@ -116,7 +151,7 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: # (num_packets, num_species, esa_steps, *collapsed_shape) # where collapsed_shape is usually (1,) for Lo species. num_packets = len(binary_data_list) - num_species = len(species_names) + num_species = len(actual_species_names) esa_steps = constants.NUM_ESA_STEPS species_data = np.array(decompressed_data, dtype=np.uint32).reshape( num_packets, num_species, esa_steps, *collapsed_shape ) @@ -150,7 +185,7 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: ) # For every energy after nso_half_spin, set data to fill values nso_half_spin = unpacked_dataset["nso_half_spin"].values - nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | ( + nso_mask = (half_spin_per_esa_step >= nso_half_spin[:, np.newaxis]) | ( half_spin_per_esa_step == HALF_SPIN_FILLVAL ) species_mask = nso_mask[:, np.newaxis, :, np.newaxis] @@ -279,6 +314,33 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: "acquisition_time_per_esa_step", check_schema=False ), ) + # Rename vars + unpacked_dataset = unpacked_dataset.rename( + { + k: v + for k, v in [ + ("rgfo_energy_step", "rgfo_esa_step"), + ("nso_energy_step", "nso_esa_step"), + ] + if k in unpacked_dataset + } + ) + # These variables were added to the packet definition after 20260129, so they only + # exist in the unpacked dataset if packet_version > 1 + # If they don't exist, initialize them with fill val arrays since they won't be + # used in the NSO/RGFO masking logic but should still exist in l1a for SPDF + # compliance/consistency.
+ l1a_additional_vars = [ + "rgfo_spin_sector", + "rgfo_esa_step", + "nso_spin_sector", + "nso_esa_step", + ] + for var in l1a_additional_vars: + if var not in unpacked_dataset: + unpacked_dataset[var] = np.full( + unpacked_dataset.sizes["epoch"], fill_value=np.nan + ) # Carry over these variables from unpacked data to l1a_dataset l1a_carryover_vars = [ @@ -286,6 +348,8 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: "st_bias_gain_mode", "rgfo_half_spin", "nso_half_spin", + "packet_version", + *l1a_additional_vars, ] # Loop through them since we need to set their attrs too for var in l1a_carryover_vars: @@ -294,18 +358,25 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch",), attrs=cdf_attrs.get_variable_attributes(var), ) - # Finally, add species data variables and their uncertainties - for idx, species in enumerate(species_names): - if view_tab_obj.apid == CODICEAPID.COD_LO_SW_SPECIES_COUNTS and species in [ - "heplus", - "cnoplus", - ]: - species_attrs = cdf_attrs.get_variable_attributes("lo-pui-species-attrs") - unc_attrs = cdf_attrs.get_variable_attributes("lo-pui-species-unc-attrs") + # Loop through the species we want in the final dataset (desired_species_names) and + # add them if they exist in the actual species names from the LUT. + # This is to handle the bug in which the spacecraft was sending data down "off by + # one" and getting mislabeled. + for species in desired_species_names: + if species not in actual_species_names: + logger.warning( + f"Desired species {species} not found in actual species names from " + f"LUT. This species will be filled with fill values in the final " + f"dataset. Actual species names: {actual_species_names}" + ) + species_data_individual = np.full(species_data[:, 0, :, :].shape, np.nan) else: - species_attrs = cdf_attrs.get_variable_attributes("lo-species-attrs") - unc_attrs = cdf_attrs.get_variable_attributes("lo-species-unc-attrs") + species_idx = actual_species_names.index(species) + species_data_individual = species_data[:, species_idx, :, :] + + species_attrs = cdf_attrs.get_variable_attributes("lo-species-attrs") + unc_attrs = cdf_attrs.get_variable_attributes("lo-species-unc-attrs") direction = ( "Sunward" @@ -320,7 +391,7 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: species=species, direction=direction ) l1a_dataset[species] = xr.DataArray( - species_data[:, idx, :, :], + species_data_individual, dims=("epoch", "esa_step", "spin_sector"), attrs=species_attrs, ) @@ -336,5 +407,4 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset: dims=("epoch", "esa_step", "spin_sector"), attrs=unc_attrs, ) - return l1a_dataset diff --git a/imap_processing/codice/codice_l1b.py b/imap_processing/codice/codice_l1b.py index 2e109e4203..e38d1551b8 100644 --- a/imap_processing/codice/codice_l1b.py +++ b/imap_processing/codice/codice_l1b.py @@ -47,7 +47,6 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: variables_to_convert = getattr( constants, f"{descriptor.upper().replace('-', '_')}_VARIABLE_NAMES" ) - if descriptor.startswith("lo-"): # Calculate energy_table using voltage_table and k_factor energy_attrs = dataset["voltage_table"].attrs | { @@ -87,6 +86,8 @@ def convert_to_rates(dataset: xr.Dataset, descriptor: str) -> np.ndarray: "st_bias_gain_mode", "spin_period", "voltage_table", + "nso_esa_step", + "nso_spin_sector", # TODO: undo this when I get new validation file from Joey # 
"acquisition_time_per_esa_step", ] @@ -190,6 +191,7 @@ def process_codice_l1b(file_path: Path) -> xr.Dataset: # Update the global attributes l1b_dataset.attrs = cdf_attrs.get_global_attributes(dataset_name) + return convert_to_rates( l1b_dataset, descriptor, diff --git a/imap_processing/codice/codice_l2.py b/imap_processing/codice/codice_l2.py index 3af4fb41d3..f15a698f23 100644 --- a/imap_processing/codice/codice_l2.py +++ b/imap_processing/codice/codice_l2.py @@ -279,7 +279,7 @@ def get_species_efficiency(species: str, efficiency: pd.DataFrame) -> xr.DataArr def compute_geometric_factors( - dataset: xr.Dataset, geometric_factor_lookup: dict + dataset: xr.Dataset, geometric_factor_lookup: dict, angular_product: bool = False ) -> xr.DataArray: """ Calculate geometric factors needed for intensity calculations. @@ -290,17 +290,29 @@ def compute_geometric_factors( If the half-spin value is less than the corresponding rgfo_half_spin value, the geometric factor is set to 0.75 (full mode); otherwise, it is set to 0.5 - (reduced mode). + (reduced mode). If the data is from after November 24th 2025, then reduced + mode is no longer applied and the geometric factor is always set to full mode. NOTE: Half spin values are associated with ESA steps which corresponds to the index of the energy_per_charge dimension that is between 0 and 127. + NOTE: If packet_version = 2, the Lo L1B product now contains variables that indicate + the esa step and spin sector during which the RGFO or NSO limits are triggered. + The spin sector variable ranges from 0-11 and is the instrument reported spin + sector. In the following algorithm, spin_angle refers to the L1B angular bin + (0 – 23) which is despun and spin_sector refers to the non-despun spin sector + reported from the instrument (0-11). + Parameters ---------- dataset : xarray.Dataset The L2 dataset containing rgfo_half_spin data variable. geometric_factor_lookup : dict A dict with a full and reduced mode array with shape (esa_steps, position). + angular_product : bool + Whether the product being processed is an angular product. If True, then + the geometric factor calculation has additional steps to determine the exact + rgfo boundary. Returns ------- @@ -308,25 +320,62 @@ def compute_geometric_factors( A 3D array of geometric factors with shape (epoch, esa_steps, positions). """ # Get half spin values per esa step from the dataset - half_spin_per_esa_step = dataset.half_spin_per_esa_step.values - + # Add a new dim for spin_sector + half_spin_per_esa_step = dataset.half_spin_per_esa_step.values[:, :, np.newaxis] # Expand dimensions to compare each rgfo_half_spin value against - # all half_spin_values - rgfo_half_spin = dataset.rgfo_half_spin.data[:, np.newaxis] # Shape: (epoch, 1) - # Perform the comparison and calculate modes - # Modes will be true (reduced mode) anywhere half_spin > rgfo_half_spin otherwise - # false (full mode) - # TODO: The mode calculation will need to be revisited after FW changes in january - # 2026. We also need to fix this on days when the sci Lut changes. + # all half_spin_values and spin_sectors. Shape: (epoch, 1, 1) + rgfo_half_spin = dataset.rgfo_half_spin.data[:, np.newaxis, np.newaxis] # After November 24th 2025 we need to do this step a different way. 
start_date = dataset.attrs.get("Logical_file_id", None) if start_date is None: raise ValueError("Dataset is missing Logical_file_id attribute.") processing_date = datetime.datetime.strptime(start_date.split("_")[4], "%Y%m%d") date_switch = datetime.datetime(2025, 11, 24) + fsw_switch_date = datetime.datetime(2026, 1, 29) # Only consider valid half spins valid_half_spin = half_spin_per_esa_step != HALF_SPIN_FILLVAL - if processing_date < date_switch: + # TODO: Fix this calculation on days when the sci Lut changes. There may be + # different packet versions in the same dataset. + # Perform the comparison and calculate modes + if angular_product and dataset.packet_version.data[0] > 1: + # For angular products with packet version > 1, we have spin sector information + # to determine the exact boundary of the RGFO mode. Shape: (epoch, 1, 1) + # Mod by 12 to convert rgfo_spin_sector to half spin sector range of 0-11 + rgfo_spin_sector = dataset.rgfo_spin_sector.data[:, np.newaxis, np.newaxis] % 12 + rgfo_esa_step = dataset.rgfo_esa_step.data[:, np.newaxis, np.newaxis] + # Shape: (1, 1, spin_sector (24)) + spin_sector = dataset.spin_sector.data[np.newaxis, np.newaxis, :] + # Shape: (1, esa_step (128), 1) + esa_step = dataset.esa_step.data[np.newaxis, :, np.newaxis] + at_boundary = half_spin_per_esa_step == rgfo_half_spin + + modes = ( + # Reduced mode (True) is applied where: + # 1. Half spin is valid. + valid_half_spin + & ( + # 2. Half spin is greater than rgfo_half_spin. + (half_spin_per_esa_step > rgfo_half_spin) + | ( + # 3. Where half_spin_per_esa_step equals rgfo_half_spin AND + at_boundary + & ( + # a. The spin sector mod 12 is greater than rgfo_spin_sector + ((spin_sector % 12) > rgfo_spin_sector) + | + # b. OR the spin sector mod 12 equals rgfo_spin_sector AND the + # esa step is greater than rgfo_esa_step + ( + ((spin_sector % 12) == rgfo_spin_sector) + & (esa_step > rgfo_esa_step) + ) + ) + ) + ) + ) + elif (processing_date < date_switch) | (processing_date >= fsw_switch_date): + # Modes will be true (reduced mode) anywhere half_spin > rgfo_half_spin + # otherwise false (full mode) modes = ( valid_half_spin & (half_spin_per_esa_step > rgfo_half_spin) @@ -337,14 +386,26 @@ def compute_geometric_factors( # always use the full geometric factor lookup. 
modes = np.zeros_like(half_spin_per_esa_step, dtype=bool) - # Get the geometric factors based on the modes - gf = np.where( - modes[:, :, np.newaxis], # Shape (epoch, esa_step, 1) - geometric_factor_lookup["reduced"], # Shape (1, esa_step, 24) - reduced mode - geometric_factor_lookup["full"], # Shape (1, esa_step, 24) - full mode - ) # Shape: (epoch, esa_step, inst_az) - - return xr.DataArray(gf, dims=("epoch", "esa_step", "inst_az")) + # If the last dimension of modes is 24, we have spin sector information and + # need to apply the geometric factor lookup differently + if modes.shape[-1] == 24: + # Get the geometric factors based on the modes + # expand the mode array to include a dimension for "inst_az" (also shape=24) + modes = modes[:, :, :, np.newaxis] # Shape (epoch, esa_step, 24, 1) + gf = np.where( + modes, # Shape (epoch, esa_step, 24, 1) + geometric_factor_lookup["reduced"][:, np.newaxis, :], # (esa_step, 1, 24) + geometric_factor_lookup["full"][:, np.newaxis, :], # (esa_step, 1, 24) + ) # Shape: (epoch, esa_step, spin_sector, inst_az) + return xr.DataArray(gf, dims=("epoch", "esa_step", "spin_sector", "inst_az")) + else: + # Get the geometric factors based on the modes + gf = np.where( + modes, # Shape (epoch, esa_step, 1) + geometric_factor_lookup["reduced"], # (esa_step, 24) + geometric_factor_lookup["full"], # (esa_step, 24) + ) # Shape: (epoch, esa_step, inst_az) + return xr.DataArray(gf, dims=("epoch", "esa_step", "inst_az")) def calculate_intensity( @@ -397,7 +458,6 @@ def calculate_intensity( # efficiency. # intensity = species_rate / (gm * eff * esa_step) for position and spin angle for species in species_list: - # Select the relevant positions for the species from the efficiency LUT # Shape: (epoch, esa_step, inst_az) species_eff = get_species_efficiency(species, efficiency).isel( inst_az=positions @@ -409,15 +469,11 @@ def calculate_intensity( if average_across_positions: # Take the mean efficiency across positions species_eff = species_eff.mean(dim="inst_az") - # Shape: (epoch, esa_step, inst_az) or # (epoch, esa_step) if averaged denominator = scalar * geometric_factors * species_eff * dataset["energy_table"] if species not in dataset: - logger.warning( - f"Species {species} not found in dataset. Filling with NaNS." - ) - dataset[species] = np.full(dataset["esa_step"].data.shape, np.nan) + raise ValueError(f"Species {species} not found in dataset.") else: # Only replace the data with calculated intensity to keep the attributes dataset[species].data = (dataset[species] / denominator).data @@ -491,13 +547,29 @@ def process_lo_species_intensity( species_attrs = cdf_attrs.get_variable_attributes("lo-species-attrs") unc_attrs = cdf_attrs.get_variable_attributes("lo-species-unc-attrs") + # add uncertainties to species list + species_list = species_list + [f"unc_{var}" for var in species_list] # update species attrs for species in species_list: - attrs = unc_attrs if "unc" in unc_attrs else species_attrs + attrs = unc_attrs if "unc" in species else species_attrs # Replace {species} and {direction} in attrs attrs = apply_replacements_to_attrs(attrs, {"species": species}) dataset[species].attrs.update(attrs) + # Since the RGFO mode is implemented within a half-spin at a given esa step and + # spin sector and since the species data is summed over all spin sectors, the data + # during this half spin cannot be de-convolved. Thus, the intensity during the + # half_spin = RGFO_half_spin should be set to fill values. 
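# Hedged, down-sized illustration of the boundary fill described above: two epochs
# and four ESA steps stand in for the real (epoch, 128) arrays; values are invented.
import numpy as np

half_spin_per_esa_step = np.array([[0, 1, 2, 3], [0, 0, 1, 1]])
rgfo_half_spin = np.array([2, 1])
boundary = half_spin_per_esa_step == rgfo_half_spin[:, np.newaxis]
rates = np.ones((2, 4, 1))
# Only the half spin at which RGFO switched on is discarded; earlier (full-mode) and
# later (reduced-mode) half spins keep their computed intensities.
rates[boundary[:, :, np.newaxis]] = np.nan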
+ half_spin_boundary = ( + dataset.half_spin_per_esa_step.data + == dataset.rgfo_half_spin.data[:, np.newaxis] + ) + # Add an extra dimension to match the species data shape (361, 128, 1) + half_spin_boundary = half_spin_boundary[:, :, np.newaxis] + + for species in species_list: + dataset[species].data[half_spin_boundary] = np.nan + return dataset @@ -615,7 +687,6 @@ def process_lo_angular_intensity( dataset[species].values[ :, b_inds[:, np.newaxis], spin_inds_2, position_index ] = species_data[:, b_inds[:, np.newaxis], spin_inds_1, position_index] - cdf_attrs = ImapCdfAttributes() cdf_attrs.add_instrument_variable_attrs("codice", "l2-lo-angular") species_attrs = cdf_attrs.get_variable_attributes("lo-angular-attrs") @@ -649,7 +720,6 @@ def process_lo_angular_intensity( dataset["spin_sector"].attrs = cdf_attrs.get_variable_attributes( "spin_sector", check_schema=False ) - return dataset @@ -1154,7 +1224,18 @@ def process_lo_direct_events(dependencies: ProcessingInputCollection) -> xr.Data kev.astype(np.float32).reshape(l2_dataset["energy_step"].shape), ) # Drop unused variables - vars_to_drop = ["spare", "sw_bias_gain_mode", "st_bias_gain_mode", "k_factor"] + vars_to_drop = [ + "spare", + "sw_bias_gain_mode", + "st_bias_gain_mode", + "k_factor", + "rgfo_esa_step", + "rgfo_spin_sector", + "rgfo_half_spin", + "nso_esa_step", + "nso_spin_sector", + "nso_half_spin", + ] l2_dataset = l2_dataset.drop_vars(vars_to_drop) # Update variable attributes l2_dataset.attrs.update( @@ -1273,7 +1354,17 @@ def process_hi_direct_events(dependencies: ProcessingInputCollection) -> xr.Data dims=l2_dataset["tof"].dims, ).astype(np.float32) # Drop unused variables - vars_to_drop = ["spare", "sw_bias_gain_mode", "st_bias_gain_mode"] + vars_to_drop = [ + "spare", + "sw_bias_gain_mode", + "st_bias_gain_mode", + "rgfo_esa_step", + "rgfo_spin_sector", + "rgfo_half_spin", + "nso_esa_step", + "nso_spin_sector", + "nso_half_spin", + ] l2_dataset = l2_dataset.drop_vars(vars_to_drop) # Update variable attributes l2_dataset.attrs.update( @@ -1347,11 +1438,11 @@ def process_codice_l2( geometric_factor_lookup = get_geometric_factor_lut(dependencies) efficiency_lookup = get_efficiency_lut(dependencies) - geometric_factors = compute_geometric_factors( - l2_dataset, geometric_factor_lookup - ) if dataset_name == "imap_codice_l2_lo-sw-species": + geometric_factors = compute_geometric_factors( + l2_dataset, geometric_factor_lookup + ) # Filter the efficiency lookup table for solar wind efficiencies efficiencies = efficiency_lookup[efficiency_lookup["product"] == "sw"] # Calculate the pickup ion sunward solar wind intensities using equation @@ -1376,6 +1467,9 @@ def process_codice_l2( cdf_attrs.get_global_attributes("imap_codice_l2_lo-sw-species") ) elif dataset_name == "imap_codice_l2_lo-nsw-species": + geometric_factors = compute_geometric_factors( + l2_dataset, geometric_factor_lookup + ) # Filter the efficiency lookup table for non-solar wind efficiencies efficiencies = efficiency_lookup[efficiency_lookup["product"] == "nsw"] # Calculate the non-sunward species intensities using equation @@ -1391,6 +1485,9 @@ def process_codice_l2( cdf_attrs.get_global_attributes("imap_codice_l2_lo-nsw-species") ) elif dataset_name == "imap_codice_l2_lo-sw-angular": + geometric_factors = compute_geometric_factors( + l2_dataset, geometric_factor_lookup, angular_product=True + ) efficiencies = efficiency_lookup[efficiency_lookup["product"] == "sw"] # Calculate the sunward solar wind angular intensities using equation # described in section 11.2.2 
of algorithm document. @@ -1405,6 +1502,9 @@ def process_codice_l2( cdf_attrs.get_global_attributes("imap_codice_l2_lo-sw-angular") ) if dataset_name == "imap_codice_l2_lo-nsw-angular": + geometric_factors = compute_geometric_factors( + l2_dataset, geometric_factor_lookup, angular_product=True + ) # Calculate the non sunward angular intensities efficiencies = efficiency_lookup[efficiency_lookup["product"] == "nsw"] l2_dataset = process_lo_angular_intensity( @@ -1417,14 +1517,6 @@ def process_codice_l2( l2_dataset.attrs.update( cdf_attrs.get_global_attributes("imap_codice_l2_lo-nsw-angular") ) - # Drop vars not needed in L2 - l2_dataset = l2_dataset.drop_vars( - [ - "acquisition_time_per_esa_step", - "rgfo_half_spin", - "half_spin_per_esa_step", - ] - ) if dataset_name in [ "imap_codice_l2_hi-counters-singles", @@ -1474,6 +1566,19 @@ def process_codice_l2( # See section 11.1.2 of algorithm document l2_dataset = process_lo_direct_events(dependencies) - # logger.info(f"\nFinal data product:\n{l2_dataset}\n") + # make sure we drop vars not needed in l2 products + vars_to_drop = [ + "acquisition_time_per_esa_step", + "rgfo_half_spin", + "half_spin_per_esa_step", + "rgfo_esa_step", + "rgfo_spin_sector", + "packet_version", + ] + for var in vars_to_drop: + if var in l2_dataset.data_vars: + l2_dataset = l2_dataset.drop_vars(var) + + logger.info(f"\nFinal data product:\n{l2_dataset}\n") return l2_dataset diff --git a/imap_processing/codice/constants.py b/imap_processing/codice/constants.py index 837f12184b..5d61424dd6 100644 --- a/imap_processing/codice/constants.py +++ b/imap_processing/codice/constants.py @@ -17,7 +17,7 @@ import numpy as np -from imap_processing.codice.utils import CODICEAPID, CoDICECompression +from imap_processing.codice.utils import CODICEAPID # -------L1A Constants------- # Numerical constants @@ -64,8 +64,8 @@ "oplus7", "oplus8", "mg", - "fe_loq", "fe_hiq", + "fe_loq", ] HI_IALIRT_VARIABLE_NAMES = ["h"] # Mass over charge (AMU/e) @@ -238,34 +238,6 @@ }, } -# Compression ID lookup tables -# The key is the view_id and the value is the ID for the compression algorithm -# (see utils.CoDICECompression to see how the values correspond) -# These are defined in the "Views" tab of the "*-SCI-LUT-*.xml" spreadsheet that -# largely defines CoDICE processing. 
-LO_COMPRESSION_ID_LOOKUP = { - 0: CoDICECompression.PACK_24_BIT, - 1: CoDICECompression.LOSSY_B_LOSSLESS, - 2: CoDICECompression.LOSSY_B_LOSSLESS, - 3: CoDICECompression.LOSSY_A_LOSSLESS, - 4: CoDICECompression.LOSSY_A_LOSSLESS, - 5: CoDICECompression.LOSSY_A_LOSSLESS, - 6: CoDICECompression.LOSSY_A_LOSSLESS, - 7: CoDICECompression.LOSSY_A_LOSSLESS, - 8: CoDICECompression.LOSSY_A_LOSSLESS, -} -HI_COMPRESSION_ID_LOOKUP = { - 0: CoDICECompression.LOSSY_A, - 1: CoDICECompression.LOSSY_A, - 2: CoDICECompression.LOSSY_A, - 3: CoDICECompression.LOSSY_B_LOSSLESS, - 4: CoDICECompression.LOSSY_B_LOSSLESS, - 5: CoDICECompression.LOSSY_A_LOSSLESS, - 6: CoDICECompression.LOSSY_A_LOSSLESS, - 7: CoDICECompression.LOSSY_A_LOSSLESS, - 8: CoDICECompression.LOSSY_A_LOSSLESS, - 9: CoDICECompression.LOSSY_A_LOSSLESS, -} # Lookup tables for Lossy decompression algorithms "A" and "B" # These were provided by Greg Dunn via his sohis_cdh_utils.v script and then @@ -793,8 +765,8 @@ HI_ACQUISITION_TIME = 0.59916 # TODO: in the future, read from sci-lut -LO_SW_ANGULAR_VARIABLE_NAMES = ["hplus", "heplusplus", "oplus6", "fe_loq"] -LO_NSW_ANGULAR_VARIABLE_NAMES = ["heplusplus"] +LO_SW_ANGULAR_VARIABLE_NAMES = ["hplus", "heplusplus", "oplus6", "fe_loq", "heplus"] +LO_NSW_ANGULAR_VARIABLE_NAMES = ["heplusplus", "heplus"] LO_SW_PRIORITY_VARIABLE_NAMES = [ "p0_tcrs", "p1_hplus", @@ -911,8 +883,8 @@ "ne", "mg", "si", - "fe_loq", "fe_hiq", + "fe_loq", ] LO_SW_PICKUP_ION_SPECIES_VARIABLE_NAMES = [ "heplus", diff --git a/imap_processing/codice/packet_definitions/codice_packet_definition.xml b/imap_processing/codice/packet_definitions/imap_codice_packet-definition_20250101_v001.xml similarity index 100% rename from imap_processing/codice/packet_definitions/codice_packet_definition.xml rename to imap_processing/codice/packet_definitions/imap_codice_packet-definition_20250101_v001.xml diff --git a/imap_processing/codice/packet_definitions/imap_codice_packet-definition_20260129_v001.xml b/imap_processing/codice/packet_definitions/imap_codice_packet-definition_20260129_v001.xml new file mode 100644 index 0000000000..c6e0646b26 --- /dev/null +++ b/imap_processing/codice/packet_definitions/imap_codice_packet-definition_20260129_v001.xml @@ -0,0 +1,5231 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + CCSDS Packet Version Number (always 0) + + + CCSDS Packet Type Indicator (0=telemetry) + + + CCSDS Packet Secondary Header Flag (always 1) + + + CCSDS Packet Application Process ID + + + CCSDS Packet Grouping Flags (3=not part of group) + + + CCSDS Packet Sequence Count (increments with each new packet) + + + CCSDS Packet Length (number of bytes after Packet length minus 1) + + + EUROPA CLIPPER SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK + + + PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGES. + + + NUMBER OF COMMANDS THAT HAVE BEEN EXECUTED. COUNTS 0-255, THEN ROLLS OVER TO 0. RESET VIA CLR_LATCHED_SINGLE(COMMAND_COUNTS) [ALSO RESETS CMDJRCT, CMDACC, ITF_ERROR COUNTS) + + + NUMBER OF COMMANDS THAT HAVE BEEN REJECTED. COUNTS 0-255, THEN ROLLS OVER TO 0. RESET VIA CLR_LATCHED_SINGLE(COMMAND_COUNTS) [ALSO RESETS CMDEXE, CMDACC, ITF_ERROR COUNTS) + + + OPCODE OF THE LAST EXECUTED COMMAND + + + CURRENT OPERATING MODE + + + STATE OF THE MEMORY-OPERATIONS HANDLER + + + STATE OF THE MEMORY-DUMP HANDLER (BUSY/IDLE) + + + NUMBER OF ITF ERRORS THAT HAVE BEEN DETECTED; COUNTS 0-3, THEN ROLLS OVER TO 0. RESET VIA CLR_LATCHED_SINGLE(COMMAND_COUNTS) [ALSO RESETS CMDEXE, CMDJRCT, CMDACC COUNTS) + + + NUMBER OF SPIN PULSES RECEIVED + + + NUMBER OF MISSED PPS PULSES. COUNTS 0-3, THEN FREEZES AT 3. RESET VIA CLR_LATCHED_SINGLE(PPS_STATS) + + + NUMBER OF TIMES THE WATCHDOG HAS TIMED OUT. 
+ + + CURRENT STATUS OF THE HV SAFE/DISABLE PLUGS: +- SAFE: ALL HVPS OUTPUTS PROVIDE 1/10 THE COMMANDED VOLTAGE +- DIS: ALL HVPS OUTPUTS PROVIDE 0V, REGARDLESS OF COMMANDED VOLTAGE +- FULL: HVPS OUTPUTS PROVIDE THE FULL COMMANDED VOLTAGE + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + INDICATES THE CURRENT OPERATIONAL STATE OF THE LO ESA SWEEP: +- NORMAL - BOTH ESAS ARE TRACKING TOGETHER +- RGFO - REDUCED GAIN FACTOR OPERATION; ESA-A IS REDUCED IN ORDER TO REDUCE THE GAIN FACTOR AND ALLOW FEWER IONS INTO THE DETECTOR +- NSO - NO SCAN OPERATION; BOTH ESAS ARE RETURNED TO A HIGH-ENERGY SETTING AND NO SCANNING IS DONE FOR THE REMAINDER OF THE ESA SWEEP + + + + + + + + + + + + + + + + + + + + + + + + + + + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + ALARM PERSISTENCE = 3 IN OASIS + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + EACH BIT INDICATES WHETHER THE CORRESPONDING MACRO IS CURRENTLY RUNNING (E.G. BIT 1 WILL BE SET IF MACRO 1 IS RUNNING) + + + INDICATES WHETHER ANY CATEGORY 1 LIMITS HAVE TRIGGERED. + + 2 BITS: 0: NO TRIGGERS; 1: ONE TRIGGER, 2: TWO TRIGGERS, 3: MORE THAN TWO TRIGGERS + + + INDICATES WHETHER ANY CATEGORY 2 LIMITS HAVE TRIGGERED + + + INDICATES WHETHER ANY CATEGORY 3 LIMITS HAVE TRIGGERED + + + INDICATES WHETHER ANY CATEGORY 4 LIMITS HAVE TRIGGERED + + + INDICATES WHETHER ANY CATEGORY 5 LIMITS HAVE TRIGGERED + + + INDICATES WHETHER ANY CATEGORY 6 LIMITS HAVE TRIGGERED + + + INDICATES WHETHER ANY CATEGORY 7 LIMITS HAVE TRIGGERED + + + INDICATES WHETHER ANY CATEGORY 8 LIMITS HAVE TRIGGERED + + + INDICATES WHETHER THE MOST RECENT TRIGGER WAS A MINIMUM OR MAXIMUM LIMIT + + + INDICATES THE TABLE INDEX OF THE MOST RECENT FDC TRIGGER + + + INDICATES THE ACTION THAT WAS TAKEN FOR THE MOST RECENT FDC TRIGGER + + + CURRENT INDEX FOR THE ROUND ROBIN PARAMETER REPORTING. THE ROUND ROBIN MECHANISM REPORTS ONE VALUE FROM THE PARAMETER TABLE EACH TIME THIS PACKET IS GENERATED. + + + PARAMETER VALUE CORRESPONDING TO THE CURRENT ROUND_ROBIN_INDEX VALUE. + + + INDICATES WHETHER FSW CONTROL OF THE OPERATIONAL HEATER IS ENABLED + + + INDICATES THE CURRENT STATE OF THE PHYSICAL HEATER OUTPUT + + + INDICATES THE CURRENT STATE OF THE PHYSICAL HEATER OUTPUT + + + SPARE FOR ALIGNMENT + + + CPU PERCENT IDLE TIME. BASED ON THE MEMORY SCRUB TASK PERCENT, SINCE IT CONSUMES ALL IDLE CYCLES. + + + + + + + + + + + + + + + + + + + + + + + + SPARE FOR ALIGNMENT + + + PACKET CHECKSUM + + + SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK + + + OPTIONALLY COMPRESSED ARRAY OF EVENT DATA + +FORMAT IS TBD; SOME CONSIDERATIONS/OPTIONS: +- FULL EVENTS HAVE A LOT OF REDUNDANT DATA (E.G. WILL HAVE MANY EVENTS WITH THE SAME PRIORITY/E-STEP/SPIN PHASE INFORMATION). HOW WELL DOES COMPRESSION TO DEAL WITH THE REDUNDANCY? +- COULD INCLUDE MINI-HEADERS FOR EACH (PRIORITY,E-STEP, SPIN-PHASE) GROUP AND STRIP THE REDUNDANT DATA FROM THE EVENTS +- SHOULD EVENTS BE TIGHTLY PACKED, OR CAN WE PAD OUT TO 64-BIT WORD BOUNDARIES? HOW WELL DOES COMPRESSION COMPENSATE FOR THE EXTRA BITS? + +EACH EVENT CONSISTS OF: +- 7-BIT E-STEP +- 10-BIT TOF +- 9-BIT APD ENERGY +- 7-BIT SPIN ANGLE +- 5-BIT POSITION +- 5-BIT APD-ID +- 1-BIT APD-GAIN +- 2-BIT PHA TYPE +- 3-BIT PRIORITY RANGE + +TBD: EVENTS MAY BE TIGHTLY PACKED, OR MAY HAVE SPARES ADDED TO KEEP EACH EVENT BYTE-ALIGNED. IN EITHER CASE, THERE MAY BE UP TO 1 BYTE OF PADDING TO KEEP THE TOTAL SIZE OF THE PACKET EVEN. 
+ + + PACKET CHECKSUM + + + SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK + + + PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGES. + + + SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. + + + FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED + + + SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED (MICROSECONDS) + + + SPARE FOR ALIGNMENT + + + BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR + + + BIAS GAIN MODE FOR THE SOLARWIND SECTOR + + + UNIQUE ID ASSIGNED TO A SPECIFIC TABLE CONFIGURATION. THIS FIELD IS USED TO LINK THE OVERALL ACQUISITION AND PROCESSING SETTINGS TO A SPECIFIC TABLE CONFIGURATION. + + + PLAN TABLE THAT WAS IN USE + + + PLAN STEP THAT WAS ACTIVE WHEN THIS DATA WAS ACQUIRED AND PROCESSED. + + + VIEW ID PROVIDES INFORMATION ABOUT HOW DATA WAS COLLAPSED AND/OR COMPRESSED. + + + SPARE FOR ALIGNMENT + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + SPARE FOR ALIGNMENT + + + SPARE FOR ALIGNMENT + + + INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. + + + WHETHER/HOW THE DATA IS COMPRESSED. + + + NUMBER OF BYTES IN THE DATA ARRAY. IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. + + + COUNTER DATA + +VARIABLE LENGTH; MAXIMUM (BASED ON UNCOLLAPSED, UNCOMPRESSED DATA, AND ASSUMING ALL 8 PRIORITY-COUNTERS INCLUDED): + +128 ENERGIES X 24 POSITIONS X 12 SPIN-ANGLES X 32 BITS X 8 COUNTERS = 9,437,184 BITS (1,179,648 BYTES) + +REALISTICALLY, DATA IS AGGRESSIVELY COLLAPSED AND COMPRESSED, AND ONLY A SUBSET OF THE 32 SPECIES COUNTERS WILL BE INCLUDED, SO THIS DATA FIELD WILL BE MUCH SMALLER THAN THE MAXIMUM. + +DATA FORMAT IS A SERIES OF SPIN-ANGLE X POSITION X ENERGY DATA CUBES COLLAPSED PER THE SCI_LUT COLLAPSE TABLE SELECTED BY THE VIEW_ID. WHICH COUNTERS ARE INCLUDED IS DETERMINED BY USING THE PLAN_ID AND PLAN_STEP TO INDEX INTO THE SCI_LUT DATA PRODUCTS HI/LO TABLES TO FIND ALL THE COUNTERS THAT ARE ASSOCIATED WITH THE VIEW_ID. + +THE COLLAPSED DATA CUBES ARE ALSO OPTIONALLY COMPRESSED USING LOSSY AND/OR LOSSLESS COMPRESSION. LOSSY COMPRESSION IS A TABLE-BASED 24->8 BIT COMPRESSION APPLIED TO EACH COUNTER VALUE. 
LOSSLESS COMPRESSION USES THE LZMA COMPRESSION ALGORITHM AND IS APPLIED TO THE FULL DATA FIELD AS A SINGLE UNIT. + +FIELD WILL ADDITIONALLY BE PADDED IN ORDER TO MEET THE REQUIREMENT OF PACKETS BEING A MULTIPLE OF 16 BITS; ANY PAD BITS WILL BE ACCOUNTED FOR IN THE CCSDS HEADER LENGTH FIELD, BUT WILL *NOT* BE INCLUDED IN THE BYTE_COUNT FIELD + +WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE CCSDS GROUPING FLAGS TO PROVIDE THE FULL DATA PACKET OVER SEVERAL CCSDS PACKETS. + + + PACKET CHECKSUM + + + SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK + + + PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGES. + + + SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. + + + FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED + + + SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED (MICROSECONDS) + + + SPARE FOR ALIGNMENT + + + BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR + + + BIAS GAIN MODE FOR THE SOLARWIND SECTOR + + + UNIQUE ID ASSIGNED TO A SPECIFIC TABLE CONFIGURATION. THIS FIELD IS USED TO LINK THE OVERALL ACQUISITION AND PROCESSING SETTINGS TO A SPECIFIC TABLE CONFIGURATION. + + + PLAN TABLE THAT WAS IN USE + + + PLAN STEP THAT WAS ACTIVE WHEN THIS DATA WAS ACQUIRED AND PROCESSED. + + + VIEW ID PROVIDES INFORMATION ABOUT HOW DATA WAS COLLAPSED AND/OR COMPRESSED. + + + SPARE FOR ALIGNMENT + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + SPARE FOR ALIGNMENT + + + SPARE FOR ALIGNMENT + + + INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. + + + WHETHER/HOW THE DATA IS COMPRESSED. + + + NUMBER OF BYTES IN THE DATA ARRAY. IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. + + + COUNTER DATA + +VARIABLE LENGTH; MAXIMUM (BASED ON UNCOLLAPSED, UNCOMPRESSED DATA, AND ASSUMING ALL 32 SPECIES-COUNTERS INCLUDED): + +128 ENERGIES X 24 POSITIONS X 12 SPIN-ANGLES X 32 BITS X 32 COUNTERS = 37,748,736 BITS (4,718,592 BYTES) + +REALISTICALLY, DATA IS AGGRESSIVELY COLLAPSED AND COMPRESSED, AND ONLY A SUBSET OF THE 32 SPECIES COUNTERS WILL BE INCLUDED, SO THIS DATA FIELD WILL BE MUCH SMALLER THAN THE MAXIMUM. 
+ +DATA FORMAT IS A SERIES OF SPIN-ANGLE X POSITION X ENERGY DATA CUBES COLLAPSED PER THE SCI_LUT COLLAPSE TABLE SELECTED BY THE VIEW_ID. WHICH COUNTERS ARE INCLUDED IS DETERMINED BY USING THE PLAN_ID AND PLAN_STEP TO INDEX INTO THE SCI_LUT DATA PRODUCTS HI/LO TABLES TO FIND ALL THE COUNTERS THAT ARE ASSOCIATED WITH THE VIEW_ID. + +THE COLLAPSED DATA CUBES ARE ALSO OPTIONALLY COMPRESSED USING LOSSY AND/OR LOSSLESS COMPRESSION. LOSSY COMPRESSION IS A TABLE-BASED 24->8 BIT COMPRESSION APPLIED TO EACH COUNTER VALUE. LOSSLESS COMPRESSION USES THE LZMA COMPRESSION ALGORITHM AND IS APPLIED TO THE FULL DATA FIELD AS A SINGLE UNIT. + +FIELD WILL ADDITIONALLY BE PADDED IN ORDER TO MEET THE REQUIREMENT OF PACKETS BEING A MULTIPLE OF 16 BITS; ANY PAD BITS WILL BE ACCOUNTED FOR IN THE CCSDS HEADER LENGTH FIELD, BUT WILL *NOT* BE INCLUDED IN THE BYTE_COUNT FIELD + +WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE CCSDS GROUPING FLAGS TO PROVIDE THE FULL DATA PACKET OVER SEVERAL CCSDS PACKETS. + + + PACKET CHECKSUM + + + SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK + + + PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGES. + + + SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. + + + FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED + + + SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED (MICROSECONDS) + + + SPARE FOR ALIGNMENT + + + BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR + + + BIAS GAIN MODE FOR THE SOLARWIND SECTOR + + + UNIQUE ID ASSIGNED TO A SPECIFIC TABLE CONFIGURATION. THIS FIELD IS USED TO LINK THE OVERALL ACQUISITION AND PROCESSING SETTINGS TO A SPECIFIC TABLE CONFIGURATION. + + + PLAN TABLE THAT WAS IN USE + + + PLAN STEP THAT WAS ACTIVE WHEN THIS DATA WAS ACQUIRED AND PROCESSED. + + + VIEW ID PROVIDES INFORMATION ABOUT HOW DATA WAS COLLAPSED AND/OR COMPRESSED. + + + SPARE FOR ALIGNMENT + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + SPARE FOR ALIGNMENT + + + SPARE FOR ALIGNMENT + + + INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. + + + WHETHER/HOW THE DATA IS COMPRESSED. + + + NUMBER OF BYTES IN THE DATA ARRAY. 
IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. + + + COUNTER DATA + +VARIABLE LENGTH; MAXIMUM (BASED ON UNCOLLAPSED, UNCOMPRESSED DATA, AND ASSUMING ALL 32 SPECIES-COUNTERS INCLUDED): + +128 ENERGIES X 24 POSITIONS X 12 SPIN-ANGLES X 32 BITS X 32 COUNTERS = 37,748,736 BITS (4,718,592 BYTES) + +REALISTICALLY, DATA IS AGGRESSIVELY COLLAPSED AND COMPRESSED, AND ONLY A SUBSET OF THE 32 SPECIES COUNTERS WILL BE INCLUDED, SO THIS DATA FIELD WILL BE MUCH SMALLER THAN THE MAXIMUM. + +DATA FORMAT IS A SERIES OF SPIN-ANGLE X POSITION X ENERGY DATA CUBES COLLAPSED PER THE SCI_LUT COLLAPSE TABLE SELECTED BY THE VIEW_ID. WHICH COUNTERS ARE INCLUDED IS DETERMINED BY USING THE PLAN_ID AND PLAN_STEP TO INDEX INTO THE SCI_LUT DATA PRODUCTS HI/LO TABLES TO FIND ALL THE COUNTERS THAT ARE ASSOCIATED WITH THE VIEW_ID. + +THE COLLAPSED DATA CUBES ARE ALSO OPTIONALLY COMPRESSED USING LOSSY AND/OR LOSSLESS COMPRESSION. LOSSY COMPRESSION IS A TABLE-BASED 24->8 BIT COMPRESSION APPLIED TO EACH COUNTER VALUE. LOSSLESS COMPRESSION USES THE LZMA COMPRESSION ALGORITHM AND IS APPLIED TO THE FULL DATA FIELD AS A SINGLE UNIT. + +FIELD WILL ADDITIONALLY BE PADDED IN ORDER TO MEET THE REQUIREMENT OF PACKETS BEING A MULTIPLE OF 16 BITS; ANY PAD BITS WILL BE ACCOUNTED FOR IN THE CCSDS HEADER LENGTH FIELD, BUT WILL *NOT* BE INCLUDED IN THE BYTE_COUNT FIELD + +WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE CCSDS GROUPING FLAGS TO PROVIDE THE FULL DATA PACKET OVER SEVERAL CCSDS PACKETS. + + + PACKET CHECKSUM + + + SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK + + + PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGES. + + + SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. + + + FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED + + + SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED (MICROSECONDS) + + + SPARE FOR ALIGNMENT + + + BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR + + + BIAS GAIN MODE FOR THE SOLARWIND SECTOR + + + UNIQUE ID ASSIGNED TO A SPECIFIC TABLE CONFIGURATION. THIS FIELD IS USED TO LINK THE OVERALL ACQUISITION AND PROCESSING SETTINGS TO A SPECIFIC TABLE CONFIGURATION. + + + PLAN TABLE THAT WAS IN USE + + + PLAN STEP THAT WAS ACTIVE WHEN THIS DATA WAS ACQUIRED AND PROCESSED. + + + VIEW ID PROVIDES INFORMATION ABOUT HOW DATA WAS COLLAPSED AND/OR COMPRESSED. + + + SPARE FOR ALIGNMENT + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. 
IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + SPARE FOR ALIGNMENT + + + SPARE FOR ALIGNMENT + + + INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. + + + WHETHER/HOW THE DATA IS COMPRESSED. + + + NUMBER OF BYTES IN THE DATA ARRAY. IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. + + + COUNTER DATA + +VARIABLE LENGTH; MAXIMUM (BASED ON UNCOLLAPSED, UNCOMPRESSED DATA, AND ASSUMING ALL 32 SPECIES-COUNTERS INCLUDED): + +128 ENERGIES X 24 POSITIONS X 12 SPIN-ANGLES X 32 BITS X 32 COUNTERS = 37,748,736 BITS (4,718,592 BYTES) + +REALISTICALLY, DATA IS AGGRESSIVELY COLLAPSED AND COMPRESSED, AND ONLY A SUBSET OF THE 32 SPECIES COUNTERS WILL BE INCLUDED, SO THIS DATA FIELD WILL BE MUCH SMALLER THAN THE MAXIMUM. + +DATA FORMAT IS A SERIES OF SPIN-ANGLE X POSITION X ENERGY DATA CUBES COLLAPSED PER THE SCI_LUT COLLAPSE TABLE SELECTED BY THE VIEW_ID. WHICH COUNTERS ARE INCLUDED IS DETERMINED BY USING THE PLAN_ID AND PLAN_STEP TO INDEX INTO THE SCI_LUT DATA PRODUCTS HI/LO TABLES TO FIND ALL THE COUNTERS THAT ARE ASSOCIATED WITH THE VIEW_ID. + +THE COLLAPSED DATA CUBES ARE ALSO OPTIONALLY COMPRESSED USING LOSSY AND/OR LOSSLESS COMPRESSION. LOSSY COMPRESSION IS A TABLE-BASED 24->8 BIT COMPRESSION APPLIED TO EACH COUNTER VALUE. LOSSLESS COMPRESSION USES THE LZMA COMPRESSION ALGORITHM AND IS APPLIED TO THE FULL DATA FIELD AS A SINGLE UNIT. + +FIELD WILL ADDITIONALLY BE PADDED IN ORDER TO MEET THE REQUIREMENT OF PACKETS BEING A MULTIPLE OF 16 BITS; ANY PAD BITS WILL BE ACCOUNTED FOR IN THE CCSDS HEADER LENGTH FIELD, BUT WILL *NOT* BE INCLUDED IN THE BYTE_COUNT FIELD + +WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE CCSDS GROUPING FLAGS TO PROVIDE THE FULL DATA PACKET OVER SEVERAL CCSDS PACKETS. + + + PACKET CHECKSUM + + + SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK + + + PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGES. + + + SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. + + + FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED + + + SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED (MICROSECONDS) + + + SPARE FOR ALIGNMENT + + + BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR + + + BIAS GAIN MODE FOR THE SOLARWIND SECTOR + + + UNIQUE ID ASSIGNED TO A SPECIFIC TABLE CONFIGURATION. THIS FIELD IS USED TO LINK THE OVERALL ACQUISITION AND PROCESSING SETTINGS TO A SPECIFIC TABLE CONFIGURATION. + + + PLAN TABLE THAT WAS IN USE + + + PLAN STEP THAT WAS ACTIVE WHEN THIS DATA WAS ACQUIRED AND PROCESSED. + + + VIEW ID PROVIDES INFORMATION ABOUT HOW DATA WAS COLLAPSED AND/OR COMPRESSED. + + + SPARE FOR ALIGNMENT + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. 
IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + SPARE FOR ALIGNMENT + + + SPARE FOR ALIGNMENT + + + INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. + + + WHETHER/HOW THE DATA IS COMPRESSED. + + + NUMBER OF BYTES IN THE DATA ARRAY. IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. + + + COUNTER DATA + +VARIABLE LENGTH; MAXIMUM (BASED ON UNCOLLAPSED, UNCOMPRESSED DATA, AND ASSUMING ALL 32 SPECIES-COUNTERS INCLUDED): + +128 ENERGIES X 24 POSITIONS X 12 SPIN-ANGLES X 32 BITS X 32 COUNTERS = 37,748,736 BITS (4,718,592 BYTES) + +REALISTICALLY, DATA IS AGGRESSIVELY COLLAPSED AND COMPRESSED, AND ONLY A SUBSET OF THE 32 SPECIES COUNTERS WILL BE INCLUDED, SO THIS DATA FIELD WILL BE MUCH SMALLER THAN THE MAXIMUM. + +DATA FORMAT IS A SERIES OF SPIN-ANGLE X POSITION X ENERGY DATA CUBES COLLAPSED PER THE SCI_LUT COLLAPSE TABLE SELECTED BY THE VIEW_ID. WHICH COUNTERS ARE INCLUDED IS DETERMINED BY USING THE PLAN_ID AND PLAN_STEP TO INDEX INTO THE SCI_LUT DATA PRODUCTS HI/LO TABLES TO FIND ALL THE COUNTERS THAT ARE ASSOCIATED WITH THE VIEW_ID. + +THE COLLAPSED DATA CUBES ARE ALSO OPTIONALLY COMPRESSED USING LOSSY AND/OR LOSSLESS COMPRESSION. LOSSY COMPRESSION IS A TABLE-BASED 24->8 BIT COMPRESSION APPLIED TO EACH COUNTER VALUE. LOSSLESS COMPRESSION USES THE LZMA COMPRESSION ALGORITHM AND IS APPLIED TO THE FULL DATA FIELD AS A SINGLE UNIT. + +FIELD WILL ADDITIONALLY BE PADDED IN ORDER TO MEET THE REQUIREMENT OF PACKETS BEING A MULTIPLE OF 16 BITS; ANY PAD BITS WILL BE ACCOUNTED FOR IN THE CCSDS HEADER LENGTH FIELD, BUT WILL *NOT* BE INCLUDED IN THE BYTE_COUNT FIELD + +WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE CCSDS GROUPING FLAGS TO PROVIDE THE FULL DATA PACKET OVER SEVERAL CCSDS PACKETS. + + + PACKET CHECKSUM + + + SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK + + + PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGES. + + + SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. + + + FULL-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED + + + SUB-SECONDS PORTION OF THE TIME AT WHICH THE 16-SPIN CYCLE STARTED (MICROSECONDS) + + + SPARE FOR ALIGNMENT + + + BIAS GAIN MODE FOR THE SUPRATHERMAL SECTOR + + + BIAS GAIN MODE FOR THE SOLARWIND SECTOR + + + UNIQUE ID ASSIGNED TO A SPECIFIC TABLE CONFIGURATION. THIS FIELD IS USED TO LINK THE OVERALL ACQUISITION AND PROCESSING SETTINGS TO A SPECIFIC TABLE CONFIGURATION. + + + PLAN TABLE THAT WAS IN USE + + + PLAN STEP THAT WAS ACTIVE WHEN THIS DATA WAS ACQUIRED AND PROCESSED. + + + VIEW ID PROVIDES INFORMATION ABOUT HOW DATA WAS COLLAPSED AND/OR COMPRESSED. 
+ + + SPARE FOR ALIGNMENT + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN REDUCED GAIN FACTOR OPERATION (RGFO) WAS ACTIVED. IN RGFO, THE ENTRANCE ESA VOLTAGE IS REDUCED IN ORDER TO LIMIT THE NUMBER OF IONS THAT REACH THE DETECTORS. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + INDICATES THE POINT WHEN NO-SCAN OPERATION (NSO) WAS ACTIVED. IN NSO, THE ESA VOLTAGE IS SET TO THE FIRST STEP IN THE SCAN AND REMAINS FIXED UNTIL THE NEXT CYCLE BOUNDARY. + + + SPARE FOR ALIGNMENT + + + SPARE FOR ALIGNMENT + + + INDICATES THAT THERE WAS SOME ERROR DETECTED DURING ACQUISITION OR PROCESSING OF THE DATA. ERRORS COULD INCLUDE CORRUPTED ACQUISITION MEMORY (I.E. EDAC ERRORS), TIMING VIOLATIONS, OR OTHER EVENTS THAT INTERRUPTED OR OTHERWISE AFFECTED DATA COLLECTION. + + + WHETHER/HOW THE DATA IS COMPRESSED. + + + NUMBER OF BYTES IN THE DATA ARRAY. IF COMPRESSED, THIS VALUE REPRESENTS THE LENGTH OF THE COMPRESSED DATA. + + + COUNTER DATA + +VARIABLE LENGTH; MAXIMUM (BASED ON UNCOLLAPSED, UNCOMPRESSED DATA, AND ASSUMING ALL 8 PRIORITY-COUNTERS INCLUDED): + +128 ENERGIES X 24 POSITIONS X 12 SPIN-ANGLES X 32 BITS X 8 COUNTERS = 9,437,184 BITS (1,179,648 BYTES) + +REALISTICALLY, DATA IS AGGRESSIVELY COLLAPSED AND COMPRESSED, AND ONLY A SUBSET OF THE 32 SPECIES COUNTERS WILL BE INCLUDED, SO THIS DATA FIELD WILL BE MUCH SMALLER THAN THE MAXIMUM. + +DATA FORMAT IS A SERIES OF SPIN-ANGLE X POSITION X ENERGY DATA CUBES COLLAPSED PER THE SCI_LUT COLLAPSE TABLE SELECTED BY THE VIEW_ID. WHICH COUNTERS ARE INCLUDED IS DETERMINED BY USING THE PLAN_ID AND PLAN_STEP TO INDEX INTO THE SCI_LUT DATA PRODUCTS HI/LO TABLES TO FIND ALL THE COUNTERS THAT ARE ASSOCIATED WITH THE VIEW_ID. + +THE COLLAPSED DATA CUBES ARE ALSO OPTIONALLY COMPRESSED USING LOSSY AND/OR LOSSLESS COMPRESSION. LOSSY COMPRESSION IS A TABLE-BASED 24->8 BIT COMPRESSION APPLIED TO EACH COUNTER VALUE. LOSSLESS COMPRESSION USES THE LZMA COMPRESSION ALGORITHM AND IS APPLIED TO THE FULL DATA FIELD AS A SINGLE UNIT. + +FIELD WILL ADDITIONALLY BE PADDED IN ORDER TO MEET THE REQUIREMENT OF PACKETS BEING A MULTIPLE OF 16 BITS; ANY PAD BITS WILL BE ACCOUNTED FOR IN THE CCSDS HEADER LENGTH FIELD, BUT WILL *NOT* BE INCLUDED IN THE BYTE_COUNT FIELD + +WHEN THIS ARRAY IS TOO LARGE FOR A SINGLE CCSDS PACKET, CODICE WILL UTILIZE THE CCSDS GROUPING FLAGS TO PROVIDE THE FULL DATA PACKET OVER SEVERAL CCSDS PACKETS. + + + PACKET CHECKSUM + + + SECONDARY HEADER - WHOLE-SECONDS PART OF SCLK + + + PACKET VERSION - THIS WILL BE INCREMENTED EACH TIME THE FORMAT OF THE PACKET CHANGES. + + + SPIN PERIOD REPORTED BY THE SPACECRAFT IN THE TIME AND STATUS MESSAGE. REPORTED PERIOD IS THE PERIOD THAT WAS ACTIVE WHEN THE 16-SPIN ACQUISITION CYCLE STARTED. 
[The remainder of the packet-definition XML diff is summarized here: the added LongDescription
entries repeat the same field documentation for each CoDICE science APID. Per packet the documented
fields are: the packet version (incremented each time the format of the packet changes); the spin
period reported by the spacecraft in the time and status message (the period active when the
16-spin acquisition cycle started); the full-seconds and sub-seconds (microseconds) portions of the
time at which the 16-spin cycle started; spares for alignment; the bias gain modes for the
suprathermal and solar wind sectors; the unique table configuration ID linking the overall
acquisition and processing settings to a specific table configuration; the plan table and plan step
in use; the view ID describing how the data was collapsed and/or compressed; the half spin, spin
sector, and ESA step at which Reduced Gain Factor Operation (RGFO) was activated (in RGFO the
Entrance ESA voltage is reduced to limit the number of ions that reach the detectors); the half
spin, spin sector, and ESA step at which No Scan Operation (NSO) was activated (in NSO the ESA
voltage is set to the first step in the scan and remains fixed until the next cycle boundary); a
data error flag for errors detected during acquisition or processing (e.g. EDAC errors or timing
violations); a flag for whether/how the data is compressed; the byte count of the (possibly
compressed) data array; the counter data array itself; and the packet checksum.

The counter-data descriptions document a variable-length field of spin-angle x position x energy
(or spin-number) data cubes, collapsed per the SCI_LUT collapse table selected by the view_id;
which counters are included is determined by using the plan_id and plan_step to index into the
SCI_LUT data products hi/lo tables. Maximum and nominal sizes are quoted per packet (for example,
128 energies x 24 positions x 12 spin angles x 32 bits x 22 instrument rate counters = 25,952,256
bits, or 3,244,032 bytes, for the uncollapsed, uncompressed case). The collapsed cubes are
optionally compressed with a table-based 24->8 bit lossy compression applied to each counter value
and/or LZMA lossless compression applied to the full field as a single unit. The field is padded so
that each packet is a multiple of 16 bits; pad bits are accounted for in the CCSDS header length
field but are not included in the byte_count field, and arrays too large for a single CCSDS packet
are split across packets using the CCSDS grouping flags. The lo direct-events packet instead
carries an optionally compressed event array (format TBD); each event consists of a 10-bit TOF, a
9-bit SSD energy, a 2-bit energy range, a 7-bit spin angle, a 4-bit SSD position, a 4-bit spin
number, and a 2-bit PHA type, with up to one byte of padding to keep the total packet size even.]
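The counter-data descriptions above mention two optional compression stages: a table-based 24->8
bit lossy step applied to each counter value, then LZMA applied losslessly to the full field. The
minimal sketch below shows how ground software might undo both stages; the expansion-table
contents, the function name, the assumed big-endian byte order, and the assumption that the LZMA
stream is in a container Python's lzma module can auto-detect are all placeholders, not CoDICE's
actual decompression code.

    import lzma

    import numpy as np

    # Hypothetical stand-in for the instrument's 24->8 bit lookup table: index = 8-bit
    # compressed code, value = representative 24-bit counter value (identity values here).
    LOSSY_8_TO_24_TABLE = np.arange(256, dtype=np.uint32)


    def expand_counter_field(raw: bytes, lossless: bool, lossy: bool) -> np.ndarray:
        """Undo the optional LZMA (lossless) and table-based 24->8 bit (lossy) compression."""
        if lossless:
            # LZMA is applied to the full data field as a single unit.
            raw = lzma.decompress(raw)
        if lossy:
            # Each byte is an index into the expansion table.
            return LOSSY_8_TO_24_TABLE[np.frombuffer(raw, dtype=np.uint8)]
        # Otherwise the field holds full-resolution counter values (32-bit, big-endian assumed).
        return np.frombuffer(raw, dtype=">u4")

For a field flagged as both lossy- and lossless-compressed, each decompressed byte would map back
to one representative counter value; reshaping into the collapsed spin-angle x position x energy
cubes then depends on the collapse table selected by the view_id.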
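The direct-events notes above list the bit fields that make up each event. The sketch below unpacks
one tightly packed event from an integer; the field order and MSB-first packing are assumptions for
illustration, since the packet notes leave the exact layout (tight packing vs. byte alignment) TBD.

    def unpack_direct_event(event_bits: int) -> dict:
        """Split a 38-bit CoDICE lo direct event into its fields.

        Assumed order (MSB first): TOF(10), SSD energy(9), energy range(2),
        spin angle(7), SSD position(4), spin number(4), PHA type(2).
        """
        fields = [
            ("tof", 10),
            ("ssd_energy", 9),
            ("energy_range", 2),
            ("spin_angle", 7),
            ("ssd_position", 4),
            ("spin_number", 4),
            ("pha_type", 2),
        ]
        shift = sum(width for _, width in fields)  # 38 bits per event
        event = {}
        for name, width in fields:
            shift -= width
            event[name] = (event_bits >> shift) & ((1 << width) - 1)
        return event


    # Example with a made-up 38-bit event value.
    print(unpack_direct_event(0b1010101010_101010101_01_0101010_1010_0101_10))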
diff --git a/imap_processing/codice/utils.py b/imap_processing/codice/utils.py
index bb5474dc83..fe1900a0a5 100644
--- a/imap_processing/codice/utils.py
+++ b/imap_processing/codice/utils.py
@@ -26,6 +26,8 @@ class ViewTabInfo:
         The APID for the packet.
     collapse_table : int
         Collapse table id used to determine the collapse pattern.
+    compression : int
+        Compression algorithm used for the packet.
     sensor : int
         Sensor id (0 for LO, 1 for HI).
     three_d_collapsed : int
@@ -36,6 +38,7 @@ class ViewTabInfo:
 
     apid: int
     collapse_table: int
+    compression: int
     sensor: int
     three_d_collapsed: int
     view_id: int
@@ -145,8 +148,9 @@ def get_view_tab_info(json_data: dict, view_id: int, apid: int) -> dict:
     apid_hex = f"0x{apid:X}"
     # This is how we get view information that will be used to get
     # collapse pattern:
-    # table_id -> view_tab -> (view_id, apid) -> sensor -> collapse_table
-    # 'view_tab': {'(0, 0x480)': {'collapse_table': 0, '3d_collapse': 1, 'sensor': 0}
+    # table_id -> view_tab -> (view_id, apid) -> sensor -> collapse_table -> compression
+    # 'view_tab': {'(0, 0x480)': {'collapse_table': 0, '3d_collapse': 1, 'sensor': 0,
+    #              'compression': 0}
     view_tab = json_data.get("view_tab").get(f"({view_id}, {apid_hex})")
     return view_tab
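The comment in get_view_tab_info above documents how the science LUT JSON is keyed. The small
illustration below performs that lookup against a hand-built dictionary; the values are invented,
and only the "(view_id, 0xAPID)" key format and the new compression entry follow the comment.

    # Invented stand-in for the l1a-sci-lut JSON contents.
    json_data = {
        "view_tab": {
            "(0, 0x480)": {
                "collapse_table": 0,
                "3d_collapse": 1,
                "sensor": 0,
                "compression": 0,
            },
        }
    }

    view_id, apid = 0, 0x480
    # Keys pair the view id with the hex-formatted APID, e.g. "(0, 0x480)".
    view_tab = json_data["view_tab"][f"({view_id}, 0x{apid:X})"]
    print(view_tab["collapse_table"], view_tab["compression"])  # -> 0 0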
diff --git a/imap_processing/tests/codice/conftest.py b/imap_processing/tests/codice/conftest.py
index f49c29857f..4ff50f42dd 100644
--- a/imap_processing/tests/codice/conftest.py
+++ b/imap_processing/tests/codice/conftest.py
@@ -170,7 +170,7 @@ def _side_effect(descriptor: str = None, data_type: str = None) -> list[Path]:
             / "codice"
             / "data"
             / "l1a_input"
-            / "imap_codice_hskp_20250814_v001.pkts"
+            / "imap_codice_l0_hskp_20250814_v001.pkts"
         ]
     if descriptor == "lo-nsw-species" and data_type == "l1b":
         return [
@@ -236,7 +236,13 @@ def _side_effect(descriptor: str = None, data_type: str = None) -> list[Path]:
         return [
             TEST_DATA_PATH
             / "l1a_lut"
-            / "imap_codice_l1a-sci-lut_20251007_v004.json"
+            / "imap_codice_l1a-sci-lut_20251007_v005.json"
+        ]
+    elif descriptor == "l1a-sci-lut-jan":
+        return [
+            TEST_DATA_PATH
+            / "l1a_lut"
+            / "imap_codice_l1a-sci-lut_20260129_v002.json"
         ]
     elif descriptor == "l2-hi-omni-efficiency":
         return [
@@ -280,6 +286,8 @@ def _side_effect(descriptor: str = None, data_type: str = None) -> list[Path]:
         return [
             TEST_DATA_PATH / "l2_lut/imap_codice_l2-hi-tof-table_20250101_v001.csv"
         ]
+    elif descriptor == "fsw-changes":
+        return [TEST_DATA_PATH / "l1a_input/imap_codice_l0_raw_20260130_v001.pkts"]
     else:
         raise ValueError(f"Unknown descriptor: {descriptor}")
diff --git a/imap_processing/tests/codice/test_codice_l1a.py b/imap_processing/tests/codice/test_codice_l1a.py
index bb4f54cc0f..5c27efd637 100644
--- a/imap_processing/tests/codice/test_codice_l1a.py
+++ b/imap_processing/tests/codice/test_codice_l1a.py
@@ -31,6 +31,55 @@
 pytestmark = pytest.mark.external_test_data
 
 
+@patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths")
+def test_updated_packet_version(mock_get_file_paths, codice_lut_path, caplog):
+    """Tests the new FSW changes (Jan 2026)."""
+    codice_lut_path_jan = codice_lut_path(descriptor="l1a-sci-lut-jan")
+    mock_get_file_paths.side_effect = [
+        codice_lut_path(descriptor="fsw-changes", data_type="l0"),
+        *([codice_lut_path_jan] * 20),
+    ]
+    datasets = process_l1a(dependency=ProcessingInputCollection())
+    # Assert that we have all of the expected datasets
+    assert len(datasets) == 17
+    for ds in datasets:
+        # Only check lo products. Skip direct-events
+        if (
+            "lo" not in ds.attrs["Data_type"]
+            or "direct-events" in ds.attrs["Data_type"]
+        ):
+            continue
+        # Check that the lo datasets contain the new unpacked variables
+        expected_vars = [
+            "rgfo_spin_sector",
+            "nso_spin_sector",
+            "rgfo_esa_step",
+            "nso_esa_step",
+        ]
+        for var in expected_vars:
+            assert var in ds.data_vars, (
+                f"Expected variable '{var}' not found in dataset"
+            )
+
+    # check that warnings are logged for missing "desired" species
+    assert (
+        "Desired species heplusplus not found in actual species names from LUT"
+        in caplog.text
+    )
+    assert (
+        "Desired species oplus6 not found in actual species names from LUT"
+        in caplog.text
+    )
+    assert (
+        "Desired species heplus not found in actual species names from LUT"
+        in caplog.text
+    )
+    assert (
+        "Desired species cnoplus not found in actual species names from LUT"
+        in caplog.text
+    )
+
+
 @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths")
 def test_hskp(mock_get_file_paths, codice_lut_path):
     """Tests the housekeeping."""
@@ -342,6 +391,10 @@ def test_lo_nsw_species(mock_get_file_paths, codice_lut_path):
     processed_data = process_l1a(dependency=ProcessingInputCollection())[0]
     # Compare only the common variables
     for variable in val_data.data_vars:
+        # Skip cnoplus because this variable should be thrown out for lo nsw species
+        # for table_ids <= 3978152295
+        if "cnoplus" in variable:
+            continue
         np.testing.assert_allclose(
             processed_data[variable].values,
             val_data[variable].values,
@@ -745,7 +798,8 @@ def test_direct_events_incomplete_groups(codice_lut_path, caplog):
     science_file = codice_lut_path(descriptor="lo-direct-events", data_type="l0")[0]
 
     xtce_file = (
-        imap_module_directory / "codice/packet_definitions/codice_packet_definition.xml"
+        imap_module_directory
+        / "codice/packet_definitions/imap_codice_packet-definition_20250101_v001.xml"
     )
     # Decom packet
     datasets_by_apid = packet_file_to_datasets(
diff --git a/imap_processing/tests/codice/test_codice_l1b.py b/imap_processing/tests/codice/test_codice_l1b.py
index 10b353caa6..bf5e498a35 100644
--- a/imap_processing/tests/codice/test_codice_l1b.py
+++ b/imap_processing/tests/codice/test_codice_l1b.py
@@ -103,6 +103,10 @@ def test_l1b_lo_nsw_species(mock_get_file_paths, codice_lut_path):
     processed_data = process_codice_l1b(processed_l1a_file)
 
     for variable in l1b_val_data.data_vars:
+        # Skip cnoplus because this variable should be thrown out for lo nsw species
+        # for table_ids <= 3978152295
+        if "cnoplus" in variable:
+            continue
         np.testing.assert_allclose(
             processed_data[variable].values,
             l1b_val_data[variable].values,
"half_spin_per_esa_step": ( + ( + "epoch", + "esa_step", + ), + mock_half_spin_per_esa_step, + ), }, attrs={"Logical_file_id": "imap_codice_l1b_lo-sw-species_20250101_v001"}, ) @@ -125,7 +133,7 @@ def test_compute_geometric_factors_past_nov_24th(mock_half_spin_per_esa_step): "epoch", "esa_step", ), - np.tile(mock_half_spin_per_esa_step, (2, 1)), + mock_half_spin_per_esa_step, ), }, # Make sure epoch is past Nov 24th, 2025 @@ -147,7 +155,13 @@ def test_compute_geometric_factors_all_reduced_mode(mock_half_spin_per_esa_step) dataset = xr.Dataset( { "rgfo_half_spin": (("epoch",), np.array([1])), - "half_spin_per_esa_step": (("esa_step",), mock_half_spin_per_esa_step), + "half_spin_per_esa_step": ( + ( + "epoch", + "esa_step", + ), + mock_half_spin_per_esa_step[0:1], + ), }, attrs={"Logical_file_id": "imap_codice_l1b_lo-sw-species_20250101_v001"}, ) @@ -167,7 +181,13 @@ def test_compute_geometric_factors_mixed(mock_half_spin_per_esa_step): dataset = xr.Dataset( { "rgfo_half_spin": (("epoch",), np.array([2])), - "half_spin_per_esa_step": (("esa_step",), mock_half_spin_per_esa_step), + "half_spin_per_esa_step": ( + ( + "epoch", + "esa_step", + ), + mock_half_spin_per_esa_step[0:1], + ), }, attrs={"Logical_file_id": "imap_codice_l1b_lo-sw-species_20250101_v001"}, ) @@ -295,6 +315,9 @@ def test_process_lo_missing_species_intensity(): { "epoch": ("epoch", np.ones(5)), "energy_table": (("esa_step",), np.ones(128) * 10), + "packet_version": ("epoch", np.ones(5)), + "half_spin_per_esa_step": (("epoch", "esa_step"), np.ones((5, 128)) * 2), + "rgfo_half_spin": ("epoch", np.ones(5) * 2), } ) @@ -310,20 +333,14 @@ def test_process_lo_missing_species_intensity(): ), ): len_pos = 5 - process_lo_species_intensity( - l1b_val_data_processed, - LO_SW_SOLAR_WIND_SPECIES_VARIABLE_NAMES, - gf, - None, - list(np.arange(0, len_pos)), - ) - - for var in LO_SW_SOLAR_WIND_SPECIES_VARIABLE_NAMES: - assert var in l1b_val_data_processed, f"Missing variable {var} after processing" - # Check that all the missing species are filled with NaNs - assert not np.any(np.isfinite(l1b_val_data_processed[var].values)), ( - f"Variable {var} should be all NaNs" - ) + with pytest.raises(ValueError, match="Species hplus not found in dataset"): + process_lo_species_intensity( + l1b_val_data_processed, + LO_SW_SOLAR_WIND_SPECIES_VARIABLE_NAMES, + gf, + None, + list(np.arange(0, len_pos)), + ) def test_process_lo_angular_intensity(mock_get_file_paths, codice_lut_path): @@ -351,11 +368,15 @@ def test_process_lo_angular_intensity(mock_get_file_paths, codice_lut_path): ) for var in LO_SW_ANGULAR_VARIABLE_NAMES: + # Heplus is not in older CDFs + if var == "heplus" or var not in l1b_val_data_processed: + continue assert var in l1b_val_data_processed, f"Missing variable {var} after processing" # Check that values are non-negative - assert np.all(l1b_val_data_processed[var].values >= 0), ( - f"Variable {var} contains negative values" - ) + assert np.all( + (l1b_val_data_processed[var].values >= 0) + | np.isnan(l1b_val_data_processed[var].values) + ), f"Variable {var} contains negative values" # Check shape expected_shape = ( len(l1b_data.epoch), @@ -461,6 +482,10 @@ def test_codice_l2_nsw_species_intensity(mock_get_file_paths, codice_lut_path): ) l2_val_data = load_cdf(l2_val_data) for variable in l2_val_data.data_vars: + # Skip cnopus because this variable should be thrown out for lo nsw species + # for table_ids <= 3978152295 + if "cnoplus" in variable: + continue # NOTE: Replace nan with 0 for comparison as the validation data uses 0 
@@ -295,6 +315,9 @@ def test_process_lo_missing_species_intensity():
         {
             "epoch": ("epoch", np.ones(5)),
             "energy_table": (("esa_step",), np.ones(128) * 10),
+            "packet_version": ("epoch", np.ones(5)),
+            "half_spin_per_esa_step": (("epoch", "esa_step"), np.ones((5, 128)) * 2),
+            "rgfo_half_spin": ("epoch", np.ones(5) * 2),
         }
     )
@@ -310,20 +333,14 @@
         ),
     ):
         len_pos = 5
-        process_lo_species_intensity(
-            l1b_val_data_processed,
-            LO_SW_SOLAR_WIND_SPECIES_VARIABLE_NAMES,
-            gf,
-            None,
-            list(np.arange(0, len_pos)),
-        )
-
-    for var in LO_SW_SOLAR_WIND_SPECIES_VARIABLE_NAMES:
-        assert var in l1b_val_data_processed, f"Missing variable {var} after processing"
-        # Check that all the missing species are filled with NaNs
-        assert not np.any(np.isfinite(l1b_val_data_processed[var].values)), (
-            f"Variable {var} should be all NaNs"
-        )
+        with pytest.raises(ValueError, match="Species hplus not found in dataset"):
+            process_lo_species_intensity(
+                l1b_val_data_processed,
+                LO_SW_SOLAR_WIND_SPECIES_VARIABLE_NAMES,
+                gf,
+                None,
+                list(np.arange(0, len_pos)),
+            )
 
 
 def test_process_lo_angular_intensity(mock_get_file_paths, codice_lut_path):
@@ -351,11 +368,15 @@ def test_process_lo_angular_intensity(mock_get_file_paths, codice_lut_path):
     )
 
     for var in LO_SW_ANGULAR_VARIABLE_NAMES:
+        # Heplus is not in older CDFs
+        if var == "heplus" or var not in l1b_val_data_processed:
+            continue
         assert var in l1b_val_data_processed, f"Missing variable {var} after processing"
         # Check that values are non-negative
-        assert np.all(l1b_val_data_processed[var].values >= 0), (
-            f"Variable {var} contains negative values"
-        )
+        assert np.all(
+            (l1b_val_data_processed[var].values >= 0)
+            | np.isnan(l1b_val_data_processed[var].values)
+        ), f"Variable {var} contains negative values"
         # Check shape
         expected_shape = (
             len(l1b_data.epoch),
@@ -461,6 +482,10 @@ def test_codice_l2_nsw_species_intensity(mock_get_file_paths, codice_lut_path):
     )
     l2_val_data = load_cdf(l2_val_data)
     for variable in l2_val_data.data_vars:
+        # Skip cnoplus because this variable should be thrown out for lo nsw species
+        # for table_ids <= 3978152295
+        if "cnoplus" in variable:
+            continue
         # NOTE: Replace nan with 0 for comparison as the validation data uses 0
         processed_val = processed_2_ds[variable].values
         processed_val[np.isnan(processed_val)] = 0.0
@@ -489,7 +514,7 @@ def test_codice_l2_nsw_angular_intensity(mock_get_file_paths, codice_lut_path):
         codice_lut_path(descriptor="l2-lo-gfactor"),
         codice_lut_path(descriptor="l2-lo-efficiency"),
     ]
-    processed_2_ds = process_codice_l2("lo-nsw-species", ProcessingInputCollection())
+    processed_2_ds = process_codice_l2("lo-nsw-angular", ProcessingInputCollection())
     l2_val_data = (
         imap_module_directory
         / "tests"
         / "codice"
@@ -565,6 +590,32 @@ def test_codice_l2_sw_angular_intensity(mock_get_file_paths, codice_lut_path):
     write_cdf(processed_2_ds)
 
 
+@patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths")
+def test_codice_l2_sw_angular_intensity_rgfo_masking(
+    mock_get_file_paths, codice_lut_path
+):
+    """Tests RGFO masking after FSW changes (Jan 2026)."""
+    codice_lut_path_jan = codice_lut_path(descriptor="l1a-sci-lut-jan")
+    mock_get_file_paths.side_effect = [
+        codice_lut_path(descriptor="fsw-changes", data_type="l0"),
+        *([codice_lut_path_jan] * 20),
+    ]
+    datasets = process_l1a(dependency=ProcessingInputCollection())
+
+    ang_dataset = next(ds for ds in datasets if "angular" in ds.attrs["Data_type"])
+    # process the first angular dataset
+    processed_l1a_file = write_cdf(ang_dataset)
+    processed_l1b_file = write_cdf(process_codice_l1b(processed_l1a_file))
+    # Mock get_files for l2
+    mock_get_file_paths.side_effect = [
+        [processed_l1b_file.as_posix()],
+        codice_lut_path(descriptor="l2-lo-gfactor"),
+        codice_lut_path(descriptor="l2-lo-efficiency"),
+    ]
+    # TODO verify the results using validation data once we have some
+    process_codice_l2("lo-nsw-angular", ProcessingInputCollection())
+
+
 @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths")
 def test_codice_l2_lo_de(mock_get_file_paths, codice_lut_path):
     mock_get_file_paths.side_effect = [
diff --git a/imap_processing/tests/external_test_data_config.py b/imap_processing/tests/external_test_data_config.py
index 70b178fcba..ad0702474d 100644
--- a/imap_processing/tests/external_test_data_config.py
+++ b/imap_processing/tests/external_test_data_config.py
@@ -33,10 +33,12 @@
     ("imap_codice_l0_hi-sectored_20250814_v001.pkts", "codice/data/l1a_input/"),
     ("imap_codice_l0_hi-priority_20250814_v001.pkts", "codice/data/l1a_input/"),
     ("imap_codice_l0_hi-direct-events_20250814_v001.pkts", "codice/data/l1a_input/"),
-    ("imap_codice_hskp_20250814_v001.pkts", "codice/data/l1a_input/"),
+    ("imap_codice_l0_hskp_20250814_v001.pkts", "codice/data/l1a_input/"),
+    ("imap_codice_l0_raw_20260130_v001.pkts", "codice/data/l1a_input/"),
     # L1A LUT
-    ("imap_codice_l1a-sci-lut_20251007_v004.json", "codice/data/l1a_lut/"),
+    ("imap_codice_l1a-sci-lut_20251007_v005.json", "codice/data/l1a_lut/"),
+    ("imap_codice_l1a-sci-lut_20260129_v002.json", "codice/data/l1a_lut/"),
     # L1A validation data
     (f"imap_codice_l1a_hi-counters-aggregated_{VALIDATION_FILE_DATE}_{VALIDATION_FILE_VERSION}.cdf", "codice/data/l1a_validation"),
diff --git a/imap_processing/tests/ialirt/unit/test_process_codice.py b/imap_processing/tests/ialirt/unit/test_process_codice.py
index e40fd0500c..9aad28e516 100644
--- a/imap_processing/tests/ialirt/unit/test_process_codice.py
+++ b/imap_processing/tests/ialirt/unit/test_process_codice.py
@@ -372,7 +372,7 @@ def l1a_lut_path():
         / "codice"
         / "data"
        / "l1a_lut"
-        / "imap_codice_l1a-sci-lut_20251007_v004.json"
+        / "imap_codice_l1a-sci-lut_20251007_v005.json"
     )
     return lut_path
@@ -401,7 +401,7 @@ def l2_processing_dependencies():
         / "codice"
         / "data"
         / "l2_lut"
-        / "imap_codice_l2-lo-efficiency_20251008_v001.csv"
+        / "imap_codice_l2-lo-efficiency_20251212_v003.csv"
     )
     gf_path = (
         imap_module_directory
@@ -409,7 +409,7 @@ def l2_processing_dependencies():
         / "codice"
         / "data"
         / "l2_lut"
-        / "imap_codice_l2-lo-gfactor_20251008_v001.csv"
+        / "imap_codice_l2-lo-gfactor_20251212_v003.csv"
     )
 
     return eff_path, gf_path
diff --git a/imap_processing/ultra/constants.py b/imap_processing/ultra/constants.py
index 983d29541f..d41cd0ff50 100644
--- a/imap_processing/ultra/constants.py
+++ b/imap_processing/ultra/constants.py
@@ -81,7 +81,7 @@ class UltraConstants:
     CULLING_RPM_MIN = 2.0
     CULLING_RPM_MAX = 6.0
 
-    # Thresholds for culling based on counts (keV).
+    # Energy bounds for culling (keV).
     CULLING_ENERGY_BIN_EDGES: ClassVar[list] = [
         3.0,
         10.0,