51 changes: 43 additions & 8 deletions imap_processing/codice/codice_l1a_de.py
@@ -384,16 +384,51 @@ def process_de_data(
             f"Found {len(incomplete_times)} incomplete priority group(s) "
             f"for APID {apid}. Expected {num_priorities} packets per group. "
             f"Incomplete groups at acq_start_seconds {incomplete_times.tolist()} "
-            f"with counts {incomplete_counts.tolist()}. Dropping these packets."
+            f"with counts {incomplete_counts.tolist()}. Padding with zeros."
         )
 
-        # Keep only complete groups
-        complete_times = unique_times[~incomplete_mask]
-        keep_mask = np.isin(acq_start_seconds, complete_times)
-        packets = packets.isel(epoch=keep_mask)
-
-        # Calculate number of epochs from complete groups
-        num_epochs = len(complete_times)
+    # Create a list of groups with padding if any priorities are missing
+    padded_groups = []
+    for time, count in zip(unique_times, counts, strict=False):
+        # Get the packets for this group
+        group_ids = np.where(acq_start_seconds == time)[0]
[Inline review thread on the line above]

Contributor: Just checking. Will the acquisition time always be the same for each group?

Collaborator: Yes, it is, and that is the only way to identify a group from what we've found so far.
+        group = packets.isel(epoch=group_ids)
+        if count < num_priorities:
+            # Find missing priorities
+            existing_priorities = set(group["priority"].values)
+            missing_priorities = sorted(
+                set(range(num_priorities)) - existing_priorities
+            )
+            num_missing_priorities = len(missing_priorities)
+            # Use first packet as a template and expand along the epoch dimension
+            # for the number of missing priorities.
+            pad_packet = group.isel(epoch=[0] * num_missing_priorities).copy()
+            # Set padding values to zero
+            pad_packet["num_events"].values = np.full(num_missing_priorities, 0)
+            pad_packet["byte_count"].values = np.full(num_missing_priorities, 0)
+            pad_packet["priority"].values = missing_priorities
+            # Set event_data to empty object arrays for padding packets
+            for i in range(num_missing_priorities):
+                pad_packet["event_data"].data[i] = np.array([], dtype=np.uint8)
+            # Concatenate the existing priorities with the zero-padded pad packets
+            group = xr.concat([group, pad_packet], dim="epoch")
+            # Sort by priority
+            sort_idx = np.argsort(group["priority"].values)
+            group = group.isel(epoch=sort_idx)
+        elif count > num_priorities:
+            # TODO is this possible?
+            # Sort by priority
+            sort_idx = np.argsort(group["priority"].values)
+            group = group.isel(epoch=sort_idx)
+            # Keep only the first num_priorities packets
+            group = group.isel(epoch=slice(0, num_priorities))
+        padded_groups.append(group)
+
+    # Concatenate all groups
+    packets = xr.concat(padded_groups, dim="epoch")
+
+    # Calculate number of epochs
+    num_epochs = len(unique_times)
 
     # Create dataset with coordinates
     de_data = _create_dataset_coords(packets, apid, num_priorities, cdf_attrs)
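For reviewers skimming the diff, here is a minimal, self-contained sketch of the padding technique above. It uses a synthetic xarray Dataset with three expected priorities where priority 1 is missing; the variable names mirror the diff, but the data and the reduced set of variables are made up:

import numpy as np
import xarray as xr

num_priorities = 3
# Synthetic "group" of packets: priority 1 is missing
group = xr.Dataset(
    {
        "priority": ("epoch", np.array([0, 2])),
        "num_events": ("epoch", np.array([5, 7])),
    }
)
missing = sorted(set(range(num_priorities)) - set(group["priority"].values))
# Replicate the first packet once per missing priority, then zero it out
pad = group.isel(epoch=[0] * len(missing)).copy()
pad["num_events"].values[:] = 0
pad["priority"].values[:] = missing
# Append the pad packets and restore priority order
group = xr.concat([group, pad], dim="epoch")
group = group.isel(epoch=np.argsort(group["priority"].values))
print(group["priority"].values)    # [0 1 2]
print(group["num_events"].values)  # [5 0 7]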
4 changes: 3 additions & 1 deletion imap_processing/codice/codice_l1a_lo_angular.py
@@ -223,7 +223,9 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     )
     # For every energy after nso_half_spin, set data to fill values
     nso_half_spin = unpacked_dataset["nso_half_spin"].values
-    nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
+    nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | (
+        half_spin_per_esa_step == HALF_SPIN_FILLVAL
+    )
     species_mask = nso_mask[:, np.newaxis, :, np.newaxis, np.newaxis]
     species_mask = np.broadcast_to(species_mask, species_data.shape)
     species_data = species_data.astype(np.float64)
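The same two-term mask change appears in each of the five codice_l1a_lo_* files in this PR. As a rough illustration of why the added equality term matters, here is a sketch on synthetic arrays; the value of HALF_SPIN_FILLVAL and the case of an unset nso_half_spin are assumptions for demonstration only:

import numpy as np

HALF_SPIN_FILLVAL = 255  # assumed fill value, for illustration only

# (epoch, esa_step) half-spin indices; the last step is flagged as fill
half_spin_per_esa_step = np.array([[0, 1, 2, HALF_SPIN_FILLVAL]])
# Assume nso_half_spin is itself unset (fill) for this epoch, so the
# ">" comparison alone would mask nothing
nso_half_spin = np.array([HALF_SPIN_FILLVAL])

nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | (
    half_spin_per_esa_step == HALF_SPIN_FILLVAL
)
data = np.arange(4, dtype=np.float64).reshape(1, 4)
data[nso_mask] = np.nan  # stand-in for the actual CDF fill value
print(data)  # [[ 0.  1.  2. nan]]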
4 changes: 3 additions & 1 deletion imap_processing/codice/codice_l1a_lo_counters_aggregated.py
@@ -140,7 +140,9 @@ def l1a_lo_counters_aggregated(
     )
     # For every energy after nso_half_spin, set data to fill values
     nso_half_spin = unpacked_dataset["nso_half_spin"].values
-    nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
+    nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | (
+        half_spin_per_esa_step == HALF_SPIN_FILLVAL
+    )
     counters_mask = nso_mask[:, :, np.newaxis, np.newaxis]
     counters_mask = np.broadcast_to(counters_mask, counters_data.shape)
     counters_data = counters_data.astype(np.float64)
4 changes: 3 additions & 1 deletion imap_processing/codice/codice_l1a_lo_counters_singles.py
@@ -137,7 +137,9 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.
     )
     # For every energy after nso_half_spin, set data to fill values
     nso_half_spin = unpacked_dataset["nso_half_spin"].values
-    nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
+    nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | (
+        half_spin_per_esa_step == HALF_SPIN_FILLVAL
+    )
     counters_mask = nso_mask[:, :, np.newaxis, np.newaxis]
     counters_mask = np.broadcast_to(counters_mask, counters_data.shape)
     counters_data = counters_data.astype(np.float64)
4 changes: 3 additions & 1 deletion imap_processing/codice/codice_l1a_lo_priority.py
@@ -160,7 +160,9 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     )
     # For every energy after nso_half_spin, set data to fill values
     nso_half_spin = unpacked_dataset["nso_half_spin"].values
-    nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
+    nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | (
+        half_spin_per_esa_step == HALF_SPIN_FILLVAL
+    )
     species_mask = nso_mask[:, np.newaxis, :, np.newaxis]
     species_mask = np.broadcast_to(species_mask, species_data.shape)
     species_data = species_data.astype(np.float64)
4 changes: 3 additions & 1 deletion imap_processing/codice/codice_l1a_lo_species.py
@@ -150,7 +150,9 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     )
     # For every energy after nso_half_spin, set data to fill values
     nso_half_spin = unpacked_dataset["nso_half_spin"].values
-    nso_mask = half_spin_per_esa_step > nso_half_spin[:, np.newaxis]
+    nso_mask = (half_spin_per_esa_step > nso_half_spin[:, np.newaxis]) | (
+        half_spin_per_esa_step == HALF_SPIN_FILLVAL
+    )
     species_mask = nso_mask[:, np.newaxis, :, np.newaxis]
     species_mask = np.repeat(species_mask, num_species, 1)
     species_data = species_data.astype(np.float64)
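One subtle difference from the other four files: lo_species materializes the mask with np.repeat rather than np.broadcast_to. Both yield the same boolean values; repeat allocates a writable copy while broadcast_to returns a read-only view. A quick self-contained check (shapes here are arbitrary):

import numpy as np

mask = np.array([[True, False]])[:, np.newaxis, :]  # shape (1, 1, 2)
repeated = np.repeat(mask, 3, 1)                    # copy, shape (1, 3, 2)
broadcast = np.broadcast_to(mask, (1, 3, 2))        # read-only view
print(np.array_equal(repeated, broadcast))                  # True
print(repeated.flags.writeable, broadcast.flags.writeable)  # True False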
37 changes: 36 additions & 1 deletion imap_processing/tests/codice/test_codice_l1a.py
@@ -19,11 +19,13 @@
 from imap_processing.cdf.utils import load_cdf, write_cdf
 from imap_processing.codice import constants
 from imap_processing.codice.codice_l1a import process_l1a
-from imap_processing.codice.utils import read_sci_lut
+from imap_processing.codice.codice_l1a_de import l1a_direct_event
+from imap_processing.codice.utils import CODICEAPID, read_sci_lut
 from imap_processing.tests.codice.conftest import (
     VALIDATION_FILE_DATE,
     VALIDATION_FILE_VERSION,
 )
+from imap_processing.utils import packet_file_to_datasets
 
 logger = logging.getLogger(__name__)
 pytestmark = pytest.mark.external_test_data
@@ -736,6 +738,39 @@ def test_lo_direct_events(mock_get_file_paths, codice_lut_path):
     )
 
 
+def test_direct_events_incomplete_groups(codice_lut_path, caplog):
+    """Tests lo-direct-events with a packet file containing incomplete groups."""
+
+    # Get science data, which is an L0 packet file
+    science_file = codice_lut_path(descriptor="lo-direct-events", data_type="l0")[0]
+
+    xtce_file = (
+        imap_module_directory / "codice/packet_definitions/codice_packet_definition.xml"
+    )
+    # Decom packet
+    datasets_by_apid = packet_file_to_datasets(
+        science_file,
+        xtce_file,
+    )
+    apid = CODICEAPID.COD_LO_PHA
+    de_dataset = datasets_by_apid[apid]
+    # Drop the first packet to test incomplete group handling.
+    # This mocks the case when one priority group is incomplete;
+    # in this example, the first group is missing the first priority.
+    len_epoch = de_dataset.sizes["epoch"]
+    de_dataset = de_dataset.isel(epoch=slice(1, len_epoch))
+    dataset = l1a_direct_event(de_dataset, apid)
+    # Check that fill values are used for the missing first priority of the first epoch
+    assert np.all(dataset.tof[0, 0, :].values == 65535)
+    # Check that there is data for the remaining priorities
+    assert np.any(dataset.tof[0, 1:, :].values != 65535)
+    # Check logs for incomplete groups
+    assert (
+        f"Found 1 incomplete priority group(s) for APID {apid}. "
+        f"Expected 8 packets per group"
+    ) in caplog.text
+
+
 @patch("imap_data_access.processing_input.ProcessingInputCollection.get_file_paths")
 def test_hi_direct_events(mock_get_file_paths, codice_lut_path):
     """Tests hi-direct-events."""
Expand Down