imap_processing/codice/codice_l1a_lo_angular.py (7 changes: 4 additions & 3 deletions)

@@ -8,6 +8,7 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
+from imap_processing.codice.constants import HALF_SPIN_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     CODICEAPID,
@@ -197,12 +198,12 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     ]
     voltage_data = sci_lut_data["esa_sweep_tab"][f"{esa_table_number}"]
 
-    # If data size is less than 128, pad with nan to make it 128
+    # If data size is less than 128, pad with fillval to make it 128
     half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data")
     if len(half_spin_per_esa_step) < constants.NUM_ESA_STEPS:
         pad_size = constants.NUM_ESA_STEPS - len(half_spin_per_esa_step)
         half_spin_per_esa_step = np.concatenate(
-            (np.array(half_spin_per_esa_step), np.full(pad_size, np.nan))
+            (np.array(half_spin_per_esa_step), np.full(pad_size, HALF_SPIN_FILLVAL))
         )
     # TODO: Handle epoch dependent acquisition time and half spin per esa step
     # For now, just tile the same array for all epochs.
@@ -228,7 +229,7 @@ def l1a_lo_angular(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     species_data = species_data.astype(np.float64)
     species_data[species_mask] = np.nan
     # Set half_spin_per_esa_step to (fillval) where nso_mask is True
-    half_spin_per_esa_step[nso_mask] = 63
+    half_spin_per_esa_step[nso_mask] = HALF_SPIN_FILLVAL
     # Set acquisition_time_per_step to nan where nso_mask is True
     acquisition_time_per_step[nso_mask] = np.nan
     # ========= Get Epoch Time Data ===========
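The padding and NSO-step masking change above is repeated in each of the five L1A Lo modules in this diff. Below is a minimal, self-contained sketch of the pattern, not code from the repository: the nso_mask and the data values are stand-ins, and NUM_ESA_STEPS is assumed to be 128 as the "make it 128" comments suggest.

```python
import numpy as np

NUM_ESA_STEPS = 128       # assumed value of constants.NUM_ESA_STEPS ("make it 128" in the comments)
HALF_SPIN_FILLVAL = 63    # integer fill value added to constants.py in this diff

# Pretend lo_stepping_tab "row_number" column with fewer than 128 entries.
half_spin_per_esa_step = np.tile([2, 4, 6, 8], 30)   # 120 made-up half-spin values

# Pad to 128 with the integer fill value instead of np.nan, so the array
# keeps an integer dtype instead of being promoted to float.
if len(half_spin_per_esa_step) < NUM_ESA_STEPS:
    pad_size = NUM_ESA_STEPS - len(half_spin_per_esa_step)
    half_spin_per_esa_step = np.concatenate(
        (np.array(half_spin_per_esa_step), np.full(pad_size, HALF_SPIN_FILLVAL))
    )

# Stand-in for the real nso_mask: flag a few ESA steps as non-science (NSO).
nso_mask = np.zeros(NUM_ESA_STEPS, dtype=bool)
nso_mask[50:60] = True
half_spin_per_esa_step[nso_mask] = HALF_SPIN_FILLVAL

print(half_spin_per_esa_step.dtype)                          # int64: dtype is preserved
print((half_spin_per_esa_step == HALF_SPIN_FILLVAL).sum())   # 18 fill-valued steps (8 padded + 10 masked)
```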

imap_processing/codice/codice_l1a_lo_counters_aggregated.py (7 changes: 4 additions & 3 deletions)

@@ -8,6 +8,7 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
+from imap_processing.codice.constants import HALF_SPIN_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     ViewTabInfo,
@@ -114,12 +115,12 @@ def l1a_lo_counters_aggregated(
         -1, esa_step, num_variables, spin_sector_pairs
     )
 
-    # If data size is less than 128, pad with nan to make it 128
+    # If data size is less than 128, pad with fillval to make it 128
     half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data")
     if len(half_spin_per_esa_step) < constants.NUM_ESA_STEPS:
         pad_size = constants.NUM_ESA_STEPS - len(half_spin_per_esa_step)
         half_spin_per_esa_step = np.concatenate(
-            (np.array(half_spin_per_esa_step), np.full(pad_size, np.nan))
+            (np.array(half_spin_per_esa_step), np.full(pad_size, HALF_SPIN_FILLVAL))
         )
     # TODO: Handle epoch dependent acquisition time and half spin per esa step
     # For now, just tile the same array for all epochs.
@@ -145,7 +146,7 @@ def l1a_lo_counters_aggregated(
     counters_data = counters_data.astype(np.float64)
     counters_data[counters_mask] = np.nan
     # Set half_spin_per_esa_step to (fillval) where nso_mask is True
-    half_spin_per_esa_step[nso_mask] = 63
+    half_spin_per_esa_step[nso_mask] = HALF_SPIN_FILLVAL
     # Set acquisition_time_per_step to nan where nso_mask is True
     acquisition_time_per_step[nso_mask] = np.nan
 

imap_processing/codice/codice_l1a_lo_counters_singles.py (7 changes: 4 additions & 3 deletions)

@@ -8,6 +8,7 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
+from imap_processing.codice.constants import HALF_SPIN_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     ViewTabInfo,
@@ -111,12 +112,12 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.
         .transpose(0, 1, 3, 2)
     )
 
-    # If data size is less than 128, pad with nan to make it 128
+    # If data size is less than 128, pad with fillval to make it 128
     half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data")
     if len(half_spin_per_esa_step) < constants.NUM_ESA_STEPS:
        pad_size = constants.NUM_ESA_STEPS - len(half_spin_per_esa_step)
         half_spin_per_esa_step = np.concatenate(
-            (np.array(half_spin_per_esa_step), np.full(pad_size, np.nan))
+            (np.array(half_spin_per_esa_step), np.full(pad_size, HALF_SPIN_FILLVAL))
         )
     # TODO: Handle epoch dependent acquisition time and half spin per esa step
     # For now, just tile the same array for all epochs.
@@ -142,7 +143,7 @@ def l1a_lo_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.
     counters_data = counters_data.astype(np.float64)
     counters_data[counters_mask] = np.nan
     # Set half_spin_per_esa_step to (fillval) where nso_mask is True
-    half_spin_per_esa_step[nso_mask] = 63
+    half_spin_per_esa_step[nso_mask] = HALF_SPIN_FILLVAL
     # Set acquisition_time_per_step to nan where nso_mask is True
     acquisition_time_per_step[nso_mask] = np.nan
 

imap_processing/codice/codice_l1a_lo_priority.py (7 changes: 4 additions & 3 deletions)

@@ -8,6 +8,7 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
+from imap_processing.codice.constants import HALF_SPIN_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     CODICEAPID,
@@ -133,12 +134,12 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
         num_packets, num_species, esa_steps, collapse_shape[0]
     )
 
-    # If data size is less than 128, pad with nan to make it 128
+    # If data size is less than 128, pad with fillval to make it 128
     half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data")
     if len(half_spin_per_esa_step) < constants.NUM_ESA_STEPS:
         pad_size = constants.NUM_ESA_STEPS - len(half_spin_per_esa_step)
         half_spin_per_esa_step = np.concatenate(
-            (np.array(half_spin_per_esa_step), np.full(pad_size, np.nan))
+            (np.array(half_spin_per_esa_step), np.full(pad_size, HALF_SPIN_FILLVAL))
         )
 
     # TODO: Handle epoch dependent acquisition time and half spin per esa step
@@ -165,7 +166,7 @@ def l1a_lo_priority(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     species_data = species_data.astype(np.float64)
     species_data[species_mask] = np.nan
     # Set half_spin_per_esa_step to (fillval) where nso_mask is True
-    half_spin_per_esa_step[nso_mask] = 63
+    half_spin_per_esa_step[nso_mask] = HALF_SPIN_FILLVAL
     # Set acquisition_time_per_step to nan where nso_mask is True
     acquisition_time_per_step[nso_mask] = np.nan
 

imap_processing/codice/codice_l1a_lo_species.py (7 changes: 4 additions & 3 deletions)

@@ -8,6 +8,7 @@
 
 from imap_processing.cdf.imap_cdf_manager import ImapCdfAttributes
 from imap_processing.codice import constants
+from imap_processing.codice.constants import HALF_SPIN_FILLVAL
 from imap_processing.codice.decompress import decompress
 from imap_processing.codice.utils import (
     CODICEAPID,
@@ -121,12 +122,12 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
         num_packets, num_species, esa_steps, *collapsed_shape
     )
 
-    # If data size is less than 128, pad with nan to make it 128
+    # If data size is less than 128, pad with fillval to make it 128
     half_spin_per_esa_step = sci_lut_data["lo_stepping_tab"]["row_number"].get("data")
    if len(half_spin_per_esa_step) < constants.NUM_ESA_STEPS:
         pad_size = constants.NUM_ESA_STEPS - len(half_spin_per_esa_step)
         half_spin_per_esa_step = np.concatenate(
-            (np.array(half_spin_per_esa_step), np.full(pad_size, np.nan))
+            (np.array(half_spin_per_esa_step), np.full(pad_size, HALF_SPIN_FILLVAL))
         )
 
     acquisition_time_per_step = calculate_acq_time_per_step(
@@ -155,7 +156,7 @@ def l1a_lo_species(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
     species_data = species_data.astype(np.float64)
     species_data[species_mask] = np.nan
     # Set half_spin_per_esa_step to (fillval) where nso_mask is True
-    half_spin_per_esa_step[nso_mask] = 63
+    half_spin_per_esa_step[nso_mask] = HALF_SPIN_FILLVAL
     # Set acquisition_time_per_step to nan where nso_mask is True
     acquisition_time_per_step[nso_mask] = np.nan
 

imap_processing/codice/codice_l2.py (15 changes: 12 additions & 3 deletions)

@@ -23,6 +23,7 @@
 from imap_processing.cdf.utils import load_cdf
 from imap_processing.codice.constants import (
     GAIN_ID_TO_STR,
+    HALF_SPIN_FILLVAL,
     HI_L2_ELEVATION_ANGLE,
     HI_OMNI_VARIABLE_NAMES,
     HI_SECTORED_VARIABLE_NAMES,
@@ -323,8 +324,14 @@ def compute_geometric_factors(
         raise ValueError("Dataset is missing Logical_file_id attribute.")
     processing_date = datetime.datetime.strptime(start_date.split("_")[4], "%Y%m%d")
     date_switch = datetime.datetime(2025, 11, 24)
+    # Only consider valid half spins
+    valid_half_spin = half_spin_per_esa_step != HALF_SPIN_FILLVAL
     if processing_date < date_switch:
-        modes = (half_spin_per_esa_step > rgfo_half_spin) & (rgfo_half_spin > 0)
+        modes = (
+            valid_half_spin
+            & (half_spin_per_esa_step > rgfo_half_spin)
+            & (rgfo_half_spin > 0)
+        )
     else:
         # After November 24th, 2025, we no longer apply reduced geometric factors;
         # always use the full geometric factor lookup.
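To make the effect of the new valid_half_spin mask in compute_geometric_factors concrete, here is a toy example; the array values and the scalar rgfo_half_spin threshold are assumptions for illustration, not values from the mission data, and modes_old/modes_new are names invented for the comparison.

```python
import numpy as np

HALF_SPIN_FILLVAL = 63

# Toy stepping table: four real half-spin values plus two fill-valued (padded/NSO) steps.
half_spin_per_esa_step = np.array([2, 5, 8, 11, HALF_SPIN_FILLVAL, HALF_SPIN_FILLVAL])
rgfo_half_spin = 6   # stand-in threshold for the reduced-geometric-factor comparison

valid_half_spin = half_spin_per_esa_step != HALF_SPIN_FILLVAL

# Without the validity mask, the fill value (63) satisfies "> rgfo_half_spin",
# so padded steps would be swept into the reduced-geometric-factor selection.
modes_old = (half_spin_per_esa_step > rgfo_half_spin) & (rgfo_half_spin > 0)
modes_new = valid_half_spin & (half_spin_per_esa_step > rgfo_half_spin) & (rgfo_half_spin > 0)

print(modes_old)   # [False False  True  True  True  True]
print(modes_new)   # [False False  True  True False False]
```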

@@ -582,8 +589,10 @@ def process_lo_angular_intensity(
     # is odd, the configuration is B.
     # TODO handle when half_spin_per_esa_step changes in the middle of the dataset
     half_spin_per_esa_step = dataset["half_spin_per_esa_step"].data[0]
-    a_inds = np.where(half_spin_per_esa_step % 2 == 0)[0]
-    b_inds = np.where(half_spin_per_esa_step % 2 == 1)[0]
+    # only consider valid half spin values
+    valid_half_spin = half_spin_per_esa_step != HALF_SPIN_FILLVAL
+    a_inds = np.nonzero(valid_half_spin & (half_spin_per_esa_step % 2 == 0))[0]
+    b_inds = np.nonzero(valid_half_spin & (half_spin_per_esa_step % 2 == 1))[0]
 
     position_index = position_index_to_adjust
     for species in species_list:
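Similarly, a small sketch of the A/B configuration split in process_lo_angular_intensity, with the same toy values: since HALF_SPIN_FILLVAL is odd, unmasked fill entries would otherwise land in the B index list.

```python
import numpy as np

HALF_SPIN_FILLVAL = 63

half_spin_per_esa_step = np.array([2, 5, 8, 11, HALF_SPIN_FILLVAL, HALF_SPIN_FILLVAL])
valid_half_spin = half_spin_per_esa_step != HALF_SPIN_FILLVAL

# Even half spin -> configuration A, odd half spin -> configuration B,
# with fill-valued steps excluded from both index lists.
a_inds = np.nonzero(valid_half_spin & (half_spin_per_esa_step % 2 == 0))[0]
b_inds = np.nonzero(valid_half_spin & (half_spin_per_esa_step % 2 == 1))[0]

print(a_inds)   # [0 2]   (half spins 2 and 8)
print(b_inds)   # [1 3]   (half spins 5 and 11; 63 is odd but is excluded as a fill value)
```

Note that for this single-argument usage, np.nonzero and np.where return the same indices, so the np.where to np.nonzero switch in the diff is behavior-preserving on its own; the filtering comes from the valid_half_spin mask.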

imap_processing/codice/constants.py (2 changes: 2 additions & 0 deletions)

@@ -1054,3 +1054,5 @@
         318.39,
     ]
 )
+
+HALF_SPIN_FILLVAL = 63
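A short aside on the design choice (my reading, not stated in the diff): an integer sentinel can be located with a plain equality test and keeps the stepping table integer-typed, whereas a NaN sentinel forces a float array and must be detected with np.isnan, because NaN never compares equal to itself.

```python
import numpy as np

HALF_SPIN_FILLVAL = 63

# Integer sentinel: fill entries are found with ==, and the array stays integer-typed.
steps = np.array([4, 7, HALF_SPIN_FILLVAL])
print(steps == HALF_SPIN_FILLVAL)   # [False False  True]

# NaN sentinel: the array must be float, and == never matches NaN.
steps_nan = np.array([4.0, 7.0, np.nan])
print(steps_nan == np.nan)          # [False False False]
print(np.isnan(steps_nan))          # [False False  True]
```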