1 change: 1 addition & 0 deletions .gitignore
@@ -173,6 +173,7 @@ cython_debug/
# Data that is downloaded
data/
test_data/
/imap_processing/tests/glows/validation_data/
/imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-magi-burst-in.csv
/imap_processing/tests/mag/validation/L1c/T013/mag-l1b-l1c-t013-mago-burst-in.csv
/imap_processing/tests/mag/validation/L1c/T014/mag-l1b-l1c-t014-magi-burst-in.csv
45 changes: 35 additions & 10 deletions imap_processing/codice/codice_l1a.py
@@ -30,6 +30,7 @@
from imap_processing.codice.codice_l1a_lo_species import l1a_lo_species
from imap_processing.codice.utils import (
CODICEAPID,
process_by_table_id,
)
from imap_processing.utils import packet_file_to_datasets

@@ -79,21 +80,29 @@ def process_l1a(  # noqa: PLR0912

if apid == CODICEAPID.COD_LO_SW_SPECIES_COUNTS:
logger.info("Processing Lo SW Species Counts")
datasets.append(l1a_lo_species(datasets_by_apid[apid], lut_file))
datasets.append(
process_by_table_id(datasets_by_apid[apid], lut_file, l1a_lo_species)
)
elif apid == CODICEAPID.COD_LO_NSW_SPECIES_COUNTS:
logger.info("Processing Lo NSW Species Counts")
datasets.append(l1a_lo_species(datasets_by_apid[apid], lut_file))
datasets.append(
process_by_table_id(datasets_by_apid[apid], lut_file, l1a_lo_species)
)
elif apid == CODICEAPID.COD_LO_SW_ANGULAR_COUNTS:
logger.info("Processing Lo SW Angular Counts")
datasets.append(l1a_lo_angular(datasets_by_apid[apid], lut_file))
elif apid == CODICEAPID.COD_LO_NSW_ANGULAR_COUNTS:
logger.info("Processing Lo NSW Angular Counts")
datasets.append(l1a_lo_angular(datasets_by_apid[apid], lut_file))
elif apid == CODICEAPID.COD_HI_OMNI_SPECIES_COUNTS:
datasets.append(l1a_hi_omni(datasets_by_apid[apid], lut_file))
datasets.append(
process_by_table_id(datasets_by_apid[apid], lut_file, l1a_hi_omni)
)
elif apid == CODICEAPID.COD_HI_SECT_SPECIES_COUNTS:
logger.info("Processing Hi Sectored Species Counts")
datasets.append(l1a_hi_sectored(datasets_by_apid[apid], lut_file))
datasets.append(
process_by_table_id(datasets_by_apid[apid], lut_file, l1a_hi_sectored)
)
elif apid == CODICEAPID.COD_HI_PHA:
logger.info("Processing Direct Events for Hi")
datasets.append(l1a_direct_event(datasets_by_apid[apid], apid=apid))
@@ -105,26 +114,42 @@
CODICEAPID.COD_LO_NSW_PRIORITY_COUNTS,
]:
logger.info(f"Processing {apid} Priority Counts")
datasets.append(l1a_lo_priority(datasets_by_apid[apid], lut_file))
datasets.append(
process_by_table_id(datasets_by_apid[apid], lut_file, l1a_lo_priority)
)
elif apid == CODICEAPID.COD_HI_INST_COUNTS_PRIORITIES:
logger.info("Processing Hi Priority Counts")
datasets.append(l1a_hi_priority(datasets_by_apid[apid], lut_file))
datasets.append(
process_by_table_id(datasets_by_apid[apid], lut_file, l1a_hi_priority)
)
elif apid == CODICEAPID.COD_HI_INST_COUNTS_AGGREGATED:
logger.info("Processing Hi Counters aggregated")
datasets.append(
l1a_hi_counters_aggregated(datasets_by_apid[apid], lut_file)
process_by_table_id(
datasets_by_apid[apid], lut_file, l1a_hi_counters_aggregated
)
)
elif apid == CODICEAPID.COD_HI_INST_COUNTS_SINGLES:
logger.info("Processing Hi Counters singles")
datasets.append(l1a_hi_counters_singles(datasets_by_apid[apid], lut_file))
datasets.append(
process_by_table_id(
datasets_by_apid[apid], lut_file, l1a_hi_counters_singles
)
)
elif apid == CODICEAPID.COD_LO_INST_COUNTS_AGGREGATED:
logger.info("Processing Lo Counters aggregated")
datasets.append(
l1a_lo_counters_aggregated(datasets_by_apid[apid], lut_file)
process_by_table_id(
datasets_by_apid[apid], lut_file, l1a_lo_counters_aggregated
)
)
elif apid == CODICEAPID.COD_LO_INST_COUNTS_SINGLES:
logger.info("Processing Lo Counters singles")
datasets.append(l1a_lo_counters_singles(datasets_by_apid[apid], lut_file))
datasets.append(
process_by_table_id(
datasets_by_apid[apid], lut_file, l1a_lo_counters_singles
)
)
elif apid == CODICEAPID.COD_NHK:
logger.info("Processing l1a housekeeping data")
cdf_attrs = ImapCdfAttributes()
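
Every table-ID-grouped product above is now routed through a new process_by_table_id helper imported from imap_processing.codice.utils. Its implementation is not part of this diff; the sketch below is a hypothetical reconstruction based only on the call sites and the new per-group function signatures (group_ds, lut_file, table_id, view_id, apid, plan_id, plan_step), whose docstrings state that view ID, APID, plan ID, and plan step are uniform across a product. The concatenation of per-group results back into one dataset is an assumption, inferred from each call site appending exactly one dataset.

from pathlib import Path
from typing import Callable

import numpy as np
import xarray as xr


def process_by_table_id(
    unpacked_dataset: xr.Dataset,
    lut_file: Path,
    process_func: Callable[..., xr.Dataset],
) -> xr.Dataset:
    # Hypothetical sketch; the real helper lives in imap_processing.codice.utils.
    # These fields are uniform across the product (per the per-group function
    # docstrings), so read them once from the first packet.
    view_id = unpacked_dataset["view_id"].values[0]
    apid = unpacked_dataset["pkt_apid"].values[0]
    plan_id = unpacked_dataset["plan_id"].values[0]
    plan_step = unpacked_dataset["plan_step"].values[0]

    processed = []
    for table_id in np.unique(unpacked_dataset["table_id"].values):
        # Keep only the packets that belong to this table_id.
        indices = np.nonzero(unpacked_dataset["table_id"].values == table_id)[0]
        group_ds = unpacked_dataset.isel(epoch=indices)
        processed.append(
            process_func(
                group_ds, lut_file, table_id, view_id, apid, plan_id, plan_step
            )
        )

    # Assumed recombination: one time-ordered dataset per APID.
    return xr.concat(processed, dim="epoch").sortby("epoch")
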
64 changes: 30 additions & 34 deletions imap_processing/codice/codice_l1a_hi_counters_aggregated.py
@@ -11,59 +11,55 @@
from imap_processing.codice.decompress import decompress
from imap_processing.codice.utils import (
CoDICECompression,
ViewTabInfo,
get_codice_epoch_time,
get_counters_aggregated_pattern,
get_view_tab_info,
read_sci_lut,
get_view_tab_obj,
)
from imap_processing.spice.time import met_to_ttj2000ns

logger = logging.getLogger(__name__)


def l1a_hi_counters_aggregated(
unpacked_dataset: xr.Dataset, lut_file: Path
group_ds: xr.Dataset,
lut_file: Path,
table_id: str,
view_id: int,
apid: int,
plan_id: int,
plan_step: int,
) -> xr.Dataset:
"""
Process CoDICE Hi Counters aggregated L1A data.
Process a single table-ID group of CoDICE Hi Counters Aggregated L1A data.

Parameters
----------
unpacked_dataset : xarray.Dataset
Unpacked dataset from L0 packet file.
group_ds : xarray.Dataset
Dataset filtered to a single table_id.
lut_file : Path
Path to the LUT file for processing.
table_id : str
The table ID for this group.
view_id : int
View ID (uniform across the product).
apid : int
APID (uniform across the product).
plan_id : int
Plan ID (uniform across the product).
plan_step : int
Plan step (uniform across the product).

Returns
-------
xarray.Dataset
Processed L1A dataset for Hi Omni data.
Processed L1A dataset for the input table-ID group.
"""
# lookup in LUT table.
table_id = unpacked_dataset["table_id"].values[0]
view_id = unpacked_dataset["view_id"].values[0]
apid = unpacked_dataset["pkt_apid"].values[0]
plan_id = unpacked_dataset["plan_id"].values[0]
plan_step = unpacked_dataset["plan_step"].values[0]

logger.info(
f"Processing species with - APID: {apid} / 0x{apid:X}, View ID: {view_id}, "
f"Table ID: {table_id}, Plan ID: {plan_id}, Plan Step: {plan_step}"
)
# ========== Get LUT Data ===========
# Read information from LUT
sci_lut_data = read_sci_lut(lut_file, table_id)

view_tab_info = get_view_tab_info(sci_lut_data, view_id, apid)
view_tab_obj = ViewTabInfo(
apid=apid,
view_id=view_id,
sensor=view_tab_info["sensor"],
three_d_collapsed=view_tab_info["3d_collapse"],
collapse_table=view_tab_info["collapse_table"],
compression=view_tab_info["compression"],
)
sci_lut_data, view_tab_obj = get_view_tab_obj(lut_file, table_id, view_id, apid)

if view_tab_obj.sensor != 1:
raise ValueError("Unsupported sensor ID for Hi processing.")
@@ -85,8 +81,8 @@ def l1a_hi_counters_aggregated(
# of active variables to reshape decompressed data.
num_variables = len(non_reserved_variables)
# Decompress data using byte count information from decommed data
binary_data_list = unpacked_dataset["data"].values
byte_count_list = unpacked_dataset["byte_count"].values
binary_data_list = group_ds["data"].values
byte_count_list = group_ds["byte_count"].values

compression_algorithm = CoDICECompression(view_tab_obj.compression)

Expand All @@ -109,9 +105,9 @@ def l1a_hi_counters_aggregated(
# ========= Get Epoch Time Data ===========
# Epoch center time and delta
epoch_center, deltas = get_codice_epoch_time(
unpacked_dataset["acq_start_seconds"].values,
unpacked_dataset["acq_start_subseconds"].values,
unpacked_dataset["spin_period"].values,
group_ds["acq_start_seconds"].values,
group_ds["acq_start_subseconds"].values,
group_ds["spin_period"].values,
view_tab_obj,
)

@@ -147,12 +143,12 @@

# Add first few unique variables
l1a_dataset["spin_period"] = xr.DataArray(
unpacked_dataset["spin_period"].values * constants.SPIN_PERIOD_CONVERSION,
group_ds["spin_period"].values * constants.SPIN_PERIOD_CONVERSION,
dims=("epoch",),
attrs=cdf_attrs.get_variable_attributes("spin_period"),
)
l1a_dataset["data_quality"] = xr.DataArray(
unpacked_dataset["suspect"].values,
group_ds["suspect"].values,
dims=("epoch",),
attrs=cdf_attrs.get_variable_attributes("data_quality"),
)
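
Each per-group module replaces the former read_sci_lut + get_view_tab_info + manual ViewTabInfo construction with a single get_view_tab_obj call that returns both the LUT data and the view-table object. Its body is not shown in this diff, but the removed lines above imply a straightforward wrapper; the sketch below is reconstructed from them (the return type of read_sci_lut is not shown, so Any stands in for it).

from pathlib import Path
from typing import Any

from imap_processing.codice.utils import (
    ViewTabInfo,
    get_view_tab_info,
    read_sci_lut,
)


def get_view_tab_obj(
    lut_file: Path, table_id: str, view_id: int, apid: int
) -> tuple[Any, ViewTabInfo]:
    # Sketch reconstructed from the lines this diff removes.
    # Read the per-table-ID science lookup table.
    sci_lut_data = read_sci_lut(lut_file, table_id)
    # Look up the view-table entry for this view/APID combination.
    view_tab_info = get_view_tab_info(sci_lut_data, view_id, apid)
    view_tab_obj = ViewTabInfo(
        apid=apid,
        view_id=view_id,
        sensor=view_tab_info["sensor"],
        three_d_collapsed=view_tab_info["3d_collapse"],
        collapse_table=view_tab_info["collapse_table"],
        compression=view_tab_info["compression"],
    )
    return sci_lut_data, view_tab_obj
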
66 changes: 32 additions & 34 deletions imap_processing/codice/codice_l1a_hi_counters_singles.py
@@ -11,57 +11,55 @@
from imap_processing.codice.decompress import decompress
from imap_processing.codice.utils import (
CoDICECompression,
ViewTabInfo,
get_codice_epoch_time,
get_collapse_pattern_shape,
get_view_tab_info,
read_sci_lut,
get_view_tab_obj,
)
from imap_processing.spice.time import met_to_ttj2000ns

logger = logging.getLogger(__name__)


def l1a_hi_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.Dataset:
def l1a_hi_counters_singles(
group_ds: xr.Dataset,
lut_file: Path,
table_id: str,
view_id: int,
apid: int,
plan_id: int,
plan_step: int,
) -> xr.Dataset:
"""
Process CoDICE Hi Counters singles L1A data.
Process a single table-ID group of CoDICE Hi Counters Singles L1A data.

Parameters
----------
unpacked_dataset : xarray.Dataset
Unpacked dataset from L0 packet file.
group_ds : xarray.Dataset
Dataset filtered to a single table_id.
lut_file : Path
Path to the LUT file for processing.
table_id : str
The table ID for this group.
view_id : int
View ID (uniform across the product).
apid : int
APID (uniform across the product).
plan_id : int
Plan ID (uniform across the product).
plan_step : int
Plan step (uniform across the product).

Returns
-------
xarray.Dataset
Processed L1A dataset for Hi Omni data.
Processed L1A dataset for the input table-ID group.
"""
# lookup in LUT table.
table_id = unpacked_dataset["table_id"].values[0]
view_id = unpacked_dataset["view_id"].values[0]
apid = unpacked_dataset["pkt_apid"].values[0]
plan_id = unpacked_dataset["plan_id"].values[0]
plan_step = unpacked_dataset["plan_step"].values[0]

logger.info(
f"Processing species with - APID: {apid} / 0x{apid:X}, View ID: {view_id}, "
f"Table ID: {table_id}, Plan ID: {plan_id}, Plan Step: {plan_step}"
)
# ========== Get LUT Data ===========
# Read information from LUT
sci_lut_data = read_sci_lut(lut_file, table_id)

view_tab_info = get_view_tab_info(sci_lut_data, view_id, apid)
view_tab_obj = ViewTabInfo(
apid=apid,
view_id=view_id,
sensor=view_tab_info["sensor"],
three_d_collapsed=view_tab_info["3d_collapse"],
collapse_table=view_tab_info["collapse_table"],
compression=view_tab_info["compression"],
)
sci_lut_data, view_tab_obj = get_view_tab_obj(lut_file, table_id, view_id, apid)

if view_tab_obj.sensor != 1:
raise ValueError("Unsupported sensor ID for Hi processing.")
@@ -82,8 +80,8 @@ def l1a_hi_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.

compression_algorithm = CoDICECompression(view_tab_obj.compression)
# Decompress data using byte count information from decommed data
binary_data_list = unpacked_dataset["data"].values
byte_count_list = unpacked_dataset["byte_count"].values
binary_data_list = group_ds["data"].values
byte_count_list = group_ds["byte_count"].values

# The decompressed data in the shape of (epoch, n). Then reshape later.
decompressed_data = [
Expand All @@ -103,9 +101,9 @@ def l1a_hi_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.
# ========= Get Epoch Time Data ===========
# Epoch center time and delta
epoch_center, deltas = get_codice_epoch_time(
unpacked_dataset["acq_start_seconds"].values,
unpacked_dataset["acq_start_subseconds"].values,
unpacked_dataset["spin_period"].values,
group_ds["acq_start_seconds"].values,
group_ds["acq_start_subseconds"].values,
group_ds["spin_period"].values,
view_tab_obj,
)

@@ -153,12 +151,12 @@ def l1a_hi_counters_singles(unpacked_dataset: xr.Dataset, lut_file: Path) -> xr.

# Add first few unique variables
l1a_dataset["spin_period"] = xr.DataArray(
unpacked_dataset["spin_period"].values * constants.SPIN_PERIOD_CONVERSION,
group_ds["spin_period"].values * constants.SPIN_PERIOD_CONVERSION,
dims=("epoch",),
attrs=cdf_attrs.get_variable_attributes("spin_period"),
)
l1a_dataset["data_quality"] = xr.DataArray(
unpacked_dataset["suspect"].values,
group_ds["suspect"].values,
dims=("epoch",),
attrs=cdf_attrs.get_variable_attributes("data_quality"),
)