8 changes: 8 additions & 0 deletions .github/workflows/spec_zero.yml
@@ -21,6 +21,9 @@ jobs:
    env:
      GH_TOKEN: ${{ secrets.MNE_BOT_TOKEN }}
      GITHUB_TOKEN: ${{ secrets.MNE_BOT_TOKEN }}
    defaults:
      run:
        shell: bash -el {0}
    steps:
      - uses: actions/checkout@v6
        with:
@@ -49,6 +52,11 @@ jobs:
echo "dirty=true" >> $GITHUB_OUTPUT
fi
id: status
- name: Run pre-commit hooks to update other files
run: |
pip install pre-commit
pre-commit run --all
if: steps.status.outputs.dirty == 'true'
- name: Create PR
run: |
set -xeo pipefail
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -1,7 +1,7 @@
repos:
  # Ruff mne
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.14.7
    rev: v0.14.9
    hooks:
      - id: ruff-check
        name: ruff lint mne
1 change: 1 addition & 0 deletions doc/changes/dev/13521.bugfix.rst
@@ -0,0 +1 @@
Fix bug preventing :func:`mne.time_frequency.read_spectrum` from reading saved :class:`mne.time_frequency.Spectrum` objects created from :meth:`mne.time_frequency.EpochsSpectrum.average`, by `Thomas Binns`_.
1 change: 1 addition & 0 deletions doc/changes/dev/13525.bugfix.rst
@@ -0,0 +1 @@
Fix bug where :func:`mne.chpi.refit_hpi` did not take ``gof_limit`` into account when fitting HPI order, by `Eric Larson`_.
1 change: 1 addition & 0 deletions doc/changes/dev/13526.bugfix.rst
@@ -0,0 +1 @@
Fix bug preventing reading of :class:`mne.time_frequency.Spectrum` and :class:`mne.time_frequency.BaseTFR` objects created in MNE<1.8 using the deprecated subject info birthday tuple format, by `Thomas Binns`_.
1 change: 1 addition & 0 deletions doc/changes/dev/13528.bugfix.rst
@@ -0,0 +1 @@
Fix bug where invalid date formats passed to :meth:`mne.Info.set_meas_date` were not caught, by `Thomas Binns`_.
2 changes: 2 additions & 0 deletions doc/changes/dev/13539.bugfix.rst
@@ -0,0 +1,2 @@
Fix bug with :ref:`mne coreg` where interactively altered MRI fiducials were not used when
scaling a surrogate subject, by `Eric Larson`_.
1 change: 1 addition & 0 deletions doc/changes/dev/13549.bugfix.rst
@@ -0,0 +1 @@
Fix bug with :func:`mne.viz.plot_evoked` where channels were plotted above axis spines, by `Michael Straube`_.
4 changes: 2 additions & 2 deletions doc/sphinxext/directive_formatting.py
@@ -60,7 +60,7 @@ def check_directive_formatting(*args):
        # another directive/another directive's content)
        if idx == 0:
            continue
        dir_pattern = r"\.\. [a-zA-Z]+::"
        dir_pattern = r"^\s*\.\. \w+::"  # line might start with whitespace
        head_pattern = r"^[-|=|\^]+$"
        directive = re.search(dir_pattern, line)
        if directive is not None:
@@ -84,5 +84,5 @@ def check_directive_formatting(*args):
        if bad:
            sphinx_logger.warning(
                f"{source_type} '{name}' is missing a blank line before the "
                f"directive '{directive.group()}'"
                f"directive '{directive.group()}' on line {idx + 1}"
            )
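Aside on the regex change above: anchoring the match at the (possibly indented) start of the line keeps indented directives matching while no longer flagging directives mentioned mid-sentence, and `\w+` also covers directive names containing digits or underscores. A quick sketch of the difference (the sample lines are illustrative, not from the MNE docs):

```python
import re

old_pattern = r"\.\. [a-zA-Z]+::"
new_pattern = r"^\s*\.\. \w+::"  # line might start with whitespace

for line in (
    ".. note::",  # directive at column 0: both patterns match
    "    .. versionadded:: 1.12",  # indented directive: both match via re.search
    "see .. warning:: for details",  # mid-sentence mention: only the old one matches
):
    old = bool(re.search(old_pattern, line))
    new = bool(re.search(new_pattern, line))
    print(f"{line!r}: old={old}, new={new}")
```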
2 changes: 1 addition & 1 deletion environment.yml
@@ -46,7 +46,7 @@ dependencies:
  - PySide6 !=6.9.1
  - python-neo
  - python-picard
  - pyvista >=0.42.1
  - pyvista >=0.43
  - pyvistaqt >=0.11
  - qdarkstyle !=3.2.2
  - qtpy
7 changes: 3 additions & 4 deletions examples/decoding/decoding_time_generalization_conditions.py
@@ -6,10 +6,9 @@
=========================================================================

This example runs the analysis described in :footcite:`KingDehaene2014`. It
illustrates how one can
fit a linear classifier to identify a discriminatory topography at a given time
instant and subsequently assess whether this linear model can accurately
predict all of the time samples of a second set of conditions.
illustrates how one can fit a linear classifier to identify a discriminatory
topography at a given time instant and subsequently assess whether this linear
model can accurately predict all of the time samples of a second set of conditions.
"""
# Authors: Jean-Rémi King <jeanremi.king@gmail.com>
#          Alexandre Gramfort <alexandre.gramfort@inria.fr>
8 changes: 7 additions & 1 deletion examples/decoding/decoding_xdawn_eeg.py
@@ -30,6 +30,7 @@
from mne import Epochs, io, pick_types, read_events
from mne.datasets import sample
from mne.decoding import Vectorizer, XdawnTransformer, get_spatial_filter_from_estimator
from mne.utils import check_version

print(__doc__)

@@ -70,11 +71,16 @@
)

# Create classification pipeline
kwargs = dict()
if check_version("sklearn", "1.8"):
    kwargs["l1_ratio"] = 1
else:
    kwargs["penalty"] = "l1"
clf = make_pipeline(
    XdawnTransformer(n_components=n_filter),
    Vectorizer(),
    MinMaxScaler(),
    OneVsRestClassifier(LogisticRegression(penalty="l1", solver="liblinear")),
    OneVsRestClassifier(LogisticRegression(solver="liblinear", **kwargs)),
)

# Get the data and labels
4 changes: 4 additions & 0 deletions mne/_fiff/meas_info.py
@@ -833,6 +833,10 @@ def set_meas_date(self, meas_date):
"""
from ..annotations import _handle_meas_date

_validate_type(
meas_date, (datetime.datetime, "numeric", tuple, None), "meas_date"
)

info = self if isinstance(self, Info) else self.info

meas_date = _handle_meas_date(meas_date)
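The added `_validate_type` call means a malformed `meas_date` now raises immediately instead of failing obscurely downstream. A short sketch of the accepted types (the values here are arbitrary; note that `datetime` inputs must be timezone-aware UTC):

```python
import datetime

import mne

info = mne.create_info(ch_names=1, sfreq=1000.0, ch_types="eeg")
info.set_meas_date(datetime.datetime(2025, 1, 1, tzinfo=datetime.timezone.utc))
info.set_meas_date(1735689600.0)  # numeric POSIX timestamps are accepted
info.set_meas_date(None)  # clears the measurement date
try:
    info.set_meas_date("2025-01-01 00:00:00.000000")  # strings now raise
except TypeError as err:
    print(err)
```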
7 changes: 7 additions & 0 deletions mne/_fiff/tests/test_meas_info.py
@@ -1199,6 +1199,13 @@ def test_invalid_subject_birthday():
assert "birthday" not in raw.info["subject_info"]


def test_invalid_set_meas_date():
"""Test set_meas_date catches invalid str input."""
info = create_info(1, 1000, "eeg")
with pytest.raises(TypeError, match=r"meas_date must be an instance of"):
info.set_meas_date("2025-01-01 00:00:00.000000")


@pytest.mark.slowtest
@pytest.mark.parametrize(
"fname",
78 changes: 58 additions & 20 deletions mne/chpi.py
@@ -579,27 +579,37 @@ def _chpi_objective(x, coil_dev_rrs, coil_head_rrs):
    return d.sum()


def _fit_chpi_quat(coil_dev_rrs, coil_head_rrs):
def _fit_chpi_quat(coil_dev_rrs, coil_head_rrs, *, quat=None):
    """Fit rotation and translation (quaternion) parameters for cHPI coils."""
    denom = np.linalg.norm(coil_head_rrs - np.mean(coil_head_rrs, axis=0))
    denom *= denom
    # We could try to solve it the analytic way:
    # TODO someday we could choose to weight these points by their goodness
    # of fit somehow, see also https://github.com/mne-tools/mne-python/issues/11330
    quat = _fit_matched_points(coil_dev_rrs, coil_head_rrs)[0]
    if quat is None:
        quat = _fit_matched_points(coil_dev_rrs, coil_head_rrs)[0]
    gof = 1.0 - _chpi_objective(quat, coil_dev_rrs, coil_head_rrs) / denom
    return quat, gof


def _fit_coil_order_dev_head_trans(dev_pnts, head_pnts, *, bias=True, prefix=""):
def _fit_coil_order_dev_head_trans(
    dev_pnts, head_pnts, *, bias=True, gofs=None, gof_limit=0.98, prefix=""
):
    """Compute Device to Head transform allowing for permutations of points."""
    n_coils = len(dev_pnts)
    id_quat = np.zeros(6)
    best_order = None
    best_order = np.full(n_coils, -1, dtype=int)
    best_g = -999
    best_quat = id_quat
    for this_order in itertools.permutations(np.arange(len(head_pnts))):
    assert dev_pnts.shape == head_pnts.shape == (n_coils, 3)
    gofs = np.ones(n_coils) if gofs is None else gofs
    use_mask = _gof_use_mask(gofs, gof_limit=gof_limit)
    n_use = int(use_mask.sum())  # explicit int cast for itertools.permutations
    dev_pnts_tmp = dev_pnts[use_mask]
    # First pass: figure out best order using the good dev points
    for this_order in itertools.permutations(np.arange(len(head_pnts)), n_use):
        head_pnts_tmp = head_pnts[np.array(this_order)]
        this_quat, g = _fit_chpi_quat(dev_pnts, head_pnts_tmp)
        this_quat, g = _fit_chpi_quat(dev_pnts_tmp, head_pnts_tmp)
        assert np.linalg.det(quat_to_rot(this_quat[:3])) > 0.9999
        if bias:
            # For symmetrical arrangements, flips can produce roughly
@@ -612,17 +622,35 @@ def _fit_coil_order_dev_head_trans(dev_pnts, head_pnts, *, bias=True, prefix="")
            if check_g > best_g:
                out_g = g
                best_g = check_g
                best_order = np.array(this_order)
                best_order[use_mask] = this_order
                best_quat = this_quat
    del this_order
    # Second pass: now fit the remaining (bad) coils using the best order and quat
    # from above
    missing = np.setdiff1d(np.arange(n_coils), best_order[best_order >= 0])
    best_missing_g = -np.inf
    for this_order in itertools.permutations(missing):
        full_order = best_order.copy()
        full_order[~use_mask] = this_order
        assert (full_order >= 0).all()
        assert np.array_equal(np.sort(full_order), np.arange(n_coils))
        head_pnts_tmp = head_pnts[np.array(full_order)]
        _, g = _fit_chpi_quat(dev_pnts, head_pnts_tmp, quat=best_quat)
        if g > best_missing_g:
            best_missing_g = g
            best_order[:] = full_order
    del this_order
    assert np.array_equal(np.sort(best_order), np.arange(n_coils))

    # Convert quaternion to transform
    dev_head_t = _quat_to_affine(best_quat)
    ang, dist = angle_distance_between_rigid(
        dev_head_t, angle_units="deg", distance_units="mm"
    )
    extra = f" using {n_use}/{n_coils} coils" if n_use < n_coils else ""
    logger.info(
        f"{prefix}Fitted dev_head_t {ang:0.1f}° and {dist:0.1f} mm "
        f"from device origin (GOF: {out_g:.3f})"
        f"from device origin{extra} (GOF: {out_g:.3f})"
    )
    return dev_head_t, best_order, out_g

@@ -1703,7 +1731,8 @@ def refit_hpi(
       :func:`~mne.chpi.compute_chpi_locs`.
    3. Optionally determine coil digitization order by testing all permutations
       for the best goodness of fit between digitized coil locations and
       (rigid-transformed) fitted coil locations.
       (rigid-transformed) fitted coil locations, first ordering the coils that
       satisfy ``gof_limit`` and then assigning the remaining coils.
    4. Subselect coils to use for fitting ``dev_head_t`` based on ``gof_limit``,
       ``dist_limit``, and ``use``.
    5. Update info inplace by modifying ``info["dev_head_t"]`` and appending new entries
@@ -1816,6 +1845,8 @@ def refit_hpi(
        fit_dev_head_t, fit_order, _g = _fit_coil_order_dev_head_trans(
            hpi_dev,
            hpi_head,
            gofs=hpi_gofs,
            gof_limit=gof_limit,
            prefix=" ",
        )
    else:
@@ -1824,27 +1855,21 @@ def refit_hpi(

    # 4. Subselect usable coils and determine final dev_head_t
    if isinstance(use, int) or use is None:
        used = np.where(hpi_gofs >= gof_limit)[0]
        if len(used) < 3:
            gofs = ", ".join(f"{g:.3f}" for g in hpi_gofs)
            raise RuntimeError(
                f"Only {len(used)} coil{_pl(used)} with goodness of fit >= {gof_limit}"
                f", need at least 3 to refit HPI order (got {gofs})."
            )
        quat, _g = _fit_chpi_quat(hpi_dev[used], hpi_head[fit_order][used])
        use_mask = _gof_use_mask(hpi_gofs, gof_limit=gof_limit)
        quat, _g = _fit_chpi_quat(hpi_dev[use_mask], hpi_head[fit_order][use_mask])
        fit_dev_head_t = _quat_to_affine(quat)
        hpi_head_got = apply_trans(fit_dev_head_t, hpi_dev)
        dists = np.linalg.norm(hpi_head_got - hpi_head[fit_order], axis=1)
        dist_str = " ".join(f"{dist * 1e3:.1f}" for dist in dists)
        logger.info(f" Coil distances after initial fit: {dist_str} mm")
        good_dists_idx = np.where(dists[used] <= dist_limit)[0]
        good_dists_idx = np.where(dists[use_mask] <= dist_limit)[0]
        if not len(good_dists_idx) >= 3:
            raise RuntimeError(
                f"Only {len(good_dists_idx)} coil{_pl(good_dists_idx)} have distance "
                f"Only {len(good_dists_idx)} coil{_pl(good_dists_idx)} with distance "
                f"<= {dist_limit * 1e3:.1f} mm, need at least 3 to refit HPI order "
                f"(got distances: {np.round(1e3 * dists, 1)})."
            )
        used = used[good_dists_idx]
        used = np.where(use_mask)[0][good_dists_idx]
        if use is not None:
            used = np.sort(used[np.argsort(hpi_gofs[used])[-use:]])
        else:
@@ -1927,6 +1952,19 @@ def refit_hpi(
    return info


def _gof_use_mask(hpi_gofs, *, gof_limit):
    assert isinstance(hpi_gofs, np.ndarray) and hpi_gofs.ndim == 1
    use_mask = hpi_gofs >= gof_limit
    n_use = use_mask.sum()
    if n_use < 3:
        gofs = ", ".join(f"{g:.3f}" for g in hpi_gofs)
        raise RuntimeError(
            f"Only {n_use} coil{_pl(n_use)} with goodness of fit >= {gof_limit}"
            f", need at least 3 to refit HPI order (got {gofs})."
        )
    return use_mask


def _sorted_hpi_dig(dig, *, kinds=(FIFF.FIFFV_POINT_HPI,)):
    return sorted(
        # need .get here because the hpi_result["dig_points"] does not set it
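The net effect of the two passes in `_fit_coil_order_dev_head_trans`: only coils whose goodness of fit clears `gof_limit` drive the permutation search and the transform, and the low-GOF coils are then slotted into the leftover digitized positions with the fit held fixed. A simplified, self-contained sketch of that idea (illustrative only: `order_coils` is not MNE API, and a plain distance residual in a shared coordinate frame stands in for MNE's quaternion fit):

```python
import itertools

import numpy as np


def order_coils(dev_pnts, head_pnts, gofs, gof_limit=0.98):
    """Toy two-pass coil ordering: trusted coils drive the fit, the rest follow."""
    n_coils = len(dev_pnts)
    use = gofs >= gof_limit  # MNE additionally requires >= 3 high-GOF coils
    order = np.full(n_coils, -1, dtype=int)
    # Pass 1: assign head points to the high-GOF coils only
    best_g = -np.inf
    for perm in itertools.permutations(range(n_coils), int(use.sum())):
        g = -np.linalg.norm(dev_pnts[use] - head_pnts[list(perm)])
        if g > best_g:
            best_g, order[use] = g, perm
    # Pass 2: slot the remaining head points into the low-GOF coils,
    # keeping the pass-1 assignment (and, in MNE, the fitted quaternion) fixed
    base = order.copy()
    missing = np.setdiff1d(np.arange(n_coils), base[base >= 0])
    best_g = -np.inf
    for perm in itertools.permutations(missing):
        full = base.copy()
        full[~use] = perm
        g = -np.linalg.norm(dev_pnts - head_pnts[full])
        if g > best_g:
            best_g, order = g, full
    return order


rng = np.random.default_rng(0)
dev = rng.normal(size=(4, 3))
gofs = np.array([0.99, 0.99, 0.99, 0.5])  # coil 3 is below gof_limit
print(order_coils(dev, dev[[2, 0, 1, 3]], gofs))  # -> [1 2 0 3]
```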
46 changes: 46 additions & 0 deletions mne/coreg.py
@@ -12,6 +12,7 @@
import shutil
import stat
import sys
from copy import deepcopy
from functools import reduce
from glob import glob, iglob

@@ -1030,6 +1031,7 @@ def scale_mri(
    annot=False,
    *,
    on_defects="raise",
    mri_fiducials=None,
    verbose=None,
):
    """Create a scaled copy of an MRI subject.
@@ -1056,6 +1058,13 @@
    %(on_defects)s

        .. versionadded:: 1.0
    mri_fiducials : None | list of dict
        If provided, these fiducials will be used as the originals to scale instead
        of reading from ``{subject_from}/bem/{subject_from}-fiducials.fif``. This is
        typically useful when you have modified the MRI fiducials interactively with
        ``mne coreg`` or similar but have not saved them to disk.

        .. versionadded:: 1.12
    %(verbose)s

See Also
@@ -1078,6 +1087,28 @@
        scale = scale[0]  # speed up scaling conditionals using a singleton
    elif scale.shape != (1,):
        raise ValueError(f"scale must have shape (3,) or (1,), got {scale.shape}")
    _validate_type(mri_fiducials, (list, tuple, None), "mri_fiducials")
    if mri_fiducials is not None:
        _check_option("len(mri_fiducials)", len(mri_fiducials), (3,))
        want_fids = [
            FIFF.FIFFV_POINT_LPA,
            FIFF.FIFFV_POINT_NASION,
            FIFF.FIFFV_POINT_RPA,
        ]
        for fi, fid in enumerate(mri_fiducials):
            fid_name = f"mri_fiducials[{fi}]"
            _validate_type(fid, dict, fid_name)
            for key in ("r", "coord_frame", "ident"):
                if key not in fid:
                    raise ValueError(f"{fid_name} is missing the '{key}' key")
            if fid["coord_frame"] != FIFF.FIFFV_COORD_MRI:
                raise ValueError(
                    f"{fid_name}['coord_frame'] must be 'mri', got {fid['coord_frame']}"
                )
            _check_option(
                f"{fid_name}['kind']", fid["kind"], (FIFF.FIFFV_POINT_CARDINAL,)
            )
            _check_option(f"{fid_name}['ident']", fid["ident"], (want_fids[fi],))

    # make sure we have an empty target directory
    dest = subject_dirname.format(subject=subject_to, subjects_dir=subjects_dir)
@@ -1127,6 +1158,21 @@
        dest = fname.format(subject=subject_to, subjects_dir=subjects_dir)
        write_fiducials(dest, pts, cframe, overwrite=True, verbose=False)

    # It is redundant to put this after the paths["fid"] loop, but it's simple, fast
    # enough, and cleaner to just write it here (rather than adding conditionals to
    # find the correct file above, etc.)
    if mri_fiducials is not None:
        dest = os.path.join(
            bem_dirname.format(subjects_dir=subjects_dir, subject=subject_to),
            f"{subject_to}-fiducials.fif",
        )
        use_mri_fiducials = deepcopy(mri_fiducials)
        for fid in use_mri_fiducials:
            fid["r"] = fid["r"] * scale
        write_fiducials(
            dest, use_mri_fiducials, FIFF.FIFFV_COORD_MRI, overwrite=True, verbose=False
        )

    logger.debug("MRIs [nibabel]")
    os.mkdir(mri_dirname.format(subjects_dir=subjects_dir, subject=subject_to))
    for fname in paths["mri"]:
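For reference, a minimal usage sketch of the new `scale_mri` parameter (subject names, the scale factor, and the fiducial coordinates are placeholders; running it requires a FreeSurfer subjects directory):

```python
import numpy as np

from mne.coreg import scale_mri
from mne.io.constants import FIFF

# MRI-frame cardinal points, in LPA, nasion, RPA order (coordinates made up)
fids = [
    dict(
        kind=FIFF.FIFFV_POINT_CARDINAL,
        ident=ident,
        coord_frame=FIFF.FIFFV_COORD_MRI,
        r=np.array(r),
    )
    for ident, r in [
        (FIFF.FIFFV_POINT_LPA, [-0.08, 0.0, 0.0]),
        (FIFF.FIFFV_POINT_NASION, [0.0, 0.09, 0.0]),
        (FIFF.FIFFV_POINT_RPA, [0.08, 0.0, 0.0]),
    ]
]
scale_mri(
    subject_from="fsaverage",
    subject_to="fsaverage_scaled",
    scale=0.95,
    overwrite=True,
    mri_fiducials=fids,  # used instead of fsaverage/bem/fsaverage-fiducials.fif
)
```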
4 changes: 2 additions & 2 deletions mne/datasets/config.py
@@ -87,7 +87,7 @@
# update the checksum in the MNE_DATASETS dict below, and change version
# here: ↓↓↓↓↓↓↓↓
RELEASES = dict(
    testing="0.169",
    testing="0.170",
    misc="0.27",
    phantom_kit="0.2",
    ucl_opm_auditory="0.2",
@@ -115,7 +115,7 @@
# Testing and misc are at the top as they're updated most often
MNE_DATASETS["testing"] = dict(
    archive_name=f"{TESTING_VERSIONED}.tar.gz",
    hash="md5:bb0524db8605e96fde6333893a969766",
    hash="md5:ebd873ea89507cf5a75043f56119d22b",
    url=(
        "https://codeload.github.com/mne-tools/mne-testing-data/"
        f"tar.gz/{RELEASES['testing']}"
2 changes: 2 additions & 0 deletions mne/evoked.py
@@ -2006,6 +2006,8 @@ def _write_evokeds(fname, evoked, check=True, *, on_mismatch="raise", overwrite=

    if not isinstance(evoked, list | tuple):
        evoked = [evoked]
    if not len(evoked):
        raise ValueError("No evoked data to write")

    warned = False
    # Create the file and save the essentials
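With this guard, saving an empty list via :func:`mne.write_evokeds` now fails fast instead of writing a file with no data; a quick sketch (the filename is a placeholder):

```python
import mne

try:
    mne.write_evokeds("sample-ave.fif", [], overwrite=True)
except ValueError as err:
    print(err)  # -> No evoked data to write
```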