Skip to content

Utility Tools

The tit.tools module provides standalone utilities for mesh and field manipulation, format conversion, electrode mapping, and visualization. These tools are typically called from scripts, the CLI, or other pipeline stages rather than forming a pipeline of their own.

Mesh / NIfTI Conversion

Mesh to NIfTI

Convert SimNIBS .msh mesh files to NIfTI volumes in subject or MNI space:

from tit.tools.mesh2nii import msh_to_nifti, msh_to_mni, convert_mesh_dir

# Single mesh to subject-space NIfTI
msh_to_nifti(
    mesh_path="TI_max.msh",
    m2m_dir="/data/project/derivatives/SimNIBS/sub-001/m2m_001",
    output_path="/data/output/TI_max_subject",
    fields=["TI_max"],  # optional: convert only specific fields
)

# Single mesh to MNI-space NIfTI
msh_to_mni(
    mesh_path="TI_max.msh",
    m2m_dir="/data/project/derivatives/SimNIBS/sub-001/m2m_001",
    output_path="/data/output/TI_max_MNI",
)

# Batch-convert all meshes in a directory (both subject and MNI space)
convert_mesh_dir(
    mesh_dir="/data/project/derivatives/SimNIBS/sub-001/Simulations/motor/TI/mesh",
    output_dir="/data/output/niftis",
    m2m_dir="/data/project/derivatives/SimNIBS/sub-001/m2m_001",
    fields=["TI_max"],
    skip_patterns=["normal"],  # skip surface-only meshes (default)
)

NIfTI to Mesh

Convert a NIfTI segmentation or mask to a surface mesh (.stl or .msh) using marching cubes:

from tit.tools.nifti_to_mesh import nifti_to_mesh

result = nifti_to_mesh(
    input_file="segmentation.nii.gz",
    output_file="thalamus.stl",       # .stl or .msh; defaults to .stl
    clean_components=True,             # remove small disconnected pieces
    clean_threshold=0.1,              # keep components >= 10% of largest
)

print(result)
# {'vertices': 12345, 'faces': 24680, 'output_file': 'thalamus.stl', 'removed_components': 3}

Field Extraction

Extract grey-matter and white-matter sub-meshes from a full SimNIBS head mesh:

from tit.tools.field_extract import main as extract_fields

extract_fields(
    input_file="/data/sims/TI_max.msh",
    project_dir="/data/my_project",
    subject_id="001",
    # gm_output_file and wm_output_file default to BIDS-compliant paths
)

Visualization

Montage Visualizer

Render a PNG showing electrode positions and connection arcs on the EEG cap template:

from tit.tools.montage_visualizer import visualize_montage

visualize_montage(
    montage_name="motor_cortex",
    electrode_pairs=[["E030", "E020"], ["E095", "E070"]],
    eeg_net="GSN-HydroCel-185.csv",
    output_dir="/data/output/montage_imgs",
    sim_mode="U",  # "U" = one image per montage, "M" = combined image
)

Gmsh Option Files

Generate .opt files for Gmsh visualization of mesh fields:

from tit.tools.gmsh_opt import create_mesh_opt_file

opt_path = create_mesh_opt_file(
    mesh_path="/data/sims/TI_max.msh",
    field_info={
        "fields": ["TI_max", "magnE"],
        "max_values": {"TI_max": 0.25, "magnE": 0.5},
    },
)
# writes /data/sims/TI_max.msh.opt

Atlas and Labels

Label Extraction

Extract specific label values from a NIfTI segmentation file:

from tit.tools.extract_labels import extract_labels

output = extract_labels(
    input_file="atlas_parcellation.nii.gz",
    labels=[10, 11, 12],              # label integers to keep
    output_file="extracted.nii.gz",   # optional; defaults to *_extracted suffix
)

FreeSurfer Annotation Reading

Read and inspect FreeSurfer .annot annotation files:

from tit.tools.read_annot import read_annot_file

# Print all regions and optionally look up a specific vertex
read_annot_file(
    annot_path="/data/freesurfer/sub-001/label/lh.aparc.annot",
    vertex_id=1024,  # optional
)

Electrode Mapping

Map optimized electrode positions to the nearest positions in a physical EEG net using the Hungarian algorithm:

from tit.tools.map_electrodes import (
    read_csv_positions,
    load_electrode_positions_json,
    map_electrodes_to_net,
    save_mapping_result,
    log_mapping_summary,
)

# Load net positions from SimNIBS CSV
net_positions, net_labels = read_csv_positions("GSN-HydroCel-256.csv")

# Load optimized positions from flex-search output
opt_positions, channel_indices = load_electrode_positions_json(
    "electrode_positions.json"
)

# Compute optimal assignment
mapping = map_electrodes_to_net(
    opt_positions, net_positions, net_labels, channel_indices
)

# Save and log
save_mapping_result(mapping, "mapping_result.json", eeg_net_name="GSN-HydroCel-256")
log_mapping_summary(mapping)

Other Utilities

Version Checking

Check GitHub for newer TI-Toolbox releases:

from tit.tools.check_for_update import check_for_new_version

newer = check_for_new_version(
    current_version="2.2.0",
    repo="idossha/TI-Toolbox",  # default
    timeout=2.0,                # seconds, default
)
if newer:
    print(f"New version available: {newer}")

API Reference

Mesh to NIfTI

tit.tools.mesh2nii.msh_to_nifti

msh_to_nifti(mesh_path: str, m2m_dir: str, output_path: str, fields: list[str] | None = None) -> None

Convert a mesh file to subject-space NIfTI.

Parameters

mesh_path : str Path to the .msh file. m2m_dir : str Path to the m2m_{subject} directory (used as reference grid). output_path : str Output file prefix. SimNIBS appends the field name (e.g. prefix_magnE.nii.gz). fields : list[str] | None If given, only these fields are written. Otherwise all fields in the mesh are converted.

Source code in tit/tools/mesh2nii.py
def msh_to_nifti(
    mesh_path: str,
    m2m_dir: str,
    output_path: str,
    fields: list[str] | None = None,
) -> None:
    """Write a mesh's fields as NIfTI volumes in subject space.

    Parameters
    ----------
    mesh_path : str
        Path to the ``.msh`` file to convert.
    m2m_dir : str
        ``m2m_{subject}`` directory that provides the reference grid.
    output_path : str
        Output prefix; SimNIBS appends each field name
        (e.g. ``prefix_magnE.nii.gz``).
    fields : list[str] | None
        Restrict conversion to these fields; ``None`` converts every
        field in the mesh.
    """
    loaded = mesh_io.read_msh(mesh_path)
    if fields:
        loaded = _filter_mesh_fields(loaded, fields)
    transformations.interpolate_to_volume(loaded, m2m_dir, output_path)

tit.tools.mesh2nii.msh_to_mni

msh_to_mni(mesh_path: str, m2m_dir: str, output_path: str, fields: list[str] | None = None) -> None

Convert a mesh file to MNI-space NIfTI.

Parameters

mesh_path : str Path to the .msh file. m2m_dir : str Path to the m2m_{subject} directory. output_path : str Output file prefix. SimNIBS appends the field name (e.g. prefix_magnE.nii.gz). fields : list[str] | None If given, only these fields are written.

Source code in tit/tools/mesh2nii.py
def msh_to_mni(
    mesh_path: str,
    m2m_dir: str,
    output_path: str,
    fields: list[str] | None = None,
) -> None:
    """Convert a mesh file to MNI-space NIfTI.

    Parameters
    ----------
    mesh_path : str
        Path to the ``.msh`` file.
    m2m_dir : str
        Path to the ``m2m_{subject}`` directory.
    output_path : str
        Output file prefix.  SimNIBS appends the field name
        (e.g. ``prefix_magnE.nii.gz``).
    fields : list[str] | None
        If given, only these fields are written.
    """
    # warp_volume only accepts file paths, so field filtering requires
    # writing a temporary mesh to disk.  Unlike the previous version,
    # the temp file is now always removed (convert_mesh_dir already
    # cleaned up its temp mesh; this function leaked it).
    temp_path = None
    if fields:
        mesh = mesh_io.read_msh(mesh_path)
        mesh = _filter_mesh_fields(mesh, fields)
        temp_path = _write_temp_mesh(mesh)
    try:
        transformations.warp_volume(temp_path or mesh_path, m2m_dir, output_path)
    finally:
        if temp_path is not None:
            os.unlink(temp_path)

tit.tools.mesh2nii.convert_mesh_dir

convert_mesh_dir(mesh_dir: str, output_dir: str, m2m_dir: str, fields: list[str] | None = None, skip_patterns: list[str] | None = None) -> None

Batch-convert every .msh file in mesh_dir to NIfTI.

For each mesh two NIfTI sets are produced:

  • {basename}_subject_{field}.nii.gz – subject space
  • {basename}_MNI_{field}.nii.gz – MNI space

Parameters

mesh_dir : str Directory containing .msh files. output_dir : str Where the NIfTI files are written. m2m_dir : str Path to the m2m_{subject} directory. fields : list[str] | None If given, only these fields are converted. skip_patterns : list[str] | None Basenames containing any of these substrings are skipped. Defaults to ["normal"] (surface-only meshes have no volume elements).

Source code in tit/tools/mesh2nii.py
def convert_mesh_dir(
    mesh_dir: str,
    output_dir: str,
    m2m_dir: str,
    fields: list[str] | None = None,
    skip_patterns: list[str] | None = None,
) -> None:
    """Batch-convert every ``.msh`` file in *mesh_dir* to NIfTI.

    For each mesh two NIfTI sets are produced:

    * ``{basename}_subject_{field}.nii.gz``  – subject space
    * ``{basename}_MNI_{field}.nii.gz``      – MNI space

    Parameters
    ----------
    mesh_dir : str
        Directory containing ``.msh`` files.
    output_dir : str
        Where the NIfTI files are written.
    m2m_dir : str
        Path to the ``m2m_{subject}`` directory.
    fields : list[str] | None
        If given, only these fields are converted.
    skip_patterns : list[str] | None
        Basenames containing any of these substrings are skipped.
        Defaults to ``["normal"]`` (surface-only meshes have no volume
        elements).
    """
    if skip_patterns is None:
        skip_patterns = ["normal"]

    os.makedirs(output_dir, exist_ok=True)

    msh_files = sorted(f for f in os.listdir(mesh_dir) if f.endswith(".msh"))
    if not msh_files:
        logger.warning("No .msh files found in %s", mesh_dir)
        return

    for fname in msh_files:
        base = os.path.splitext(fname)[0]
        if any(p in base for p in skip_patterns):
            logger.debug("Skipping surface mesh: %s", fname)
            continue

        mesh_path = os.path.join(mesh_dir, fname)
        mesh = mesh_io.read_msh(mesh_path)

        # When filtering fields, write a temp mesh for warp_volume
        # (it only accepts file paths, not in-memory mesh objects).
        warp_path = mesh_path
        if fields:
            mesh = _filter_mesh_fields(mesh, fields)
            warp_path = _write_temp_mesh(mesh)

        # try/finally so the temp mesh is removed even if a conversion
        # raises (previously it would leak on error).
        try:
            subject_prefix = os.path.join(output_dir, f"{base}_subject")
            mni_prefix = os.path.join(output_dir, f"{base}_MNI")

            logger.debug("Converting %s → subject space", fname)
            transformations.interpolate_to_volume(mesh, m2m_dir, subject_prefix)

            logger.debug("Converting %s → MNI space", fname)
            transformations.warp_volume(warp_path, m2m_dir, mni_prefix)
        finally:
            if warp_path != mesh_path:
                os.unlink(warp_path)

    logger.info("NIfTI conversion complete: %s", output_dir)

NIfTI to Mesh

tit.tools.nifti_to_mesh.nifti_to_mesh

nifti_to_mesh(input_file, output_file=None, clean_components=False, clean_threshold=0.1)

Convert a NIfTI segmentation/mask to a surface mesh.

Parameters

input_file : str or Path Path to input NIfTI file output_file : str or Path, optional Path to output mesh file (.stl or .msh). If None, defaults to input_file with .stl extension clean_components : bool Whether to remove small disconnected components clean_threshold : float Minimum size threshold for component removal (as fraction of largest component)

Returns

dict Dictionary with mesh statistics: {'vertices': int, 'faces': int, 'output_file': str, 'removed_components': int}

Source code in tit/tools/nifti_to_mesh.py
def nifti_to_mesh(
    input_file, output_file=None, clean_components=False, clean_threshold=0.1
):
    """
    Convert a NIfTI segmentation/mask to a surface mesh.

    Parameters
    ----------
    input_file : str or Path
        Path to input NIfTI file
    output_file : str or Path, optional
        Path to output mesh file (.stl or .msh). If None, defaults to
        input_file with the NIfTI extension(s) replaced by .stl
    clean_components : bool
        Whether to remove small disconnected components
    clean_threshold : float
        Minimum size threshold for component removal (as fraction of largest component)

    Returns
    -------
    dict
        Mesh statistics:
        {'vertices': int, 'faces': int, 'output_file': str,
         'removed_components': int}

    Raises
    ------
    ValueError
        If *output_file* has an extension other than .stl or .msh.
    """
    input_path = Path(input_file)

    if output_file is None:
        # Strip the full NIfTI extension (".nii" or ".nii.gz") so the
        # default output is e.g. "seg.stl" rather than "seg.nii.stl"
        # (Path.stem only removes the final ".gz").
        stem = input_path.name
        for ext in (".gz", ".nii"):
            if stem.endswith(ext):
                stem = stem[: -len(ext)]
        output_file = input_path.parent / f"{stem}.stl"

    output_path = Path(output_file)

    # Validate output format
    if output_path.suffix.lower() not in [".stl", ".msh"]:
        raise ValueError("Output file must have .stl or .msh extension")

    # Load NIfTI
    img = nib.load(str(input_path))
    data = img.get_fdata()
    affine = img.affine

    # Create binary mask (any non-zero value)
    mask = data > 0

    # Remove small disconnected components if requested
    removed_components = 0
    if clean_components:
        mask, removed_components = remove_small_components(
            mask, threshold=clean_threshold
        )

    # Run marching cubes
    verts, faces, normals, _ = measure.marching_cubes(mask, level=0.5)

    # Transform vertices to world coordinates (RAS)
    verts_world = nib.affines.apply_affine(affine, verts)

    # Save based on extension
    if output_path.suffix.lower() == ".stl":
        save_stl(verts_world, faces, str(output_path))
    else:  # .msh
        save_gmsh(verts_world, faces, str(output_path))

    return {
        "vertices": len(verts_world),
        "faces": len(faces),
        "output_file": str(output_path),
        "removed_components": removed_components,
    }

tit.tools.nifti_to_mesh.remove_small_components

remove_small_components(mask, threshold=0.1)

Remove connected components smaller than threshold * largest component.

Parameters

mask : ndarray Binary mask array threshold : float Minimum size threshold as fraction of largest component (0-1)

Returns

(ndarray, int) The cleaned binary mask and the number of components that were removed.

Source code in tit/tools/nifti_to_mesh.py
def remove_small_components(mask, threshold=0.1):
    """
    Remove connected components smaller than ``threshold * largest component``.

    Parameters
    ----------
    mask : ndarray
        Binary mask array
    threshold : float
        Minimum size threshold as fraction of largest component (0-1)

    Returns
    -------
    cleaned : ndarray
        Cleaned binary mask
    removed : int
        Number of components that were removed
    """
    labeled, num_features = ndimage.label(mask)
    # Nothing to remove with zero or one component.  Still return a
    # (mask, count) pair: the previous version returned a bare array
    # here, which broke callers that unpack two values (nifti_to_mesh).
    if num_features <= 1:
        return mask, 0

    # Size (voxel count) of each labeled component, labels 1..num_features.
    component_sizes = ndimage.sum(mask, labeled, range(1, num_features + 1))
    min_size = component_sizes.max() * threshold

    # Keep only components at or above the size threshold.
    keep_labels = np.where(component_sizes >= min_size)[0] + 1
    cleaned = np.isin(labeled, keep_labels)

    return cleaned, num_features - len(keep_labels)

tit.tools.nifti_to_mesh.save_stl

save_stl(verts, faces, filename)

Save mesh as binary STL format.

Source code in tit/tools/nifti_to_mesh.py
def save_stl(verts, faces, filename):
    """Write *verts*/*faces* to *filename* as a binary STL mesh."""

    from stl import mesh as stl_mesh

    solid = stl_mesh.Mesh(np.zeros(faces.shape[0], dtype=stl_mesh.Mesh.dtype))
    # Copy each triangle's three corner coordinates into the STL buffer.
    for tri_idx, tri in enumerate(faces):
        for corner in range(3):
            solid.vectors[tri_idx][corner] = verts[tri[corner], :]

    solid.save(filename)

tit.tools.nifti_to_mesh.save_gmsh

save_gmsh(verts, faces, filename)

Save mesh as Gmsh .msh format (v2.2 ASCII).

Source code in tit/tools/nifti_to_mesh.py
def save_gmsh(verts, faces, filename):
    """Write *verts*/*faces* to *filename* in Gmsh v2.2 ASCII format."""
    lines = ["$MeshFormat\n2.2 0 8\n$EndMeshFormat\n"]

    # Node section: 1-based ids followed by x y z coordinates.
    lines.append("$Nodes\n")
    lines.append(f"{len(verts)}\n")
    for node_id, vertex in enumerate(verts, 1):
        lines.append(f"{node_id} {vertex[0]} {vertex[1]} {vertex[2]}\n")
    lines.append("$EndNodes\n")

    # Element section: triangles are element type 2.
    # Format: elem_id elem_type num_tags tag1 tag2 node1 node2 node3;
    # node ids are 1-based, hence the +1 on each face index.
    lines.append("$Elements\n")
    lines.append(f"{len(faces)}\n")
    for elem_id, tri in enumerate(faces, 1):
        lines.append(f"{elem_id} 2 2 1 1 {tri[0]+1} {tri[1]+1} {tri[2]+1}\n")
    lines.append("$EndElements\n")

    with open(filename, "w") as fh:
        fh.writelines(lines)

Field Extraction

tit.tools.field_extract.main

main(input_file, project_dir=None, subject_id=None, gm_output_file=None, wm_output_file=None)

Load the original mesh, crop to grey matter (tag #2) and white matter (tag #1), and save to separate files.

Directory structure (BIDS-compliant): project_dir/ ├── sub-{subject_id}/ └── derivatives/ └── SimNIBS/ └── sub-{subject_id}/ ├── m2m_{subject_id}/ └── Simulations/

Parameters

input_file : str Path to the input mesh file. project_dir : str, optional Path to the project directory (BIDS structure). subject_id : str, optional Subject ID (without "sub-" prefix). gm_output_file : str, optional Path to the output grey matter mesh file. wm_output_file : str, optional Path to the output white matter mesh file.

Source code in tit/tools/field_extract.py
def main(
    input_file,
    project_dir=None,
    subject_id=None,
    gm_output_file=None,
    wm_output_file=None,
):
    """
    Split a head mesh into grey-matter (tag 2) and white-matter (tag 1)
    sub-meshes and write each one to its own file.

    Directory structure (BIDS-compliant):
    project_dir/
    ├── sub-{subject_id}/
    └── derivatives/
        └── SimNIBS/
            └── sub-{subject_id}/
                ├── m2m_{subject_id}/
                └── Simulations/

    Parameters
    ----------
    input_file : str
        Path to the input mesh file.
    project_dir : str, optional
        Path to the project directory (BIDS structure).
    subject_id : str, optional
        Subject ID (without "sub-" prefix).
    gm_output_file : str, optional
        Path to the output grey matter mesh file.
    wm_output_file : str, optional
        Path to the output white matter mesh file.
    """
    source_mesh = mesh_io.read_msh(input_file)
    grey = source_mesh.crop_mesh(tags=[2])
    white = source_mesh.crop_mesh(tags=[1])

    input_filename = os.path.basename(input_file)

    if project_dir and subject_id:
        # Default outputs live under the BIDS derivatives tree.
        output_base = os.path.join(
            project_dir, "derivatives", "SimNIBS", f"sub-{subject_id}", "Simulations"
        )
        os.makedirs(output_base, exist_ok=True)

        if gm_output_file is None:
            gm_output_file = os.path.join(
                output_base, f"sub-{subject_id}_space-MNI305_desc-grey_{input_filename}"
            )
        if wm_output_file is None:
            wm_output_file = os.path.join(
                output_base,
                f"sub-{subject_id}_space-MNI305_desc-white_{input_filename}",
            )
    else:
        # No BIDS context: write next to the input file.
        input_dir = os.path.dirname(input_file)
        if gm_output_file is None:
            gm_output_file = os.path.join(input_dir, "grey_" + input_filename)
        if wm_output_file is None:
            wm_output_file = os.path.join(input_dir, "white_" + input_filename)

    mesh_io.write_msh(grey, gm_output_file)
    logger.info("Grey matter mesh saved to %s", gm_output_file)

    mesh_io.write_msh(white, wm_output_file)
    logger.info("White matter mesh saved to %s", wm_output_file)

Montage Visualizer

tit.tools.montage_visualizer.visualize_montage

visualize_montage(montage_name: str, electrode_pairs: list[list[str]], eeg_net: str, output_dir: str, sim_mode: str = 'U') -> None

Render a PNG showing electrode positions and connection arcs.

Parameters

montage_name : used as output filename base (unipolar) or "combined" (multipolar) electrode_pairs : list of [e1, e2] pairs, e.g. [["E030","E020"],["E095","E070"]] eeg_net : EEG cap name, e.g. "GSN-HydroCel-185.csv" output_dir : directory to write PNG(s) into sim_mode : "U" → one image per montage; "M" → single combined image

Source code in tit/tools/montage_visualizer.py
def visualize_montage(
    montage_name: str,
    electrode_pairs: list[list[str]],
    eeg_net: str,
    output_dir: str,
    sim_mode: str = "U",
) -> None:
    """
    Render a PNG showing electrode positions and connection arcs.

    Parameters
    ----------
    montage_name    : used as output filename base (unipolar) or "combined" (multipolar)
    electrode_pairs : list of [e1, e2] pairs, e.g. [["E030","E020"],["E095","E070"]]
    eeg_net         : EEG cap name, e.g. "GSN-HydroCel-185.csv"
    output_dir      : directory to write PNG(s) into
    sim_mode        : "U" → one image per montage; "M" → single combined image
    """
    import shutil

    if eeg_net in _SKIP_NETS:
        return

    coords = _load_coordinates(eeg_net)

    template = os.path.join(_RESOURCES_DIR, "GSN-256.png")
    os.makedirs(output_dir, exist_ok=True)

    # shutil.copyfile instead of spawning `cp`: portable (works on
    # Windows) and avoids a subprocess per image.
    if sim_mode == "U":
        out_image = os.path.join(
            output_dir, f"{montage_name}_highlighted_visualization.png"
        )
        shutil.copyfile(template, out_image)
    else:
        # "M" mode accumulates all montages onto one shared image.
        out_image = os.path.join(output_dir, "combined_montage_visualization.png")
        if not os.path.exists(out_image):
            shutil.copyfile(template, out_image)

    for i, (e1, e2) in enumerate(electrode_pairs):
        color = _COLORS[i % len(_COLORS)]
        ring = os.path.join(_RESOURCES_DIR, _RINGS[i % len(_RINGS)])
        # Electrodes missing from the cap's coordinate table are skipped.
        if e1 in coords:
            _overlay_ring(out_image, *coords[e1], color, ring)
        if e2 in coords:
            _overlay_ring(out_image, *coords[e2], color, ring)
        if e1 in coords and e2 in coords:
            _draw_arc(out_image, *coords[e1], *coords[e2], color)

Gmsh Options

tit.tools.gmsh_opt.create_mesh_opt_file

create_mesh_opt_file(mesh_path, field_info=None)

Create a .opt file for Gmsh visualization of mesh fields.

Parameters

mesh_path : str Path to the .msh file (without .opt extension) field_info : dict, optional Dictionary with field information containing: - 'fields': list of field names to visualize - 'max_values': dict mapping field names to their max values (optional) - 'field_type': str, either 'node' or 'element' (default: 'node')

Source code in tit/tools/gmsh_opt.py
def create_mesh_opt_file(mesh_path, field_info=None):
    """
    Create a .opt file for Gmsh visualization of mesh fields.

    Parameters
    ----------
    mesh_path : str
        Path to the .msh file (without .opt extension)
    field_info : dict, optional
        Dictionary with field information containing:
        - 'fields': list of field names to visualize
        - 'max_values': dict mapping field names to their max values (optional)
        - 'field_type': str, either 'node' or 'element' (default: 'node')

    Returns
    -------
    str
        Path to the written ``{mesh_path}.opt`` file.
    """
    field_info = field_info or {}
    fields = field_info.get("fields", [])
    max_values = field_info.get("max_values", {})

    parts = [
        """// Gmsh visualization settings for mesh fields
Mesh.SurfaceFaces = 0;
Mesh.SurfaceEdges = 0;
Mesh.Points = 0;
Mesh.Lines = 0;

"""
    ]

    # NOTE(review): views are numbered from 1 here, leaving View[0]
    # unconfigured — presumably matches how SimNIBS orders its views;
    # confirm against a generated mesh.
    for view_idx, field_name in enumerate(fields, start=1):
        upper = max_values.get(field_name, 1.0)
        parts.append(
            f"""// View[{view_idx}] ({field_name})
View[{view_idx}].Visible = 1;
View[{view_idx}].ColormapNumber = {view_idx};
View[{view_idx}].RangeType = 2;
View[{view_idx}].CustomMin = 0;
View[{view_idx}].CustomMax = {upper};
View[{view_idx}].ShowScale = 1;
View[{view_idx}].ColormapAlpha = 1;
View[{view_idx}].ColormapAlphaPower = 0.08;

"""
        )

    # Trailing human-readable summary of the configured views.
    parts.append("// Field information:\n")
    for view_idx, field_name in enumerate(fields, start=1):
        upper = max_values.get(field_name, 1.0)
        parts.append(f"// View[{view_idx}]: {field_name} (max: {upper:.6f})\n")

    opt_path = f"{mesh_path}.opt"
    with open(opt_path, "w") as fh:
        fh.write("".join(parts))

    return opt_path

Label Extraction

tit.tools.extract_labels.extract_labels

extract_labels(input_file, labels, output_file=None)

Extract specific labels from a NIfTI segmentation file.

Parameters

input_file : str or Path Path to input NIfTI file labels : list of int List of label values to extract output_file : str or Path, optional Path to output NIfTI file. If None, defaults to input_file with '_extracted' suffix

Returns

str Path to the output file

Source code in tit/tools/extract_labels.py
def extract_labels(input_file, labels, output_file=None):
    """
    Extract specific labels from a NIfTI segmentation file.

    Parameters
    ----------
    input_file : str or Path
        Path to input NIfTI file
    labels : list of int
        List of label values to extract
    output_file : str or Path, optional
        Path to output NIfTI file. If None, defaults to input_file with
        '_extracted' suffix

    Returns
    -------
    str
        Path to the output file
    """
    source = Path(input_file)

    if output_file is None:
        output_file = source.parent / f"{source.stem}_extracted{source.suffix}"
    destination = Path(output_file)

    image = nib.load(str(source))
    volume = image.get_fdata()

    # Zero out every voxel whose value is not one of the requested labels,
    # keeping the original label values for those that match.
    keep = np.isin(volume, labels)
    selected = np.where(keep, volume, 0).astype(np.int16)

    result = nib.Nifti1Image(selected, image.affine, image.header)
    nib.save(result, str(destination))

    return str(destination)

FreeSurfer Annotations

tit.tools.read_annot.read_annot_file

read_annot_file(annot_path, vertex_id=None)
Source code in tit/tools/read_annot.py
def read_annot_file(annot_path, vertex_id=None):
    # Load annotation data
    labels, ctab, names = fsio.read_annot(annot_path)

    print(f"\nLoaded .annot file: {annot_path}")
    print(f"Number of vertices: {len(labels)}")
    print(f"Number of regions: {len(names)}\n")

    # Print all region names and their colors
    print("Regions and RGBA colors:")
    for i, name in enumerate(names):
        color = ctab[i][:4]  # R, G, B, A
        print(f"  {i:2d}: {name.decode('utf-8')} | Color: {color}")

    # If a specific vertex is given, show its label
    if vertex_id is not None:
        if 0 <= vertex_id < len(labels):
            label_val = labels[vertex_id]
            try:
                index = list(ctab[:, -1]).index(label_val)
                region_name = names[index].decode("utf-8")
                print(f"\nVertex {vertex_id} belongs to region: {region_name}")
            except ValueError:
                print(f"\nVertex {vertex_id} has an unknown label value: {label_val}")
        else:
            print(f"\nVertex ID {vertex_id} is out of range (0 to {len(labels) - 1})")

Electrode Mapping

tit.tools.map_electrodes.read_csv_positions

read_csv_positions(csv_path)

Read electrode positions from a CSV file using SimNIBS utilities.

Parameters

csv_path : str Path to the file containing electrode positions.

Returns

positions : np.ndarray Array of electrode positions (Nx3). labels : list List of electrode labels/names.

Source code in tit/tools/map_electrodes.py
def read_csv_positions(csv_path):
    """
    Read electrode positions from a CSV file using SimNIBS utilities.

    Parameters
    ----------
    csv_path : str
        Path to the file containing electrode positions.

    Returns
    -------
    positions : np.ndarray
        Array of electrode positions (Nx3).
    labels : list
        List of electrode labels/names.
    """
    from simnibs.utils.csv_reader import read_csv_positions as simnibs_read_csv

    # Delegate parsing to SimNIBS's own CSV reader.
    type_, coordinates, extra, name, extra_cols, header = simnibs_read_csv(csv_path)

    electrode_types = ("Electrode", "ReferenceElectrode")
    positions = []
    labels = []

    for row_type, coord, label in zip(type_, coordinates, name):
        if row_type not in electrode_types:
            continue
        positions.append(coord)
        # Unnamed rows get a generated label (E1, E2, ...).
        labels.append(label if label else f"E{len(positions)}")

    return np.array(positions), labels

tit.tools.map_electrodes.load_electrode_positions_json

load_electrode_positions_json(json_path)

Load optimized electrode positions from electrode_positions.json.

Parameters

json_path : str Path to the electrode_positions.json file.

Returns

positions : np.ndarray Array of optimized electrode positions (Nx3). channel_array_indices : list List of [channel, array] indices for each electrode.

Source code in tit/tools/map_electrodes.py
def load_electrode_positions_json(json_path):
    """
    Load optimized electrode positions from electrode_positions.json.

    Parameters
    ----------
    json_path : str
        Path to the electrode_positions.json file.

    Returns
    -------
    positions : np.ndarray
        Array of optimized electrode positions (Nx3).
    channel_array_indices : list
        List of [channel, array] indices for each electrode.
    """
    with open(json_path, "r") as handle:
        payload = json.load(handle)

    return (
        np.array(payload["optimized_positions"]),
        payload["channel_array_indices"],
    )

tit.tools.map_electrodes.map_electrodes_to_net

map_electrodes_to_net(optimized_positions, net_positions, net_labels, channel_array_indices)

Map optimized electrode positions to nearest EEG net positions using the Hungarian algorithm for optimal assignment.

Parameters

optimized_positions : np.ndarray Array of optimized electrode positions (Nx3). net_positions : np.ndarray Array of EEG net electrode positions (Mx3). net_labels : list List of EEG net electrode labels. channel_array_indices : list List of [channel, array] indices for each optimized electrode.

Returns

mapping_result : dict Dictionary containing: - optimized_positions: Original optimized positions - mapped_positions: Corresponding net positions - mapped_labels: Labels of mapped net electrodes - distances: Distances between optimized and mapped positions - channel_array_indices: Channel and array indices

Source code in tit/tools/map_electrodes.py
def map_electrodes_to_net(
    optimized_positions, net_positions, net_labels, channel_array_indices
):
    """
    Map optimized electrode positions to nearest EEG net positions using
    the Hungarian algorithm for optimal assignment.

    Parameters
    ----------
    optimized_positions : np.ndarray
        Array of optimized electrode positions (Nx3).
    net_positions : np.ndarray
        Array of EEG net electrode positions (Mx3).
    net_labels : list
        List of EEG net electrode labels.
    channel_array_indices : list
        List of [channel, array] indices for each optimized electrode.

    Returns
    -------
    mapping_result : dict
        Dictionary containing:
        - optimized_positions: Original optimized positions
        - mapped_positions: Corresponding net positions
        - mapped_labels: Labels of mapped net electrodes
        - distances: Distances between optimized and mapped positions
        - channel_array_indices: Channel and array indices
    """
    logger.info("Calculating distance matrix...")
    opt = np.asarray(optimized_positions, dtype=float)
    net = np.asarray(net_positions, dtype=float)
    # Vectorized pairwise Euclidean distances (NxM) via broadcasting,
    # replacing the previous O(N*M) nested Python loop.
    distance_matrix = np.linalg.norm(opt[:, None, :] - net[None, :, :], axis=-1)

    logger.info("Finding optimal electrode assignment using Hungarian algorithm...")
    row_ind, col_ind = linear_sum_assignment(distance_matrix)

    mapping_result = {
        "optimized_positions": [opt[i].tolist() for i in row_ind],
        "mapped_positions": [net[j].tolist() for j in col_ind],
        "mapped_labels": [net_labels[j] for j in col_ind],
        # Plain Python floats so the result is directly JSON-serializable.
        "distances": [float(distance_matrix[i, j]) for i, j in zip(row_ind, col_ind)],
        "channel_array_indices": [channel_array_indices[i] for i in row_ind],
    }

    return mapping_result

tit.tools.map_electrodes.save_mapping_result

save_mapping_result(mapping_result, output_path, eeg_net_name=None)

Save mapping result to a JSON file.

Parameters

mapping_result : dict Dictionary containing mapping information. output_path : str Path where to save the JSON file. eeg_net_name : str, optional Name of the EEG net file used.

Source code in tit/tools/map_electrodes.py
def save_mapping_result(mapping_result, output_path, eeg_net_name=None):
    """
    Save mapping result to a JSON file.

    Parameters
    ----------
    mapping_result : dict
        Dictionary containing mapping information.
    output_path : str
        Path where to save the JSON file.
    eeg_net_name : str, optional
        Name of the EEG net file used.
    """
    # Work on a shallow copy so the caller's dict is left untouched.
    payload = dict(mapping_result)
    if eeg_net_name:
        payload["eeg_net"] = eeg_net_name

    with open(output_path, "w") as handle:
        json.dump(payload, handle, indent=2)

    logger.info("Mapping data saved to: %s", output_path)

tit.tools.map_electrodes.log_mapping_summary

log_mapping_summary(mapping_result)

Log a summary of the electrode mapping results.

Parameters

mapping_result : dict Dictionary containing mapping information.

Source code in tit/tools/map_electrodes.py
def log_mapping_summary(mapping_result):
    """
    Log a summary of the electrode mapping results.

    Parameters
    ----------
    mapping_result : dict
        Dictionary containing mapping information.
    """
    distances = mapping_result["distances"]
    total_distance = sum(distances)
    if distances:
        avg_distance = total_distance / len(distances)
        max_distance = max(distances)
    else:
        # No electrodes mapped: report zeros rather than dividing by zero.
        avg_distance = 0
        max_distance = 0

    logger.info("=" * 80)
    logger.info("ELECTRODE MAPPING SUMMARY")
    logger.info("=" * 80)
    logger.info("Total electrodes mapped: %d", len(mapping_result["mapped_labels"]))
    logger.info("Average distance: %.2f mm", avg_distance)
    logger.info("Maximum distance: %.2f mm", max_distance)
    logger.info("Total distance: %.2f mm", total_distance)

    logger.info("Detailed mapping:")
    logger.info("-" * 80)
    logger.info(
        "%-5s %-8s %-6s %-15s %-15s",
        "Idx",
        "Channel",
        "Array",
        "Mapped Label",
        "Distance (mm)",
    )
    logger.info("-" * 80)

    rows = zip(
        mapping_result["mapped_labels"],
        distances,
        mapping_result["channel_array_indices"],
    )
    for idx, (label, dist, (channel, array)) in enumerate(rows):
        logger.info("%-5d %-8s %-6s %-15s %-15.2f", idx, channel, array, label, dist)

    logger.info("=" * 80)

Version Checking

tit.tools.check_for_update.check_for_new_version

check_for_new_version(current_version: str, repo: str = 'idossha/TI-Toolbox', timeout: float = 2.0) -> str | None

Check GitHub for the latest release. Returns version string if newer, else None.

Source code in tit/tools/check_for_update.py
def check_for_new_version(
    current_version: str,
    repo: str = "idossha/TI-Toolbox",
    timeout: float = 2.0,
) -> str | None:
    """Check GitHub for the latest release. Returns version string if newer, else None."""
    import requests

    # Query the GitHub releases API; raises on HTTP errors or timeout.
    response = requests.get(
        f"https://api.github.com/repos/{repo}/releases/latest", timeout=timeout
    )
    response.raise_for_status()

    latest = response.json().get("tag_name", "").lstrip("v")
    if not latest:
        return None
    if parse_version(latest) > parse_version(current_version):
        return latest
    return None

tit.tools.check_for_update.parse_version

parse_version(version_str: str) -> tuple

Parse a version string like '2.2.1' or 'v2.2.1' into a tuple of ints.

Source code in tit/tools/check_for_update.py
def parse_version(version_str: str) -> tuple:
    """Parse a version string like '2.2.1' or 'v2.2.1' into a tuple of ints."""

    def _as_int(piece):
        # Non-numeric components (e.g. 'rc1') are silently dropped.
        try:
            return int(piece)
        except ValueError:
            return None

    pieces = version_str.strip().lstrip("v").split(".")
    return tuple(n for n in map(_as_int, pieces) if n is not None)