diff --git a/examples/scripts/column-regions/column-regions.sh b/examples/scripts/column-regions/column-regions.sh
index fd8dea7..fd4a714 100755
--- a/examples/scripts/column-regions/column-regions.sh
+++ b/examples/scripts/column-regions/column-regions.sh
@@ -9,7 +9,7 @@
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
-python -m capsul highres_cortex.capsul.traverses \
+python -m capsul run highres_cortex.capsul.traverses \
classif=../classif.nii.gz \
goal_traverse_diameter=1.0 \
verbosity=1 \
diff --git a/examples/scripts/dist/distmaps.sh b/examples/scripts/dist/distmaps.sh
index 9de57db..e32304f 100755
--- a/examples/scripts/dist/distmaps.sh
+++ b/examples/scripts/dist/distmaps.sh
@@ -11,7 +11,7 @@
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
-python -m capsul highres_cortex.capsul.processes.Distmaps \
+python -m capsul run highres_cortex.capsul.processes.Distmaps \
classif=../classif.nii.gz \
distwhite=distwhite.nii.gz \
distCSF=distCSF.nii.gz \
diff --git a/examples/scripts/heat/heat.sh b/examples/scripts/heat/heat.sh
index 8ad1bc9..9c7b468 100755
--- a/examples/scripts/heat/heat.sh
+++ b/examples/scripts/heat/heat.sh
@@ -9,7 +9,7 @@
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
-python -m capsul highres_cortex.capsul.processes.Laplacian \
+python -m capsul run highres_cortex.capsul.processes.Laplacian \
classif=../classif.nii.gz \
verbosity=1 \
laplace_field=heat.nii.gz
diff --git a/examples/scripts/isovolume/isovolume.sh b/examples/scripts/isovolume/isovolume.sh
index 175d392..97a410c 100755
--- a/examples/scripts/isovolume/isovolume.sh
+++ b/examples/scripts/isovolume/isovolume.sh
@@ -9,7 +9,7 @@
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
-python -m capsul highres_cortex.capsul.isovolume \
+python -m capsul run highres_cortex.capsul.isovolume \
classif=../classif.nii.gz \
verbosity=1 \
advection_step_size=0.05 \
diff --git a/examples/scripts/laplace-euclidean/laplace-euclidean.sh b/examples/scripts/laplace-euclidean/laplace-euclidean.sh
index 8e29440..fcc63d6 100755
--- a/examples/scripts/laplace-euclidean/laplace-euclidean.sh
+++ b/examples/scripts/laplace-euclidean/laplace-euclidean.sh
@@ -9,7 +9,7 @@
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
-python -m capsul highres_cortex.capsul.thickness_adv \
+python -m capsul run highres_cortex.capsul.thickness_adv \
classif=../classif.nii.gz \
advection_step_size=0.05 \
verbosity=1 \
diff --git a/examples/scripts/upwind-euclidean/upwind-euclidean.sh b/examples/scripts/upwind-euclidean/upwind-euclidean.sh
index addbc3b..4aa32aa 100755
--- a/examples/scripts/upwind-euclidean/upwind-euclidean.sh
+++ b/examples/scripts/upwind-euclidean/upwind-euclidean.sh
@@ -9,7 +9,7 @@
# notice and this notice are preserved. This file is offered as-is,
# without any warranty.
-python -m capsul highres_cortex.capsul.thickness_upw \
+python -m capsul run highres_cortex.capsul.thickness_upw \
classif=../classif.nii.gz \
verbosity=1 \
thickness_image=total-length.nii.gz \
diff --git a/python/highres_cortex/capsul/filtered_sumcurvs.json b/python/highres_cortex/capsul/filtered_sumcurvs.json
new file mode 100644
index 0000000..4e89488
--- /dev/null
+++ b/python/highres_cortex/capsul/filtered_sumcurvs.json
@@ -0,0 +1,34 @@
+{
+ "type": "custom_pipeline",
+ "name": "filtered_sumcurvs",
+ "definition": {
+ "export_parameters": false,
+ "doc": "Compute the filtered sum of principal curvatures of isophote surfaces. This is equivalent to computing the divergence of the normalized gradient of the input scalar field. Note that a Gaussian smoothing of width sigma is first applied to the input image, in order to limit the appearance of local high curvature values (e.g. due to the discontinuity of second-order derivative at the borders of the cortex).",
+ "executables": {
+ "smoothing": {
+ "definition": "highres_cortex.capsul.processes.GaussianSmoothing",
+ "type": "process"
+ },
+ "sumcurvs": {
+ "definition": "highres_cortex.capsul.processes.IsoCurvature",
+ "type": "process",
+ "parameters": {
+ "mode": "sum"
+ }
+ }
+ },
+ "parameters": {
+ "sigma": 1.0,
+ "verbosity": 1
+ },
+ "links": [
+ "input->smoothing.input_image",
+ "sigma->smoothing.zsigma",
+ "sigma->smoothing.xsigma",
+ "sigma->smoothing.ysigma",
+ "verbosity->sumcurvs.verbosity",
+ "smoothing.output_image->sumcurvs.input_image",
+ "sumcurvs.output_image->output"
+ ]
+ }
+}
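A minimal usage sketch (assuming only the Capsul v3 call already used in this patch, capsul.api.Capsul.executable; file names are hypothetical): the pipeline defined above exposes the parameters named in its "links" section (input, sigma, verbosity, output) and can be loaded and parameterised from Python.

import capsul.api

# Load the JSON-defined pipeline and set its exported parameters.
pipeline = capsul.api.Capsul.executable(
    "highres_cortex.capsul.filtered_sumcurvs")
pipeline.input = "heat.nii.gz"                # hypothetical input scalar field
pipeline.sigma = 1.0                          # Gaussian smoothing width (mm)
pipeline.verbosity = 1
pipeline.output = "filtered_sumcurvs.nii.gz"  # hypothetical output path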
diff --git a/python/highres_cortex/capsul/filtered_sumcurvs.xml b/python/highres_cortex/capsul/filtered_sumcurvs.xml
deleted file mode 100644
index 08455fd..0000000
--- a/python/highres_cortex/capsul/filtered_sumcurvs.xml
+++ /dev/null
@@ -1,70 +0,0 @@
-
-
-
- Compute the filtered sum of principal curvatures of isophote surfaces
-
- This is equivalent to computing the divergence of the normalized gradient
- of the input scalar field.
-
- .. note::
-
- A Gaussian smoothing of width sigma is first applied to the input
- image, in order to limit the appearance of local high curvature values
- (e.g. due to the discontinuity of second-order derivative at the
- borders of the cortex).
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/python/highres_cortex/capsul/isovolume.json b/python/highres_cortex/capsul/isovolume.json
new file mode 100644
index 0000000..055010d
--- /dev/null
+++ b/python/highres_cortex/capsul/isovolume.json
@@ -0,0 +1,93 @@
+{
+ "type": "custom_pipeline",
+ "name": "isovolume",
+ "definition": {
+ "export_parameters": false,
+ "doc": "Compute equivolumetric depth",
+ "executables": {
+ "laplace": {
+ "definition": "highres_cortex.capsul.processes.Laplacian",
+ "type": "process"
+ },
+ "filtered_sumcurvs": {
+ "definition": "highres_cortex.capsul.filtered_sumcurvs",
+ "type": "process"
+ },
+ "binarize_cortex": {
+ "definition": "highres_cortex.capsul.processes.BinarizeCortex",
+ "type": "process"
+ },
+ "advect_toward_pial": {
+ "definition": "highres_cortex.capsul.processes.AdvectTubesAlongGradient",
+ "type": "process",
+ "parameters": {
+ "upfield": true,
+ "domain_type": "interpolated"
+ }
+ },
+ "advect_toward_white": {
+ "definition": "highres_cortex.capsul.processes.AdvectTubesAlongGradient",
+ "type": "process",
+ "parameters": {
+ "domain_type": "interpolated",
+ "upfield": false
+ }
+ },
+ "total_tube_volume": {
+ "definition": "highres_cortex.capsul.processes.ImageArithmetic2Inputs",
+ "type": "process",
+ "parameters": {
+ "formula": "I1 + I2"
+ }
+ },
+ "pial_volume_fraction": {
+ "definition": "highres_cortex.capsul.processes.ImageArithmetic2Inputs",
+ "type": "process",
+ "parameters": {
+ "formula": "I1 / I2"
+ }
+ },
+ "postprocess": {
+ "definition": "highres_cortex.capsul.processes.PostProcessEquivolumetricDepth",
+ "type": "process"
+ }
+ },
+ "parameters": {
+ "advection_max_dist": 6.0,
+ "advection_step_size": 0.03,
+ "smoothing_sigma": 1.0,
+ "laplace_precision": 0.001,
+ "laplace_typical_cortical_thickness": 3.0,
+ "verbosity": 1
+ },
+ "links": [
+ "classif->laplace.classif",
+ "classif->postprocess.classif",
+ "classif->binarize_cortex.classif",
+ "verbosity->laplace.verbosity",
+ "verbosity->filtered_sumcurvs.verbosity",
+ "verbosity->advect_toward_pial.verbosity",
+ "verbosity->advect_toward_white.verbosity",
+ "laplace_precision->laplace.precision",
+ "laplace_typical_cortical_thickness->laplace.typical_cortical_thickness",
+ "smoothing_sigma->filtered_sumcurvs.sigma",
+ "advection_step_size->advect_toward_pial.step_size",
+ "advection_step_size->advect_toward_white.step_size",
+ "advection_max_dist->advect_toward_pial.max_dist",
+ "advection_max_dist->advect_toward_white.max_dist",
+ "laplace.laplace_field->filtered_sumcurvs.input",
+ "laplace.laplace_field->advect_toward_pial.grad_field",
+ "laplace.laplace_field->advect_toward_white.grad_field",
+ "filtered_sumcurvs.output->advect_toward_white.divergence",
+ "filtered_sumcurvs.output->advect_toward_pial.divergence",
+ "binarize_cortex.output_image->advect_toward_white.domain",
+ "binarize_cortex.output_image->advect_toward_pial.domain",
+ "advect_toward_pial.output_volumes->total_tube_volume.input_image_1",
+ "advect_toward_pial.output_volumes->pial_volume_fraction.input_image_1",
+ "advect_toward_white.output_volumes->total_tube_volume.input_image_2",
+ "total_tube_volume.output_image->pial_volume_fraction.input_image_2",
+ "pial_volume_fraction.output_image->postprocess.input_image",
+ "postprocess.output_image->equivolumetric_depth"
+ ]
+ }
+}
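As an illustration of the dataflow wired by the links above (a sketch only, assuming the two advection outputs have been loaded as NumPy arrays): the image passed to postprocess is the voxel-wise pial volume fraction.

import numpy as np

def pial_volume_fraction(v_pial: np.ndarray, v_white: np.ndarray) -> np.ndarray:
    # total_tube_volume: formula "I1 + I2" (pial + white tube volumes)
    total_tube_volume = v_pial + v_white
    # pial_volume_fraction: formula "I1 / I2" (pial volume / total volume)
    return v_pial / total_tube_volume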
diff --git a/python/highres_cortex/capsul/isovolume.xml b/python/highres_cortex/capsul/isovolume.xml
deleted file mode 100644
index 121e06c..0000000
--- a/python/highres_cortex/capsul/isovolume.xml
+++ /dev/null
@@ -1,132 +0,0 @@
-
-
-
- Compute equivolumetric depth
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/python/highres_cortex/capsul/processes.py b/python/highres_cortex/capsul/processes.py
index 1f44b59..ba333f4 100644
--- a/python/highres_cortex/capsul/processes.py
+++ b/python/highres_cortex/capsul/processes.py
@@ -36,205 +36,200 @@
"""Elementary processes to be used within the CAPSUL pipelining system."""
-from __future__ import absolute_import, division, print_function
+import enum
import math
+import os
+import subprocess
import capsul.api
-import capsul.process.xml
-from traits.api import Bool, Enum, File, Float, Int, Str, Undefined
+from soma.controller import field, File, Literal, undefined
-VOLUME_EXTENSIONS = ['.nii.gz', '.vimg', '.vinfo', '.vhdr', '.img', '.hdr',
- '.v', '.i', '.mnc', '.mnc.gz', '.nii', '.jpg', '.gif',
- '.png', '.mng', '.bmp', '.pbm', '.pgm', '.ppm', '.xbm',
- '.xpm', '.tiff', '.tif', '.ima', '.dim', '']
-
-# External commands that use AIMS need to be launched through bv_env on Mac OS
-# 10.11 and later. The reason is that the DYLD_* environment variables, which
-# control the dynamic linker (used in the plugin system of AIMS), are not
-# allowed to be inherited by programs that reside in "protected" system
-# directories (/bin, /sbin, /usr, and /System). Capsul itself can usually work
-# without these variables, but processes that use soma-io cannot. Moreover,
-# even if these variables are set in the Capsul process, they cannot be
-# inherited by children processes written in Python because these use
-# /usr/bin/env in their shebang, which is "protected" (it resides in /usr).
+VOLUME_EXTENSIONS = ['.nii.gz', '.hdr', '.nii', '.ima']
class Laplacian(capsul.api.Process):
"""Solve the Laplacian model in the cortex"""
- classif = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="classification image of the cortex (100 inside, 0 in CSF, "
+ classif: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="classification image of the cortex (100 inside, 0 in CSF, "
"200 in white matter)")
- precision = Float(
- 0.001, output=False, optional=True,
- desc="target maximum relative error in first-order finite differences")
- typical_cortical_thickness = Float(
- 3, output=False, optional=True,
- desc="typical thickness of the cortex (mm), used for accelerating "
+ precision: float = field(
+ default=0.001,
+ doc="target maximum relative error in first-order finite differences")
+ typical_cortical_thickness: float = field(
+ default=3.0,
+ doc="typical thickness of the cortex (mm), used for accelerating "
"convergence")
- verbosity = Int(1, output=False, optional=True, desc="Verbosity level")
+    verbosity: int = field(default=1, doc="Verbosity level")
- laplace_field = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output pseudo-temperature field (from 0 in CSF to 1 in the "
+ laplace_field: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output pseudo-temperature field (from 0 in CSF to 1 in the "
"white matter)"
)
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"ylLaplacian",
"--classif", self.classif,
"--output", self.laplace_field,
"--precision", repr(self.precision),
"--typical-cortical-thickness",
repr(self.typical_cortical_thickness),
- "--verbose", str(self.verbosity)]
+ "--verbose", str(self.verbosity),
+ ]
+ subprocess.check_call(cmd)
class IsoCurvature(capsul.api.Process):
"""Compute the curvature of isosurfaces"""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image volume (scalar field)")
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image volume (scalar field)")
# modes mean, geom, pri1, pri2 are currently unimplemented
- mode = Enum(
- "sum", output=False, optional=True,
- desc="type of curvature to compute")
- verbosity = Int(1, output=False, optional=True, desc="Verbosity level")
-
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output image volume containing the curvature of the isosurfaces "
+ mode: Literal["sum"] = field(
+ default="sum",
+ doc="type of curvature to compute")
+ verbosity: int = field(default=1, doc="Verbosity level")
+
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output image volume containing the curvature of the isosurfaces "
"of the input field"
)
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"ylIsoCurvature",
"--input", self.input_image,
"--mode", self.mode,
"--output", self.output_image,
- "--verbose", str(self.verbosity)]
+ "--verbose", str(self.verbosity),
+ ]
+ subprocess.check_call(cmd)
class RemoveNaN(capsul.api.Process):
"""Remove NaN values from an image"""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image")
- value = Float(
- 0, output=False, optional=True,
- desc="replacement value")
- percentage = Bool(
- True, output=False, optional=True,
- desc="interpret value as a percentage of the image intensity range")
-
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output image"
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image")
+ value: float = field(
+ default=0.0,
+ doc="replacement value")
+ percentage: bool = field(
+ default=True,
+ doc="interpret value as a percentage of the image intensity range")
+
+ output_image: File = field(
+ write=True,
+ extensions=VOLUME_EXTENSIONS,
+ doc="output image"
)
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"AimsRemoveNaN",
"--verbose", "0",
"-i", self.input_image,
"-np", str(self.percentage),
"--value", repr(self.value),
- "-o", self.output_image]
+ "-o", self.output_image
+ ]
+ subprocess.check_call(cmd)
class MedianFilter(capsul.api.Process):
"""Median filter smoothing"""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image")
- x_size = Int(3, output=False, optional=True,
- desc="X size of the filter mask")
- y_size = Int(3, output=False, optional=True,
- desc="Y size of the filter mask")
- z_size = Int(3, output=False, optional=True,
- desc="Z size of the filter mask")
-
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="median-filtered image"
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image")
+ x_size: int = field(default=3,
+ doc="X size of the filter mask")
+ y_size: int = field(default=3,
+ doc="Y size of the filter mask")
+ z_size: int = field(default=3,
+ doc="Z size of the filter mask")
+
+ output_image: File = field(
+ write=True,
+ extensions=VOLUME_EXTENSIONS,
+ doc="median-filtered image"
)
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"AimsMedianSmoothing",
"--verbose", "0",
"--input", self.input_image,
"--dx", str(self.x_size),
"--dy", str(self.y_size),
"--dz", str(self.z_size),
- "--output", self.output_image]
+ "--output", self.output_image
+ ]
+ subprocess.check_call(cmd)
class GaussianSmoothing(capsul.api.Process):
"""3D Gaussian smoothing filter using the recursive Deriche method"""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image")
- xsigma = Float(Undefined, output=False, optional=True,
- desc="X standard deviation of the gaussian filter "
- "[default=largest voxel size]")
- ysigma = Float(Undefined, output=False, optional=True,
- desc="Y standard deviation of the gaussian filter "
- "[default=largest voxel size]")
- zsigma = Float(Undefined, output=False, optional=True,
- desc="Z standard deviation of the gaussian filter "
- "[default=largest voxel size]")
-
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="Gaussian-filtered image"
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image")
+ xsigma: float = field(
+ default=None,
+ doc="X standard deviation of the gaussian filter "
+ "[default=largest voxel size]")
+ ysigma: float = field(
+ default=None,
+ doc="Y standard deviation of the gaussian filter "
+ "[default=largest voxel size]")
+ zsigma: float = field(
+ default=None,
+ doc="Z standard deviation of the gaussian filter "
+ "[default=largest voxel size]")
+
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="Gaussian-filtered image"
)
- def get_commandline(self):
+ def execute(self, context):
sigma_args = []
- if self.xsigma is not Undefined:
+ if self.xsigma is not None:
sigma_args += ["--xsigma", str(self.xsigma)]
- if self.ysigma is not Undefined:
+ if self.ysigma is not None:
sigma_args += ["--ysigma", str(self.ysigma)]
- if self.zsigma is not Undefined:
+ if self.zsigma is not None:
sigma_args += ["--zsigma", str(self.zsigma)]
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ cmd = [
"AimsGaussianSmoothing",
"--input", self.input_image
] + sigma_args + [
"--output", self.output_image
]
+ subprocess.check_call(cmd)
class BinarizeCortex(capsul.api.Process):
"""Extract a binary image (0/1) of the cortex"""
- classif = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="classification image of the cortex (100 inside, 0 in CSF, "
+ classif: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="classification image of the cortex (100 inside, 0 in CSF, "
"200 in white matter)")
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="binary image of the cortex (1 in the cortex, 0 elsewhere)"
- )
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="binary image of the cortex (1 in the cortex, 0 elsewhere)")
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"AimsThreshold",
"--verbose", "0",
"-b",
@@ -242,101 +237,108 @@ def get_commandline(self):
"-m", "eq",
"-t", "100",
"--input", self.classif,
- "--output", self.output_image]
+ "--output", self.output_image,
+ ]
+ subprocess.check_call(cmd)
+
+
+class DomainTypeEnum(str, enum.Enum):
+ interpolated = "interpolated"
+ boolean = "boolean"
class AdvectTubesAlongGradient(capsul.api.Process):
"""Advect a tube from each voxel, return its volume and end surface."""
- domain = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="mask of the calculation domain: one inside, zero outside")
- grad_field = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="scalar field whose gradient is to be advected along")
- divergence = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="divergence of the normalized vector field")
- step_size = Float(
- 0.03, output=False, optional=True,
- desc="size of the advection step (millimetres)")
- upfield = Bool(
- False, optional=False,
- desc="Direction of advection (upfield if True, downfield if False)")
- max_dist = Float(
- 6, output=False, optional=True,
- desc="maximum advection distance (millimetres)")
- domain_type = Enum(
- "interpolated", "boolean", output=False, optional=True,
- desc="interpolation type for the domain")
- verbosity = Int(1, output=False, optional=True, desc="Verbosity level")
-
- output_volumes = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output volume containing the tubes' volume")
- output_surfaces = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output volume containing the tubes' end surface")
-
- def get_commandline(self):
+ domain: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="mask of the calculation domain: one inside, zero outside")
+ grad_field: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="scalar field whose gradient is to be advected along")
+ divergence: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="divergence of the normalized vector field")
+ step_size: float = field(
+ default=0.03,
+ doc="size of the advection step (millimetres)")
+ upfield: bool = field(
+ doc="Direction of advection (upfield if True, downfield if False)")
+ max_dist: float = field(
+ default=6,
+ doc="maximum advection distance (millimetres)")
+ domain_type: DomainTypeEnum = field(
+ default="interpolated",
+ doc="interpolation type for the domain")
+ verbosity: int = field(default=1, doc="Verbosity level")
+
+ output_volumes: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output volume containing the tubes' volume")
+ output_surfaces: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS, optional=True,
+ doc="output volume containing the tubes' end surface")
+
+ def execute(self, context):
command_step_size = ((-self.step_size) if self.upfield
else self.step_size)
- args = [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ cmd = [
"ylAdvectTubes",
"--domain", self.domain,
"--grad-field", self.grad_field,
"--divergence", self.divergence,
"--step-size", repr(command_step_size),
"--max-dist", repr(self.max_dist),
- "--domain-type", self.domain_type,
+ "--domain-type", self.domain_type.value,
"--verbose", str(self.verbosity),
"--output-volumes", self.output_volumes,
- "--output-surfaces", self.output_surfaces]
- return args
+ ]
+ if self.output_surfaces is not undefined:
+ cmd += ["--output-surfaces", self.output_surfaces]
+
+ subprocess.check_call(cmd)
class EuclideanAdvectionAlongGradient(capsul.api.Process):
"""Measure the Euclidean length of an advection path."""
- domain = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="mask of the calculation domain: one inside, zero outside")
- grad_field = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="scalar field whose gradient is to be advected along")
- step_size = Float(
- 0.03, output=False, optional=True,
- desc="size of the advection step (millimetres)")
- upfield = Bool(
- False, optional=False,
- desc="Direction of advection (upfield if True, downfield if False)")
- max_dist = Float(
- 6, output=False, optional=True,
- desc="maximum advection distance (millimetres)")
- domain_type = Enum(
- "interpolated", "boolean", output=False, optional=True,
- desc="interpolation type for the domain")
- verbosity = Int(1, output=False, optional=True, desc="Verbosity level")
-
- output_length = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output volume containing the length of the advection path")
-
- def get_commandline(self):
+ domain: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="mask of the calculation domain: one inside, zero outside")
+ grad_field: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="scalar field whose gradient is to be advected along")
+ step_size: float = field(
+ default=0.03,
+ doc="size of the advection step (millimetres)")
+ upfield: bool = field(
+ doc="Direction of advection (upfield if True, downfield if False)")
+ max_dist: float = field(
+ default=6,
+ doc="maximum advection distance (millimetres)")
+ domain_type: DomainTypeEnum = field(
+ default="interpolated",
+ doc="interpolation type for the domain")
+ verbosity: int = field(default=1, doc="Verbosity level")
+
+ output_length: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output volume containing the length of the advection path")
+
+ def execute(self, context):
command_step_size = ((-self.step_size) if self.upfield
else self.step_size)
- args = [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ cmd = [
"ylAdvectEuclidean",
"--domain", self.domain,
"--grad-field", self.grad_field,
"--step-size", repr(command_step_size),
"--max-dist", repr(self.max_dist),
- "--domain-type", self.domain_type,
+ "--domain-type", self.domain_type.value,
"--verbose", str(self.verbosity),
- "--output-length", self.output_length]
- return args
+ "--output-length", self.output_length
+ ]
+ subprocess.check_call(cmd)
class PostProcessEquivolumetricDepth(capsul.api.Process):
@@ -347,190 +349,188 @@ class PostProcessEquivolumetricDepth(capsul.api.Process):
- Set various Nifti header fields
"""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image of equivolumetric depth")
- classif = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="classification image of the cortex (100 inside, 0 in CSF, "
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image of equivolumetric depth")
+ classif: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="classification image of the cortex (100 inside, 0 in CSF, "
"200 in white matter)")
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output image"
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output image"
)
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"ylPostProcessEquivolumetricDepth",
self.input_image,
self.classif,
- self.output_image]
+ self.output_image
+ ]
+ subprocess.check_call(cmd)
class ImageArithmetic2Inputs(capsul.api.Process):
"""Compute arithmetic from 2 input images"""
- input_image_1 = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image I1")
- input_image_2 = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image I2")
- formula = Str(
- Undefined, output=False,
- desc="arithmetic formula referring to I1 and I2")
-
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="result of the arithmetic"
+ input_image_1: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image I1")
+ input_image_2: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image I2")
+ formula: str = field(
+ doc="arithmetic formula referring to I1 and I2")
+
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="result of the arithmetic"
)
- def get_commandline(self):
- # bv_env automatically launches the command through Python on Windows
- return [
- "bv_env",
+ def execute(self, context):
+ cmd = [
"cartoLinearComb.py",
"-f", self.formula,
"-i", self.input_image_1,
"-i", self.input_image_2,
- "-o", self.output_image]
+ "-o", self.output_image
+ ]
+ subprocess.check_call(cmd)
class MergeImagesOneToOne(capsul.api.Process):
"""Merge values into an image using a mask image."""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image")
- mask_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="mask image (must have an integer voxel type)")
- label = Int(
- Undefined, output=False,
- desc="only label of the mask image to take into account")
- value = Float(
- Undefined, output=False,
- desc="replacement value")
-
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output image"
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image")
+ mask_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="mask image (must have an integer voxel type)")
+ label_to_replace: int = field(
+ doc="only label of the mask image to take into account")
+ value: float = field(
+ doc="replacement value")
+
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output image"
)
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"AimsMerge",
"--verbose", "0",
"-m", "oo",
- "-l", str(self.label),
+ "-l", str(self.label_to_replace),
"-v", repr(self.value),
"-i", self.input_image,
"-M", self.mask_image,
- "-o", self.output_image]
-
-
-class VolumeSink(capsul.api.Process):
- """Use this process to ignore a mandatory output."""
-
- file = File(Undefined, allowed_extensions=VOLUME_EXTENSIONS,
- desc="Volume file to be ignored")
-
- def _run_process(self):
- pass
+ "-o", self.output_image
+ ]
+ subprocess.check_call(cmd)
class EuclideanUpwindingAlongGradient(capsul.api.Process):
"""Compute distance to a boundary along the gradient of a scalar field."""
- domain = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="label image defining the computation domain")
- field = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="scalar field whose gradient is used as the integration "
+ domain: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="label image defining the computation domain")
+ scalar_field: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="scalar field whose gradient is used as the integration "
"direction")
- downfield = Bool(
- False, optional=False,
- desc="work on inverted field (downfield instead of upfield)")
- domain_label = Int(
- 100, optional=True,
- desc="label of the propagation domain")
- origin_label = Int(
- 0, optional=True,
- desc="label of the origin object")
- verbosity = Int(1, output=False, optional=True, desc="Verbosity level")
-
- output = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output volume containing the distance")
-
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ downfield: bool = field(
+ doc="work on inverted field (downfield instead of upfield)")
+ domain_label: int = field(
+ default=100,
+ doc="label of the propagation domain")
+ origin_label: int = field(
+ default=0,
+ doc="label of the origin object")
+ verbosity: int = field(default=1, doc="Verbosity level")
+
+ output: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output volume containing the distance")
+
+ def execute(self, context):
+ cmd = [
"ylUpwindDistance",
"--domain", self.domain,
- "--field", self.field,
+ "--field", self.scalar_field,
"--invert", str(self.downfield),
"--domain-label", str(self.domain_label),
"--origin-label", str(self.origin_label),
"--verbose", str(self.verbosity),
- "--output", self.output]
+ "--output", self.output
+ ]
+ subprocess.check_call(cmd)
class Distmaps(capsul.api.Process):
"""Compute distance maps to the boundaries of the cortex"""
- classif = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="classification image of the cortex (100 inside, 0 in CSF, "
+ classif: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="classification image of the cortex (100 inside, 0 in CSF, "
"200 in white matter)")
- distwhite = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="signed Euclidean distance to the white matter interface"
+ distwhite: File = field(
+ default=os.devnull,
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="signed Euclidean distance to the white matter interface"
)
- distCSF = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="signed Euclidean distance to the CSF interface"
+ distCSF: File = field(
+ default=os.devnull,
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="signed Euclidean distance to the CSF interface"
)
- classif_with_outer_boundaries = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="classification image of the cortex with labelled boundaries "
+ classif_with_outer_boundaries: File = field(
+ default=os.devnull,
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="classification image of the cortex with labelled boundaries "
"(50 on the CSF, 150 on the white matter)")
- def get_commandline(self):
- # bv_env automatically launches the command through Python on Windows
- return [
- "bv_env",
+ def execute(self, context):
+ cmd = [
"ylDistmaps",
self.classif,
self.distwhite,
self.distCSF,
self.classif_with_outer_boundaries
]
+ subprocess.check_call(cmd)
+
+
+class ThresholdModeEnum(str, enum.Enum):
+ eq = "eq"
+ di = "di"
+ lt = "lt"
+ le = "le"
+ gt = "gt"
+ ge = "ge"
class ImageSingleThreshold(capsul.api.Process):
"""Threshold an image"""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image")
- binary = Bool(
- False, output=False, optional=True,
- desc="return a binary result as int16")
- fg = Int(
- 32767, output=False, optional=True,
- desc="foreground value set on thresholded in voxels in binary mode")
- threshold = Float(
- Undefined, output=False,
- desc="value of the threshold")
- mode = Enum(
- "eq", "di", "lt", "le", "gt", "ge", output=False,
- desc="""\
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image")
+ binary: bool = field(
+ default=False,
+ doc="return a binary result as int16")
+ fg: int = field(
+ default=32767,
+ doc="foreground value set on thresholded in voxels in binary mode")
+ threshold: float = field(
+ doc="value of the threshold")
+ mode: ThresholdModeEnum = field(
+ doc="""\
thresholding type
lt --> lower than
le --> lower or equal to
@@ -540,97 +540,110 @@ class ImageSingleThreshold(capsul.api.Process):
di --> differ
""")
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="thresholded image")
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="thresholded image")
- def get_commandline(self):
+ def execute(self, context):
cmd = [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
"AimsThreshold",
"--verbose", "0",
"-b", str(self.binary),
- "-m", self.mode,
+ "-m", self.mode.value,
"-t", repr(self.threshold),
"--input", self.input_image,
"--output", self.output_image
]
if self.binary:
cmd += ["--fg", str(self.fg)]
- return cmd
+ subprocess.check_call(cmd)
class LabelEachVoxel(capsul.api.Process):
"""Assign a unique label to each voxel of a mask"""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input mask")
- first_label = Int(
- 1, output=False, optional=True,
- desc="assign labels starting with this value")
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input mask")
+ first_label: int = field(
+ default=1,
+ doc="assign labels starting with this value")
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output label volume with S32 datatype")
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output label volume with S32 datatype")
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"ylLabelEachVoxel",
"--first-label", str(self.first_label),
"--input", self.input_image,
"--output", self.output_image
]
+ subprocess.check_call(cmd)
+
+
+class VolumeDataTypeEnum(str, enum.Enum):
+ CDOUBLE = "CDOUBLE"
+ CFLOAT = "CFLOAT"
+ DOUBLE = "DOUBLE"
+ FLOAT = "FLOAT"
+ HSV = "HSV"
+ POINT3DF = "POINT3DF"
+ RGB = "RGB"
+ RGBA = "RGBA"
+ S16 = "S16"
+ S32 = "S32"
+ S8 = "S8"
+ U16 = "U16"
+ U32 = "U32"
+ U8 = "U8"
+ VECTOR_OF_3_SHORT = "VECTOR_OF_3_SHORT"
+ VECTOR_OF_6_FLOAT = "VECTOR_OF_6_FLOAT"
class ConvertDataType(capsul.api.Process):
"""Convert the data type of an image"""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image")
- data_type = Enum(
- "CDOUBLE", "CFLOAT", "DOUBLE", "FLOAT", "HSV", "POINT3DF", "RGB",
- "RGBA", "S16", "S32", "S8", "U16", "U32", "U8", "VECTOR_OF_3_SHORT",
- "VECTOR_OF_6_FLOAT", output=False,
- desc="output data type")
-
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output label volume with S32 datatype")
-
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image")
+ data_type: VolumeDataTypeEnum = field(
+ doc="output data type")
+
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output label volume with S32 datatype")
+
+ def execute(self, context):
+ cmd = [
"AimsFileConvert",
- "--type", self.data_type,
+ "--type", self.data_type.value,
"--input", self.input_image,
"--output", self.output_image
]
+ subprocess.check_call(cmd)
class MergeImagesAllToOne(capsul.api.Process):
"""Merge values into an image using a mask image."""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image")
- mask_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="mask image (must have an integer voxel type)")
- value = Float(
- Undefined, output=False,
- desc="replacement value")
-
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output image"
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image")
+ mask_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="mask image (must have an integer voxel type)")
+ value: float = field(
+ doc="replacement value")
+
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output image"
)
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"AimsMerge",
"--mode", "ao",
"--value", repr(self.value),
@@ -638,75 +651,74 @@ def get_commandline(self):
"--Mask", self.mask_image,
"--output", self.output_image
]
+ subprocess.check_call(cmd)
class MergeImagesSameValues(capsul.api.Process):
"""Merge values into an image using a mask image."""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input image")
- mask_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="mask image (must have an integer voxel type)")
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input image")
+ mask_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="mask image (must have an integer voxel type)")
- output_image = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output image"
+ output_image: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output image"
)
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ def execute(self, context):
+ cmd = [
"AimsMerge",
"--mode", "sv",
"--input", self.input_image,
"--Mask", self.mask_image,
"--output", self.output_image
]
+ subprocess.check_call(cmd)
class PropagateAlongFieldGradient(capsul.api.Process):
"""Propagate labels along the gradient of a scalar field."""
- seeds = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="""\
+ seeds: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="""\
volume of labels (either S16 or S32):
- positive labels are seeds,
- zero is the region of propagation,
- negative labels are forbidden regions.
""")
- target_label = Int(
- 0, output=False, optional=True,
- desc="voxels having this label are used as advection starting points")
- grad_field = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="scalar field whose gradient is to be advected along")
- step_size = Float(
- 0.03, output=False, optional=True,
- desc="size of the advection step (millimetres)")
- upfield = Bool(
- False, optional=False,
- desc="Direction of advection (upfield if True, downfield if False)")
- max_dist = Float(
- 6, output=False, optional=True,
- desc="maximum advection distance (millimetres)")
- verbosity = Int(1, output=False, optional=True, desc="Verbosity level")
-
- output_labels = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output the propagated labels")
- dest_points = File(
- Undefined, output=True, optional=True,
- allowed_extensions=VOLUME_EXTENSIONS,
- desc="output the destination points for each propagated voxel")
-
- def get_commandline(self):
+ target_label: int = field(
+ default=0,
+ doc="voxels having this label are used as advection starting points")
+ grad_field: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="scalar field whose gradient is to be advected along")
+ step_size: float = field(
+ default=0.03,
+ doc="size of the advection step (millimetres)")
+ upfield: bool = field(
+ doc="Direction of advection (upfield if True, downfield if False)")
+ max_dist: float = field(
+ default=6,
+ doc="maximum advection distance (millimetres)")
+ verbosity: int = field(default=1, doc="Verbosity level")
+
+ output_labels: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output the propagated labels")
+ dest_points: File = field(
+ write=True, optional=True,
+ extensions=VOLUME_EXTENSIONS,
+ doc="output the destination points for each propagated voxel")
+
+ def execute(self, context):
command_step_size = ((-self.step_size) if self.upfield
else self.step_size)
- args = [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ cmd = [
"ylPropagateAlongField",
"--seeds", self.seeds,
"--grad-field", self.grad_field,
@@ -716,123 +728,133 @@ def get_commandline(self):
"--verbose", str(self.verbosity),
"--output", self.output_labels,
]
- if self.dest_points is not Undefined:
- args += ["--dest-points", self.dest_points]
- return args
+ if self.dest_points is not undefined:
+ cmd += ["--dest-points", self.dest_points]
+ subprocess.check_call(cmd)
class GetExchangedPropagationVolume(capsul.api.Process):
"""Get a volume of exchanged propagation labels"""
- classif_with_outer_boundaries = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="classification image of the cortex (100 inside, 0 in CSF, "
+ classif_with_outer_boundaries: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="classification image of the cortex (100 inside, 0 in CSF, "
"200 in white matter, 50 on the CSF border, 150 on the white matter "
"border)")
- CSF_labels_on_white = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="labels of the CSF projected onto the white matter boundary")
- white_labels_on_CSF = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="labels of the white matter projected onto the CSF boundary")
-
- output = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="volume where each interface is labelled with connected "
+ CSF_labels_on_white: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="labels of the CSF projected onto the white matter boundary")
+ white_labels_on_CSF: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="labels of the white matter projected onto the CSF boundary")
+
+ output: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="volume where each interface is labelled with connected "
"components facing the same voxels of the other interface")
- def get_commandline(self):
- # bv_env automatically launches the command through Python on Windows
- return [
- "bv_env",
+ def execute(self, context):
+ cmd = [
"ylGetExchangedPropvol",
self.classif_with_outer_boundaries,
self.CSF_labels_on_white,
self.white_labels_on_CSF,
self.output
]
+ subprocess.check_call(cmd)
class RelabelConjunction(capsul.api.Process):
"""Assign new labels to voxels that have the same pair of labels"""
- labels1 = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input label image")
- labels2 = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input label image")
-
- output = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output label image")
-
- def get_commandline(self):
- # bv_env automatically launches the command through Python on Windows
- return [
- "bv_env",
+ labels1: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input label image")
+ labels2: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input label image")
+
+ output: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output label image")
+
+ def execute(self, context):
+ cmd = [
"ylRelabelConjunction",
self.labels1,
self.labels2,
self.output
]
+ subprocess.check_call(cmd)
+
+
+# TODO restore enum (members need "=" assignments; with ":" they are mere
+# annotations and the enum ends up with no members)
+# class ConnectivityTypeEnum(str, enum.Enum):
+#     c26 = "26"
+#     c4xy = "4xy"
+#     c4xz = "4xz"
+#     c4yz = "4yz"
+#     c6 = "6"
+#     c8xy = "8xy"
+#     c8xz = "8xz"
+#     c8yz = "8yz"
+#     c18 = "18"
+ConnectivityTypeEnum = str
class ConnectedComponents(capsul.api.Process):
"""Extract connected components of a labelled volume"""
- input_image = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input label image")
- connectivity = Enum(
- "26", "4xy", "4xz", "4yz", "6", "8xy", "8xz", "8yz", "18",
- output=False, optional=True,
- desc="connectivity")
-
- output = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output labelled connected components volume")
-
- def get_commandline(self):
- return [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ input_image: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input label image")
+ connectivity: ConnectivityTypeEnum = field(
+ default="26",
+ doc="connectivity")
+
+ output: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output labelled connected components volume")
+
+ def execute(self, context):
+ cmd = [
"AimsConnectComp",
"--input", self.input_image,
"--output", self.output,
- "--connectivity", self.connectivity,
+ "--connectivity", self.connectivity
]
+ subprocess.check_call(cmd)
class MergeCortexColumnRegions(capsul.api.Process):
"""Aggregate over-segmented cortical traverses."""
- input_traverses = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input label volume")
- proj_csf = File(
- Undefined, output=False, optional=True,
- allowed_extensions=VOLUME_EXTENSIONS,
- desc="projected coordinates of the CSF surface")
- proj_white = File(
- Undefined, output=False, optional=True,
- allowed_extensions=VOLUME_EXTENSIONS,
- desc="projected coordinates of the white surface")
- classif = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="classification image of the cortex (100 inside, 0 in CSF, "
+ input_traverses: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input label volume")
+ proj_csf: File = field(
+ optional=True,
+ extensions=VOLUME_EXTENSIONS,
+ doc="projected coordinates of the CSF surface")
+ proj_white: File = field(
+ optional=True,
+ extensions=VOLUME_EXTENSIONS,
+ doc="projected coordinates of the white surface")
+ classif: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="classification image of the cortex (100 inside, 0 in CSF, "
"200 in white matter)")
- goal_diameter = Float(
- 0.5, output=False, optional=True,
- desc="goal region diameter (millimetres)")
- verbosity = Int(2, output=False, optional=True, desc="Verbosity level")
-
- output = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output label volume")
-
- def get_commandline(self):
- args = [
- "bv_env", # needed to set DYLD_* in environment on Mac OS 10.11+
+ goal_diameter: float = field(
+ default=0.5,
+ doc="goal region diameter (millimetres)")
+ verbosity: int = field(default=2, doc="Verbosity level")
+
+ output: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output label volume")
+
+ def execute(self, context):
+ cmd = [
"ylMergeCortexColumnRegions",
"--input", self.input_traverses,
"--proj-csf", self.proj_csf,
@@ -842,46 +864,60 @@ def get_commandline(self):
"--verbose", str(self.verbosity),
"--output", self.output,
]
- return args
+ subprocess.check_call(cmd)
class Relabel(capsul.api.Process):
"""Assign new consecutive labels to an existing label image"""
- input = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input label image")
+ input: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input label image")
- output = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output label image")
+ output: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output label image")
- def get_commandline(self):
- # bv_env automatically launches the command through Python on Windows
- return [
- "bv_env",
+ def execute(self, context):
+ cmd = [
"ylRelabel",
self.input,
self.output
]
+ subprocess.check_call(cmd)
class RandomizeLabels(capsul.api.Process):
"""Randomize the labels of an image with consecutive labels"""
- input = File(
- Undefined, output=False, allowed_extensions=VOLUME_EXTENSIONS,
- desc="input label image")
+ input: File = field(
+ extensions=VOLUME_EXTENSIONS,
+ doc="input label image")
- output = File(
- Undefined, output=True, allowed_extensions=VOLUME_EXTENSIONS,
- desc="output label image")
+ output: File = field(
+ write=True, extensions=VOLUME_EXTENSIONS,
+ doc="output label image")
- def get_commandline(self):
- # bv_env automatically launches the command through Python on Windows
- return [
- "bv_env",
+ def execute(self, context):
+ cmd = [
"ylRandomizeLabels",
self.input,
self.output
]
+ subprocess.check_call(cmd)
+
+
+# TODO delete
+if __name__ == '__main__':
+ import sys
+ sys.stdout.flush()
+ from soma.qt_gui.qt_backend import QtGui
+ from capsul.qt_gui.widgets import PipelineDeveloperView
+ pipeline = capsul.api.Capsul.executable(
+ 'highres_cortex.capsul.isovolume')
+ app = QtGui.QApplication.instance()
+ if not app:
+ app = QtGui.QApplication(sys.argv)
+ view1 = PipelineDeveloperView(pipeline, show_sub_pipelines=True)
+ view1.show()
+ app.exec_()
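Every process above now launches its command inside execute(self, context) with subprocess.check_call instead of returning a get_commandline() list. Since none of these execute() bodies actually use the context argument, a single migrated process can be smoke-tested directly from Python; this is only a sketch under that assumption (file names are hypothetical), and the normal entry point remains "python -m capsul run ..." as in the example scripts.

import capsul.api

laplacian = capsul.api.Capsul.executable(
    "highres_cortex.capsul.processes.Laplacian")
laplacian.classif = "classif.nii.gz"      # hypothetical classification image
laplacian.laplace_field = "heat.nii.gz"   # hypothetical output path
laplacian.execute(None)                   # context is unused by these processes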
diff --git a/python/highres_cortex/capsul/thickness_adv.json b/python/highres_cortex/capsul/thickness_adv.json
new file mode 100644
index 0000000..ebe2f36
--- /dev/null
+++ b/python/highres_cortex/capsul/thickness_adv.json
@@ -0,0 +1,78 @@
+{
+ "type": "custom_pipeline",
+ "name": "thickness_adv",
+ "definition": {
+ "export_parameters": false,
+ "doc": "Compute the cortical thickness along Laplace field lines from a classification volume, using Eulerian advection (slightly more precise than upwinding, but much slower).",
+ "executables": {
+ "laplace": {
+ "definition": "highres_cortex.capsul.processes.Laplacian",
+ "type": "process"
+ },
+ "binarize_cortex": {
+ "definition": "highres_cortex.capsul.processes.BinarizeCortex",
+ "type": "process"
+ },
+ "advect_toward_pial": {
+ "definition": "highres_cortex.capsul.processes.EuclideanAdvectionAlongGradient",
+ "type": "process",
+ "parameters": {
+ "domain_type": "interpolated",
+ "upfield": true
+ }
+ },
+ "advect_toward_white": {
+ "definition": "highres_cortex.capsul.processes.EuclideanAdvectionAlongGradient",
+ "type": "process",
+ "parameters": {
+ "domain_type": "interpolated",
+ "upfield": false
+ }
+ },
+ "total_length": {
+ "definition": "highres_cortex.capsul.processes.ImageArithmetic2Inputs",
+ "type": "process",
+ "parameters": {
+ "formula": "I1 + I2"
+ }
+ },
+ "equidistant_depth": {
+ "definition": "highres_cortex.capsul.processes.ImageArithmetic2Inputs",
+ "type": "process",
+ "parameters": {
+ "formula": "I1 / I2"
+ }
+ }
+ },
+ "parameters": {
+ "laplace_precision": 0.001,
+ "laplace_typical_cortical_thickness": 3.0,
+ "advection_max_dist": 6.0,
+ "advection_step_size": 0.03,
+ "verbosity": 1
+ },
+ "links": [
+ "classif->binarize_cortex.classif",
+ "classif->laplace.classif",
+ "verbosity->advect_toward_pial.verbosity",
+ "verbosity->advect_toward_white.verbosity",
+ "verbosity->laplace.verbosity",
+ "laplace_precision->laplace.precision",
+ "laplace_typical_cortical_thickness->laplace.typical_cortical_thickness",
+ "advection_step_size->advect_toward_white.step_size",
+ "advection_step_size->advect_toward_pial.step_size",
+ "advection_max_dist->advect_toward_pial.max_dist",
+ "advection_max_dist->advect_toward_white.max_dist",
+ "laplace.laplace_field->advect_toward_white.grad_field",
+ "laplace.laplace_field->advect_toward_pial.grad_field",
+ "binarize_cortex.output_image->advect_toward_white.domain",
+ "binarize_cortex.output_image->advect_toward_pial.domain",
+ "advect_toward_pial.output_length->total_length.input_image_1",
+ "advect_toward_pial.output_length->equidistant_depth.input_image_1",
+ "advect_toward_white.output_length->total_length.input_image_2",
+ "total_length.output_image->equidistant_depth.input_image_2",
+ "total_length.output_image->thickness_image",
+ "equidistant_depth.output_image->equidistant_depth"
+ ]
+ }
+}
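As plain arithmetic, the links above combine the two Euclidean path lengths into the thickness map and a normalised depth (a sketch only, assuming the two length maps are loaded as NumPy arrays):

import numpy as np

def thickness_and_depth(len_to_pial: np.ndarray, len_to_white: np.ndarray):
    # total_length: formula "I1 + I2", exported as thickness_image
    thickness = len_to_pial + len_to_white
    # equidistant_depth: formula "I1 / I2" = length to pial / total length
    equidistant_depth = len_to_pial / thickness
    return thickness, equidistant_depth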
diff --git a/python/highres_cortex/capsul/thickness_adv.xml b/python/highres_cortex/capsul/thickness_adv.xml
deleted file mode 100644
index 4dc5e02..0000000
--- a/python/highres_cortex/capsul/thickness_adv.xml
+++ /dev/null
@@ -1,118 +0,0 @@
-
-
-
- Compute the cortical thickness along Laplace field lines from a classification volume, using Eulerian advection (slightly more precise than upwinding, but much slower)
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/python/highres_cortex/capsul/thickness_upw.json b/python/highres_cortex/capsul/thickness_upw.json
new file mode 100644
index 0000000..70448a9
--- /dev/null
+++ b/python/highres_cortex/capsul/thickness_upw.json
@@ -0,0 +1,69 @@
+{
+ "type": "custom_pipeline",
+ "name": "thickness_upw",
+ "definition": {
+ "export_parameters": false,
+ "doc": "Compute the cortical thickness along Laplace field lines from a classification volume, using upwinding (much faster than advection, but slightly less precise).",
+ "executables": {
+ "laplace": {
+ "definition": "highres_cortex.capsul.processes.Laplacian",
+ "type": "process"
+ },
+ "upwind_from_pial": {
+ "definition": "highres_cortex.capsul.processes.EuclideanUpwindingAlongGradient",
+ "type": "process",
+ "parameters": {
+ "domain_label": 100,
+ "downfield": false,
+ "origin_label": 0
+ }
+ },
+ "upwind_from_white": {
+ "definition": "highres_cortex.capsul.processes.EuclideanUpwindingAlongGradient",
+ "type": "process",
+ "parameters": {
+ "domain_label": 100,
+ "downfield": true,
+ "origin_label": 200
+ }
+ },
+ "total_length": {
+ "definition": "highres_cortex.capsul.processes.ImageArithmetic2Inputs",
+ "type": "process",
+ "parameters": {
+ "formula": "I1 + I2"
+ }
+ },
+ "equidistant_depth": {
+ "definition": "highres_cortex.capsul.processes.ImageArithmetic2Inputs",
+ "type": "process",
+ "parameters": {
+ "formula": "I1 / I2"
+ }
+ }
+ },
+ "parameters": {
+ "laplace_precision": 0.001,
+ "laplace_typical_cortical_thickness": 3.0,
+ "verbosity": 1
+ },
+ "links": [
+ "classif->upwind_from_white.domain",
+ "classif->laplace.classif",
+ "classif->upwind_from_pial.domain",
+ "verbosity->upwind_from_white.verbosity",
+ "verbosity->laplace.verbosity",
+ "verbosity->upwind_from_pial.verbosity",
+ "laplace_precision->laplace.precision",
+ "laplace_typical_cortical_thickness->laplace.typical_cortical_thickness",
+ "laplace.laplace_field->upwind_from_pial.scalar_field",
+ "laplace.laplace_field->upwind_from_white.scalar_field",
+ "upwind_from_pial.output->equidistant_depth.input_image_1",
+ "upwind_from_pial.output->total_length.input_image_1",
+ "upwind_from_white.output->total_length.input_image_2",
+ "total_length.output_image->equidistant_depth.input_image_2",
+ "total_length.output_image->thickness_image",
+ "equidistant_depth.output_image->equidistant_depth"
+ ]
+ }
+}
diff --git a/python/highres_cortex/capsul/thickness_upw.xml b/python/highres_cortex/capsul/thickness_upw.xml
deleted file mode 100644
index 54b57bb..0000000
--- a/python/highres_cortex/capsul/thickness_upw.xml
+++ /dev/null
@@ -1,112 +0,0 @@
-    Compute the cortical thickness along Laplace field lines from a classification volume, using upwinding (much faster than advection, but slightly less precise)
[remaining XML markup of the deleted pipeline definition not reproduced; it is replaced by thickness_upw.json above]
diff --git a/python/highres_cortex/capsul/traverses.json b/python/highres_cortex/capsul/traverses.json
new file mode 100644
index 0000000..3d7b4fd
--- /dev/null
+++ b/python/highres_cortex/capsul/traverses.json
@@ -0,0 +1,233 @@
+{
+ "type": "custom_pipeline",
+ "name": "traverses",
+ "definition": {
+ "export_parameters": false,
+ "doc": "Create cortical traverses. Note that there is a problem with the propagation of labels: the step size is fixed, which means that sometimes the point can skip the corner of a voxel, and thus go directly from a bulk voxel to an outside voxel. In this case it is recorded as a \"dead-end\" advection path, no resulting label is recorded and it appears as zero in the result. This problem also appears in the previous \"exchange\" step, but is mitigated by the subsequent connex component detection (each failed propagation is assigned a different label). Quick fix: fix the conjunction step to not aggregate zeros. TODO: the proper way to fix this would be to force the advection path to respect the boundaries of voxels, so that the corner of voxels cannot be skipped over. This would also prevent the advection path from crossing the thin CSF surface within the sulcus (comes from skeleton).",
+ "executables": {
+ "laplace": {
+ "definition": "highres_cortex.capsul.processes.Laplacian",
+ "type": "process"
+ },
+ "distmaps": {
+ "definition": "highres_cortex.capsul.processes.Distmaps",
+ "type": "process"
+ },
+ "threshold_CSF_interface": {
+ "definition": "highres_cortex.capsul.processes.ImageSingleThreshold",
+ "type": "process",
+ "parameters": {
+ "binary": true,
+ "fg": 32767,
+ "mode": "eq",
+ "threshold": 50.0
+ }
+ },
+ "threshold_white_interface": {
+ "definition": "highres_cortex.capsul.processes.ImageSingleThreshold",
+ "type": "process",
+ "parameters": {
+ "binary": true,
+ "fg": 32767,
+ "mode": "eq",
+ "threshold": 150.0
+ }
+ },
+ "label_CSF_interface": {
+ "definition": "highres_cortex.capsul.processes.LabelEachVoxel",
+ "type": "process",
+ "parameters": {
+ "first_label": 100000001
+ }
+ },
+ "label_white_interface": {
+ "definition": "highres_cortex.capsul.processes.LabelEachVoxel",
+ "type": "process",
+ "parameters": {
+ "first_label": 200000001
+ }
+ },
+ "negative_cortex": {
+ "definition": "highres_cortex.capsul.processes.ImageSingleThreshold",
+ "type": "process",
+ "parameters": {
+ "binary": true,
+ "fg": -1,
+ "mode": "di",
+ "threshold": 100.0
+ }
+ },
+ "negative_cortex_converter": {
+ "definition": "highres_cortex.capsul.processes.ConvertDataType",
+ "type": "process",
+ "parameters": {
+ "data_type": "S32"
+ }
+ },
+ "CSF_negative_cortex_merger": {
+ "definition": "highres_cortex.capsul.processes.MergeImagesSameValues",
+ "type": "process"
+ },
+ "CSF_labelled_interface_merger": {
+ "definition": "highres_cortex.capsul.processes.MergeImagesAllToOne",
+ "type": "process",
+ "parameters": {
+ "value": 200000000.0
+ }
+ },
+ "white_negative_cortex_merger": {
+ "definition": "highres_cortex.capsul.processes.MergeImagesSameValues",
+ "type": "process"
+ },
+ "white_labelled_interface_merger": {
+ "definition": "highres_cortex.capsul.processes.MergeImagesAllToOne",
+ "type": "process",
+ "parameters": {
+ "value": 100000000.0
+ }
+ },
+ "CSF_to_white_propagation": {
+ "definition": "highres_cortex.capsul.processes.PropagateAlongFieldGradient",
+ "type": "process",
+ "parameters": {
+ "max_dist": 6.0,
+ "target_label": 200000000,
+ "upfield": true
+ }
+ },
+ "white_to_CSF_propagation": {
+ "definition": "highres_cortex.capsul.processes.PropagateAlongFieldGradient",
+ "type": "process",
+ "parameters": {
+ "max_dist": 6.0,
+ "target_label": 100000000,
+ "upfield": false
+ }
+ },
+ "get_exchanged_propvol": {
+ "definition": "highres_cortex.capsul.processes.GetExchangedPropagationVolume",
+ "type": "process"
+ },
+ "merge_exchanged_on_CSF": {
+ "definition": "highres_cortex.capsul.processes.MergeImagesOneToOne",
+ "type": "process",
+ "parameters": {
+ "label_to_replace": 150,
+ "value": 0.0
+ }
+ },
+ "merge_exchanged_on_white": {
+ "definition": "highres_cortex.capsul.processes.MergeImagesOneToOne",
+ "type": "process",
+ "parameters": {
+ "label_to_replace": 50,
+ "value": 0.0
+ }
+ },
+ "CSF_on_bulk_propagation": {
+ "definition": "highres_cortex.capsul.processes.PropagateAlongFieldGradient",
+ "type": "process",
+ "parameters": {
+ "max_dist": 6.0,
+ "target_label": 0,
+ "upfield": true
+ }
+ },
+ "white_on_bulk_propagation": {
+ "definition": "highres_cortex.capsul.processes.PropagateAlongFieldGradient",
+ "type": "process",
+ "parameters": {
+ "max_dist": 6.0,
+ "target_label": 0,
+ "upfield": false
+ }
+ },
+ "relabel_conjunction": {
+ "definition": "highres_cortex.capsul.processes.RelabelConjunction",
+ "type": "process"
+ },
+ "connected_conjunction": {
+ "definition": "highres_cortex.capsul.processes.ConnectedComponents",
+ "type": "process",
+ "parameters": {
+ "connectivity": "26"
+ }
+ },
+ "merge_regions": {
+ "definition": "highres_cortex.capsul.processes.MergeCortexColumnRegions",
+ "type": "process"
+ },
+ "relabel": {
+ "definition": "highres_cortex.capsul.processes.Relabel",
+ "type": "process"
+ },
+ "randomize_labels": {
+ "definition": "highres_cortex.capsul.processes.RandomizeLabels",
+ "type": "process"
+ }
+ },
+ "parameters": {
+ "advection_step_size": 0.03,
+ "goal_traverse_diameter": 0.5,
+ "laplace_precision": 0.001,
+ "laplace_typical_cortical_thickness": 3.0,
+ "verbosity": 1
+ },
+ "links": [
+ "classif->negative_cortex.input_image",
+ "classif->merge_regions.classif",
+ "classif->laplace.classif",
+ "classif->distmaps.classif",
+ "verbosity->white_to_CSF_propagation.verbosity",
+ "verbosity->CSF_to_white_propagation.verbosity",
+ "verbosity->merge_regions.verbosity",
+ "verbosity->white_on_bulk_propagation.verbosity",
+ "verbosity->laplace.verbosity",
+ "verbosity->CSF_on_bulk_propagation.verbosity",
+ "laplace_precision->laplace.precision",
+ "laplace_typical_cortical_thickness->laplace.typical_cortical_thickness",
+ "advection_step_size->CSF_on_bulk_propagation.step_size",
+ "advection_step_size->white_on_bulk_propagation.step_size",
+ "advection_step_size->CSF_to_white_propagation.step_size",
+ "advection_step_size->white_to_CSF_propagation.step_size",
+ "goal_traverse_diameter->merge_regions.goal_diameter",
+ "laplace.laplace_field->white_to_CSF_propagation.grad_field",
+ "laplace.laplace_field->CSF_on_bulk_propagation.grad_field",
+ "laplace.laplace_field->white_on_bulk_propagation.grad_field",
+ "laplace.laplace_field->CSF_to_white_propagation.grad_field",
+ "distmaps.classif_with_outer_boundaries->merge_exchanged_on_CSF.mask_image",
+ "distmaps.classif_with_outer_boundaries->get_exchanged_propvol.classif_with_outer_boundaries",
+ "distmaps.classif_with_outer_boundaries->threshold_white_interface.input_image",
+ "distmaps.classif_with_outer_boundaries->threshold_CSF_interface.input_image",
+ "distmaps.classif_with_outer_boundaries->merge_exchanged_on_white.mask_image",
+ "threshold_CSF_interface.output_image->label_CSF_interface.input_image",
+ "threshold_white_interface.output_image->label_white_interface.input_image",
+ "label_CSF_interface.output_image->CSF_negative_cortex_merger.mask_image",
+ "label_CSF_interface.output_image->white_labelled_interface_merger.mask_image",
+ "label_white_interface.output_image->CSF_labelled_interface_merger.mask_image",
+ "label_white_interface.output_image->white_negative_cortex_merger.mask_image",
+ "negative_cortex.output_image->negative_cortex_converter.input_image",
+ "negative_cortex_converter.output_image->white_negative_cortex_merger.input_image",
+ "negative_cortex_converter.output_image->CSF_negative_cortex_merger.input_image",
+ "CSF_negative_cortex_merger.output_image->CSF_labelled_interface_merger.input_image",
+ "CSF_labelled_interface_merger.output_image->CSF_to_white_propagation.seeds",
+ "white_negative_cortex_merger.output_image->white_labelled_interface_merger.input_image",
+ "white_labelled_interface_merger.output_image->white_to_CSF_propagation.seeds",
+ "CSF_to_white_propagation.output_labels->get_exchanged_propvol.CSF_labels_on_white",
+ "white_to_CSF_propagation.output_labels->get_exchanged_propvol.white_labels_on_CSF",
+ "get_exchanged_propvol.output->merge_exchanged_on_CSF.input_image",
+ "get_exchanged_propvol.output->merge_exchanged_on_white.input_image",
+ "merge_exchanged_on_CSF.output_image->CSF_on_bulk_propagation.seeds",
+ "merge_exchanged_on_white.output_image->white_on_bulk_propagation.seeds",
+ "CSF_on_bulk_propagation.dest_points->merge_regions.proj_csf",
+ "CSF_on_bulk_propagation.output_labels->relabel_conjunction.labels1",
+ "white_on_bulk_propagation.dest_points->merge_regions.proj_white",
+ "white_on_bulk_propagation.output_labels->relabel_conjunction.labels2",
+ "relabel_conjunction.output->connected_conjunction.input_image",
+ "connected_conjunction.output->merge_regions.input_traverses",
+ "merge_regions.output->relabel.input",
+ "relabel.output->randomize_labels.input",
+ "randomize_labels.output->cortical_traverses"
+ ]
+ }
+}
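
The pipeline doc above mentions a quick fix for the dead-end advection paths: make the conjunction step stop aggregating zeros. Below is a conceptual sketch of one reading of that idea, not the actual RelabelConjunction implementation: each voxel is labelled by the pair of labels propagated from the CSF and white sides, and voxels where either propagation failed (label 0) keep label 0 instead of being merged into a region. All names and values are illustrative.

import numpy as np

# Labels propagated onto the cortex bulk from each side (0 = failed,
# "dead-end" advection path); values are made up for illustration.
csf_labels = np.array([3, 3, 0, 5])    # cf. CSF_on_bulk_propagation.output_labels
white_labels = np.array([7, 7, 7, 9])  # cf. white_on_bulk_propagation.output_labels

pair_to_label = {}
conjunction = np.zeros_like(csf_labels)
for i, (c, w) in enumerate(zip(csf_labels, white_labels)):
    if c == 0 or w == 0:
        continue  # do not aggregate zeros: the "quick fix" suggested in the doc
    conjunction[i] = pair_to_label.setdefault((c, w), len(pair_to_label) + 1)

print(conjunction)  # [1 1 0 2]: voxels sharing both projections form one traverse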
diff --git a/python/highres_cortex/capsul/traverses.xml b/python/highres_cortex/capsul/traverses.xml
deleted file mode 100644
index 959405c..0000000
--- a/python/highres_cortex/capsul/traverses.xml
+++ /dev/null
@@ -1,313 +0,0 @@
-    Create cortical traverses
-
-    .. note::
-        There is a problem with the propagation of labels: the step size is
-        fixed, which means that sometimes the point can skip the corner of a
-        voxel, and thus go directly from a bulk voxel to an outside voxel. In
-        this case it is recorded as a "dead-end" advection path, no resulting
-        label is recorded and it appears as zero in the result.
-
-        This problem also appears in the previous "exchange" step, but is
-        mitigated by the subsequent connex component detection (each failed
-        propagation is assigned a different label).
-
-        Quick fix: fix the conjunction step to not aggregate zeros.
-
-        TODO: the proper way to fix this would be to force the advection path
-        to respect the boundaries of voxels, so that the corner of voxels
-        cannot be skipped over. This would also prevent the advection path
-        from crossing the thin CSF surface within the sulcus (comes from
-        skeleton).
[remaining XML markup of the deleted pipeline definition not reproduced; it is replaced by traverses.json above]
diff --git a/python/highres_cortex/test/test_capsul.py b/python/highres_cortex/test/test_capsul.py
index 113e1b9..6a7bdf5 100644
--- a/python/highres_cortex/test/test_capsul.py
+++ b/python/highres_cortex/test/test_capsul.py
@@ -48,59 +48,74 @@
class SphereTestCase(unittest.TestCase):
- def setUp(self):
+ @classmethod
+ def setUpClass(cls):
try:
- self.test_dir = tempfile.mkdtemp(
+ cls.test_dir = tempfile.mkdtemp(
prefix="highres-cortex-capsul-tests")
synthetic_data.write_sphere_and_reference_result(
- 1, 4, 0.3, dir=self.test_dir)
+ 1, 4, 0.3, dir=cls.test_dir)
- self.result_comp = compare_with_reference.ResultComparator(
- self.test_dir)
+ cls.result_comp = compare_with_reference.ResultComparator(
+ cls.test_dir)
- p1 = capsul.api.get_process_instance(
+ cls.capsul = capsul.api.Capsul(
+ "test-highres-cortex",
+ site_file=None,
+ user_file=None,
+ database_path=os.path.join(cls.test_dir, "capsul.rdb")
+ )
+ cls.capsul_engine = cls.capsul.engine()
+
+ p1 = capsul.api.executable(
"highres_cortex.capsul.processes.BinarizeCortex")
- p1.classif = os.path.join(self.test_dir, "classif.nii.gz")
- p1.output_image = os.path.join(self.test_dir, "cortex_mask.nii.gz")
- p1()
+ p1.classif = os.path.join(cls.test_dir, "classif.nii.gz")
+ p1.output_image = os.path.join(cls.test_dir, "cortex_mask.nii.gz")
+ with cls.capsul_engine as ce:
+ ce.run(p1)
except BaseException:
- if hasattr(self, "test_dir"):
- shutil.rmtree(self.test_dir)
+ if hasattr(cls, "test_dir"):
+ shutil.rmtree(cls.test_dir)
raise
if os.environ.get('KEEP_TEMPORARY'):
- print('highres-cortex test directory is {0}'.format(self.test_dir))
+ print('highres-cortex test directory is {0}'.format(cls.test_dir))
- def tearDown(self):
+ @classmethod
+ def tearDownClass(cls):
if not os.environ.get('KEEP_TEMPORARY'):
- shutil.rmtree(self.test_dir)
+ shutil.rmtree(cls.test_dir)
+ # Tests are run in lexicographical order. The 1 prefix is a workaround for
+ # https://github.com/populse/capsul/issues/325
def test_laplacian(self):
- p = capsul.api.get_process_instance(
+ p = capsul.api.executable(
"highres_cortex.capsul.processes.Laplacian")
p.classif = os.path.join(self.test_dir, "classif.nii.gz")
p.precision = 0.001
p.typical_cortical_thickness = 3
p.laplace_field = os.path.join(self.test_dir, "laplacian.nii.gz")
- p()
+ with self.capsul_engine as ce:
+ ce.run(p)
res = self.result_comp.ensure_max_rms_error(
"laplacian.nii.gz", 0.017,
reference_file="reference_laplacian.nii.gz")
self.assertTrue(res, msg="RMS error is too high")
def test_filtered_sumcurvs(self):
- p = capsul.api.get_process_instance(
+ p = capsul.api.executable(
"highres_cortex.capsul.filtered_sumcurvs")
p.input = os.path.join(self.test_dir, "reference_laplacian.nii.gz")
p.mode = "sum"
p.output = os.path.join(self.test_dir, "curvature.nii.gz")
- p()
+ with self.capsul_engine as ce:
+ ce.run(p)
res = self.result_comp.ensure_max_rms_error(
"curvature.nii.gz", 0.067,
reference_file="reference_curvature.nii.gz")
self.assertTrue(res, msg="RMS error is too high")
def test_advect_euclidean(self):
- p = capsul.api.get_process_instance(
+ p = capsul.api.executable(
"highres_cortex.capsul.processes.EuclideanAdvectionAlongGradient")
p.domain = os.path.join(self.test_dir, "cortex_mask.nii.gz")
p.grad_field = os.path.join(
@@ -109,43 +124,46 @@ def test_advect_euclidean(self):
p.upfield = False
p.output_length = os.path.join(
self.test_dir, "euclidean_adv_toward_white.nii.gz")
- p()
+ with self.capsul_engine as ce:
+ ce.run(p)
res = self.result_comp.ensure_max_rms_error(
"euclidean_adv_toward_white.nii.gz", 0.075,
reference_file="reference_distwhite.nii.gz")
self.assertTrue(res, msg="RMS error is too high")
def test_upwind_euclidean(self):
- p = capsul.api.get_process_instance(
+ p = capsul.api.executable(
"highres_cortex.capsul.processes.EuclideanUpwindingAlongGradient")
p.domain = os.path.join(self.test_dir, "classif.nii.gz")
- p.field = os.path.join(
+ p.scalar_field = os.path.join(
self.test_dir, "reference_laplacian.nii.gz")
p.downfield = True
p.origin_label = 200
p.output = os.path.join(
self.test_dir, "euclidean_upw_toward_white.nii.gz")
- p()
+ with self.capsul_engine as ce:
+ ce.run(p)
res = self.result_comp.ensure_max_rms_error(
"euclidean_upw_toward_white.nii.gz", 0.22,
reference_file="reference_distwhite.nii.gz")
self.assertTrue(res, msg="RMS error is too high")
def test_equivolumetric_pipeline(self):
- p = capsul.api.get_process_instance(
+ p = capsul.api.executable(
"highres_cortex.capsul.isovolume")
p.classif = os.path.join(self.test_dir, "classif.nii.gz")
p.advection_step_size = 0.05
p.equivolumetric_depth = os.path.join(
self.test_dir, "equivolumetric_depth.nii.gz")
- p()
+ with self.capsul_engine as ce:
+ ce.run(p)
res = self.result_comp.ensure_max_rms_error(
"equivolumetric_depth.nii.gz", 0.028,
reference_file="reference_equivolumic.nii.gz")
self.assertTrue(res, msg="RMS error is too high")
def test_thickness_adv_pipeline(self):
- p = capsul.api.get_process_instance(
+ p = capsul.api.executable(
"highres_cortex.capsul.thickness_adv")
p.classif = os.path.join(self.test_dir, "classif.nii.gz")
p.advection_step_size = 0.05
@@ -153,7 +171,8 @@ def test_thickness_adv_pipeline(self):
self.test_dir, "thickness_adv.nii.gz")
p.equidistant_depth = os.path.join(
self.test_dir, "equidistant_depth_adv.nii.gz")
- p()
+ with self.capsul_engine as ce:
+ ce.run(p)
res = self.result_comp.ensure_max_rms_error(
"thickness_adv.nii.gz", 0.12,
reference_file="reference_thickness.nii.gz")
@@ -164,14 +183,15 @@ def test_thickness_adv_pipeline(self):
self.assertTrue(res, msg="RMS error is too high")
def test_thickness_upw_pipeline(self):
- p = capsul.api.get_process_instance(
+ p = capsul.api.executable(
"highres_cortex.capsul.thickness_upw")
p.classif = os.path.join(self.test_dir, "classif.nii.gz")
p.thickness_image = os.path.join(
self.test_dir, "thickness_upw.nii.gz")
p.equidistant_depth = os.path.join(
self.test_dir, "equidistant_depth_upw.nii.gz")
- p()
+ with self.capsul_engine as ce:
+ ce.run(p)
res = self.result_comp.ensure_max_rms_error(
"thickness_upw.nii.gz", 0.27,
reference_file="reference_thickness.nii.gz")
@@ -182,12 +202,13 @@ def test_thickness_upw_pipeline(self):
self.assertTrue(res, msg="RMS error is too high")
def test_traverses_pipeline(self):
- p = capsul.api.get_process_instance(
+ p = capsul.api.executable(
"highres_cortex.capsul.traverses")
p.classif = os.path.join(self.test_dir, "classif.nii.gz")
p.cortical_traverses = os.path.join(
self.test_dir, "traverses.nii.gz")
- p()
+ with self.capsul_engine as ce:
+ ce.run(p)
if __name__ == "__main__":