Skip to content

Commit

Permalink
Do not modify input labels in density filenames (#32)
Browse files Browse the repository at this point in the history
* Do not modify input labels
  • Loading branch information
lecriste authored Jun 15, 2023
1 parent eb4f8e9 commit 1ad5b35
Show file tree
Hide file tree
Showing 4 changed files with 27 additions and 32 deletions.
8 changes: 3 additions & 5 deletions atlas_densities/app/mtype_densities.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,7 +204,7 @@ def standardize_probability_map(probability_map: "pd.DataFrame") -> "pd.DataFram
Standardize the labels of the rows and the columns of `probability_map` and
remove unused rows.
Output labels are all lower case.
Output row labels are all lower case.
The underscore is the only delimiter used in an output label.
The layer names referred to by output labels are:
"layer_1", "layer_23", "layer_4", "layer_5" and "layer_6".
Expand Down Expand Up @@ -242,9 +242,7 @@ def standardize_column_label(col_label: str):
Example: "NGC-SA" -> "NGC_SA"
"""
col_label = col_label.replace("-", "_")

return col_label.lower()
return col_label.replace("-", "_")

bbp_mtypes_map = {"DLAC": "LAC", "SLAC": "SAC"}
probability_map.rename(bbp_mtypes_map, axis="columns", inplace=True)
Expand Down Expand Up @@ -312,7 +310,7 @@ def create_from_probability_map(
else:
probability_map.set_index(probability_map.columns[0], inplace=True)

# Remove useless lines, use lower case and "standardized" explicit label names
# Remove useless lines, use lower case row labels and "standardized" explicit label names
probability_map = standardize_probability_map(probability_map)
check_probability_map_sanity(probability_map)

Expand Down
13 changes: 5 additions & 8 deletions atlas_densities/densities/mtype_densities_from_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,6 @@ def _check_dataframe_labels_sanity(dataframe: "pd.DataFrame") -> None:
AtlasBuildingTools error on failure.
"""
d_f = dataframe.copy()
d_f = d_f.rename(str.lower, axis="columns")
d_f = d_f.rename(str.lower, axis="rows")
d_f.columns = d_f.columns.str.replace(" ", "")
d_f.index = d_f.index.str.replace(" ", "")
Expand Down Expand Up @@ -161,14 +160,14 @@ def create_from_probability_map(
The output volumetric density for the mtype named ``mtype`` is saved into
`<output_dirpath>/no_layers` under the name ``<mtype>_densities.nrrd`` if its sum is not too
close to zero where <mtype> is uppercase and dash separated.
close to zero, where <mtype> is dash separated.
Example: if mtype = "NGC_SA", then output_file_name = "NGC-SA_densities.nrrd".
The restriction of the volumetric density for the mtype named ``mtype`` to layer
``layer_name`` is saved into `<output_dirpath>/with_layers` under the name
``<layer_name>_<mtype>_densities.nrrd`` if its sum is not too close to zero.
The string <layer_name> is of the form "L<layer_index>", e.g. "L1" for "layer_1" and
the string <mtype> is upper case and dash-separated.
the string <mtype> is dash-separated.
Example: if mtype = "NGC_SA", layer_name = "layer_23", then
output_file_name = "L23_NGC-SA_densities.nrrd".
Expand Down Expand Up @@ -243,20 +242,18 @@ def create_from_probability_map(
)

# Saving to file
mtype_filename = f"{mtype.replace('_', '-')}_densities.nrrd"
(Path(output_dirpath) / "no_layers").mkdir(exist_ok=True, parents=True)
if not np.isclose(np.sum(mtype_density), 0.0):
filename = f"{mtype.upper().replace('_', '-')}_densities.nrrd"
filepath = str(Path(output_dirpath) / "no_layers" / filename)
filepath = str(Path(output_dirpath) / "no_layers" / mtype_filename)
L.info("Saving %s ...", filepath)
annotation.with_data(mtype_density).save_nrrd(filepath)

(Path(output_dirpath) / "with_layers").mkdir(exist_ok=True, parents=True)
for layer_name, layer_mask in layer_masks.items():
layer_density = np.zeros(layer_mask.shape, dtype=float)
layer_density[layer_mask] = mtype_density[layer_mask]
filename = (
f"L{layer_name.split('_')[-1]}_{mtype.upper().replace('_', '-')}_densities.nrrd"
)
filename = f"L{layer_name.split('_')[-1]}_{mtype_filename}"
if not np.isclose(np.sum(layer_density), 0.0):
filepath = str(Path(output_dirpath) / "with_layers" / filename)
L.info("Saving %s ...", filepath)
Expand Down
10 changes: 5 additions & 5 deletions tests/app/test_mtype_densities.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,10 +160,10 @@ def test_standardize_probability_map(self):
actual = tested.standardize_probability_map(probability_map)
expected = pd.DataFrame(
{
"chc": [0.5, 0.0, 0.0, 0.0],
"ngc_sa": [0.5, 0.0, 0.0, 0.0],
"lac": [0.0] * 4,
"sac": [0.0] * 4,
"ChC": [0.5, 0.0, 0.0, 0.0],
"NGC_SA": [0.5, 0.0, 0.0, 0.0],
"LAC": [0.0] * 4,
"SAC": [0.0] * 4,
},
index=["layer_1_gad67", "layer_6_vip", "layer_23_pv", "layer_4_vip"],
)
Expand All @@ -176,7 +176,7 @@ def test_output(self):
result = get_result_from_probablity_map_(runner)
assert result.exit_code == 0

chc = VoxelData.load_nrrd(str(Path("output_dir") / "no_layers" / "CHC_densities.nrrd"))
chc = VoxelData.load_nrrd(str(Path("output_dir") / "no_layers" / "ChC_densities.nrrd"))
assert chc.raw.dtype == float
npt.assert_array_equal(chc.voxel_dimensions, self.data["annotation"].voxel_dimensions)

Expand Down
28 changes: 14 additions & 14 deletions tests/densities/test_mtype_densities_from_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -136,7 +136,7 @@ def create_from_probability_map_data():

probability_map = DataFrame(
{
"chc": [
"ChC": [
0.0,
0.0,
0.0,
Expand All @@ -155,7 +155,7 @@ def create_from_probability_map_data():
1.0,
1.0,
],
"lac": [
"LAC": [
1.0,
1.0,
1.0,
Expand Down Expand Up @@ -235,7 +235,7 @@ def test_filenames(self):
no_layers_filepaths = {
Path.resolve(f).name for f in Path(tmpdir, "no_layers").glob("*.nrrd")
}
assert no_layers_filepaths == {"CHC_densities.nrrd", "LAC_densities.nrrd"}
assert no_layers_filepaths == {"ChC_densities.nrrd", "LAC_densities.nrrd"}
with_layers_filepaths = {
Path.resolve(f).name for f in Path(tmpdir, "with_layers").glob("*.nrrd")
}
Expand All @@ -244,19 +244,19 @@ def test_filenames(self):
"L4_LAC_densities.nrrd",
"L23_LAC_densities.nrrd",
"L6_LAC_densities.nrrd",
"L6_CHC_densities.nrrd",
"L5_CHC_densities.nrrd",
"L6_ChC_densities.nrrd",
"L5_ChC_densities.nrrd",
}

def test_output_consistency(self):
sum_ = {
"CHC": np.zeros(self.data["annotation"].shape, dtype=float),
"ChC": np.zeros(self.data["annotation"].shape, dtype=float),
"LAC": np.zeros(self.data["annotation"].shape, dtype=float),
}
tmpdir = self.tmpdir.name
for filename in ["L5_CHC_densities.nrrd", "L6_CHC_densities.nrrd"]:
for filename in ["L5_ChC_densities.nrrd", "L6_ChC_densities.nrrd"]:
filepath = str(Path(tmpdir) / "with_layers" / filename)
sum_["CHC"] += VoxelData.load_nrrd(filepath).raw
sum_["ChC"] += VoxelData.load_nrrd(filepath).raw

for filename in [
"L1_LAC_densities.nrrd",
Expand All @@ -267,7 +267,7 @@ def test_output_consistency(self):
filepath = str(Path(tmpdir) / "with_layers" / filename)
sum_["LAC"] += VoxelData.load_nrrd(filepath).raw

for mtype in ["CHC", "LAC"]:
for mtype in ["ChC", "LAC"]:
filepath = str(Path(tmpdir) / "no_layers" / f"{mtype}_densities.nrrd")
density = VoxelData.load_nrrd(filepath)
npt.assert_array_equal(
Expand All @@ -279,10 +279,10 @@ def test_output_consistency(self):
def test_output_values(self):
tmpdir = self.tmpdir.name
expected_densities = {
"CHC": np.array([[[0.0, 0.0, 0.0, 2.0, 0.4]]], dtype=float),
"ChC": np.array([[[0.0, 0.0, 0.0, 2.0, 0.4]]], dtype=float),
"LAC": np.array([[[1.5, 2.0, 1.0, 0.0, 0.6]]], dtype=float),
}
for mtype in ["CHC", "LAC"]:
for mtype in ["ChC", "LAC"]:
filepath = str(Path(tmpdir) / "no_layers" / f"{mtype}_densities.nrrd")
npt.assert_array_almost_equal(
VoxelData.load_nrrd(filepath).raw, expected_densities[mtype]
Expand All @@ -308,17 +308,17 @@ def create_densities(self):
)

def test_probability_map_sanity_negative_probability(self):
self.data["probability_map"].loc["layer_1_gad67", "chc"] = -0.0025
self.data["probability_map"].loc["layer_1_gad67", "ChC"] = -0.0025
with pytest.raises(AtlasDensitiesError):
self.create_densities()

def test_probability_map_sanity_row_sum_is_1(self):
self.data["probability_map"].loc["layer_1_gad67", "chc"] = 2.0
self.data["probability_map"].loc["layer_1_gad67", "ChC"] = 2.0
with pytest.raises(AtlasDensitiesError):
self.create_densities()

def test_labels_sanity(self):
self.data["probability_map"].rename(str.upper, axis="columns", inplace=True)
self.data["probability_map"].rename(str.upper, axis="rows", inplace=True)
with pytest.raises(AtlasDensitiesError):
self.create_densities()

Expand Down

0 comments on commit 1ad5b35

Please sign in to comment.