diff --git a/atlas_densities/app/mtype_densities.py b/atlas_densities/app/mtype_densities.py index 0e03d43..dcb1e03 100644 --- a/atlas_densities/app/mtype_densities.py +++ b/atlas_densities/app/mtype_densities.py @@ -204,7 +204,7 @@ def standardize_probability_map(probability_map: "pd.DataFrame") -> "pd.DataFram Standardize the labels of the rows and the columns of `probability_map` and remove unused rows. - Output labels are all lower case. + Output row labels are all lower case. The underscore is the only delimiter used in an output label. The layer names refered to by output labels are: "layer_1," "layer_23", "layer_4", "layer_5" and "layer_6". @@ -242,9 +242,7 @@ def standardize_column_label(col_label: str): - Example: "NGC-SA" -> "ngc_sa" + Example: "NGC-SA" -> "NGC_SA" """ - col_label = col_label.replace("-", "_") - - return col_label.lower() + return col_label.replace("-", "_") bbp_mtypes_map = {"DLAC": "LAC", "SLAC": "SAC"} probability_map.rename(bbp_mtypes_map, axis="columns", inplace=True) @@ -312,7 +310,7 @@ def create_from_probability_map( else: probability_map.set_index(probability_map.columns[0], inplace=True) - # Remove useless lines, use lower case and "standardized" explicit label names + # Remove useless lines, use lower case row labels and "standardized" explicit label names probability_map = standardize_probability_map(probability_map) check_probability_map_sanity(probability_map) diff --git a/atlas_densities/densities/mtype_densities_from_map.py b/atlas_densities/densities/mtype_densities_from_map.py index cbadad2..766ec81 100644 --- a/atlas_densities/densities/mtype_densities_from_map.py +++ b/atlas_densities/densities/mtype_densities_from_map.py @@ -60,7 +60,6 @@ def _check_dataframe_labels_sanity(dataframe: "pd.DataFrame") -> None: AtlasBuildingTools error on failure. 
""" d_f = dataframe.copy() - d_f = d_f.rename(str.lower, axis="columns") d_f = d_f.rename(str.lower, axis="rows") d_f.columns = d_f.columns.str.replace(" ", "") d_f.index = d_f.index.str.replace(" ", "") @@ -161,14 +160,14 @@ def create_from_probability_map( The ouput volumetric density for the mtype named ``mtype`` is saved into `/no_layers` under the name ``_densities.nrrd`` if its sum is not too - close to zero where is uppercase and dash separated. + close to zero, where is dash separated. Example: if mtype = "ngc_sa", then output_file_name = "NGC-SA_densities.nrrd". The restriction of the volumetric density for the mtype named ``mtype`` to layer ``layer_name`` is saved into `/with_layers` under the name ``__densities.nrrd`` if its sum is not too close to zero. The string is of the form "L", e,g, "L1" for "layer_1" and - the string is upper case and dash-separted. + the string is dash-separted. Example: if mtype = "ngc_sa", layer_name = "layer_23", then output_file_name = "L23_NGC-SA_densities.nrrd". 
@@ -243,10 +242,10 @@ def create_from_probability_map( ) # Saving to file + mtype_filename = f"{mtype.replace('_', '-')}_densities.nrrd" (Path(output_dirpath) / "no_layers").mkdir(exist_ok=True, parents=True) if not np.isclose(np.sum(mtype_density), 0.0): - filename = f"{mtype.upper().replace('_', '-')}_densities.nrrd" - filepath = str(Path(output_dirpath) / "no_layers" / filename) + filepath = str(Path(output_dirpath) / "no_layers" / mtype_filename) L.info("Saving %s ...", filepath) annotation.with_data(mtype_density).save_nrrd(filepath) @@ -254,9 +253,7 @@ def create_from_probability_map( for layer_name, layer_mask in layer_masks.items(): layer_density = np.zeros(layer_mask.shape, dtype=float) layer_density[layer_mask] = mtype_density[layer_mask] - filename = ( - f"L{layer_name.split('_')[-1]}_{mtype.upper().replace('_', '-')}_densities.nrrd" - ) + filename = f"L{layer_name.split('_')[-1]}_{mtype_filename}" if not np.isclose(np.sum(layer_density), 0.0): filepath = str(Path(output_dirpath) / "with_layers" / filename) L.info("Saving %s ...", filepath) diff --git a/tests/app/test_mtype_densities.py b/tests/app/test_mtype_densities.py index d7957ac..6b34fbb 100644 --- a/tests/app/test_mtype_densities.py +++ b/tests/app/test_mtype_densities.py @@ -160,10 +160,10 @@ def test_standardize_probability_map(self): actual = tested.standardize_probability_map(probability_map) expected = pd.DataFrame( { - "chc": [0.5, 0.0, 0.0, 0.0], - "ngc_sa": [0.5, 0.0, 0.0, 0.0], - "lac": [0.0] * 4, - "sac": [0.0] * 4, + "ChC": [0.5, 0.0, 0.0, 0.0], + "NGC_SA": [0.5, 0.0, 0.0, 0.0], + "LAC": [0.0] * 4, + "SAC": [0.0] * 4, }, index=["layer_1_gad67", "layer_6_vip", "layer_23_pv", "layer_4_vip"], ) @@ -176,7 +176,7 @@ def test_output(self): result = get_result_from_probablity_map_(runner) assert result.exit_code == 0 - chc = VoxelData.load_nrrd(str(Path("output_dir") / "no_layers" / "CHC_densities.nrrd")) + chc = VoxelData.load_nrrd(str(Path("output_dir") / "no_layers" / 
"ChC_densities.nrrd")) assert chc.raw.dtype == float npt.assert_array_equal(chc.voxel_dimensions, self.data["annotation"].voxel_dimensions) diff --git a/tests/densities/test_mtype_densities_from_map.py b/tests/densities/test_mtype_densities_from_map.py index 59e7713..baf00c5 100644 --- a/tests/densities/test_mtype_densities_from_map.py +++ b/tests/densities/test_mtype_densities_from_map.py @@ -136,7 +136,7 @@ def create_from_probability_map_data(): probability_map = DataFrame( { - "chc": [ + "ChC": [ 0.0, 0.0, 0.0, @@ -155,7 +155,7 @@ def create_from_probability_map_data(): 1.0, 1.0, ], - "lac": [ + "LAC": [ 1.0, 1.0, 1.0, @@ -235,7 +235,7 @@ def test_filenames(self): no_layers_filepaths = { Path.resolve(f).name for f in Path(tmpdir, "no_layers").glob("*.nrrd") } - assert no_layers_filepaths == {"CHC_densities.nrrd", "LAC_densities.nrrd"} + assert no_layers_filepaths == {"ChC_densities.nrrd", "LAC_densities.nrrd"} with_layers_filepaths = { Path.resolve(f).name for f in Path(tmpdir, "with_layers").glob("*.nrrd") } @@ -244,19 +244,19 @@ def test_filenames(self): "L4_LAC_densities.nrrd", "L23_LAC_densities.nrrd", "L6_LAC_densities.nrrd", - "L6_CHC_densities.nrrd", - "L5_CHC_densities.nrrd", + "L6_ChC_densities.nrrd", + "L5_ChC_densities.nrrd", } def test_output_consistency(self): sum_ = { - "CHC": np.zeros(self.data["annotation"].shape, dtype=float), + "ChC": np.zeros(self.data["annotation"].shape, dtype=float), "LAC": np.zeros(self.data["annotation"].shape, dtype=float), } tmpdir = self.tmpdir.name - for filename in ["L5_CHC_densities.nrrd", "L6_CHC_densities.nrrd"]: + for filename in ["L5_ChC_densities.nrrd", "L6_ChC_densities.nrrd"]: filepath = str(Path(tmpdir) / "with_layers" / filename) - sum_["CHC"] += VoxelData.load_nrrd(filepath).raw + sum_["ChC"] += VoxelData.load_nrrd(filepath).raw for filename in [ "L1_LAC_densities.nrrd", @@ -267,7 +267,7 @@ def test_output_consistency(self): filepath = str(Path(tmpdir) / "with_layers" / filename) sum_["LAC"] += 
VoxelData.load_nrrd(filepath).raw - for mtype in ["CHC", "LAC"]: + for mtype in ["ChC", "LAC"]: filepath = str(Path(tmpdir) / "no_layers" / f"{mtype}_densities.nrrd") density = VoxelData.load_nrrd(filepath) npt.assert_array_equal( @@ -279,10 +279,10 @@ def test_output_consistency(self): def test_output_values(self): tmpdir = self.tmpdir.name expected_densities = { - "CHC": np.array([[[0.0, 0.0, 0.0, 2.0, 0.4]]], dtype=float), + "ChC": np.array([[[0.0, 0.0, 0.0, 2.0, 0.4]]], dtype=float), "LAC": np.array([[[1.5, 2.0, 1.0, 0.0, 0.6]]], dtype=float), } - for mtype in ["CHC", "LAC"]: + for mtype in ["ChC", "LAC"]: filepath = str(Path(tmpdir) / "no_layers" / f"{mtype}_densities.nrrd") npt.assert_array_almost_equal( VoxelData.load_nrrd(filepath).raw, expected_densities[mtype] @@ -308,17 +308,17 @@ def create_densities(self): ) def test_probability_map_sanity_negative_probability(self): - self.data["probability_map"].loc["layer_1_gad67", "chc"] = -0.0025 + self.data["probability_map"].loc["layer_1_gad67", "ChC"] = -0.0025 with pytest.raises(AtlasDensitiesError): self.create_densities() def test_probability_map_sanity_row_sum_is_1(self): - self.data["probability_map"].loc["layer_1_gad67", "chc"] = 2.0 + self.data["probability_map"].loc["layer_1_gad67", "ChC"] = 2.0 with pytest.raises(AtlasDensitiesError): self.create_densities() def test_labels_sanity(self): - self.data["probability_map"].rename(str.upper, axis="columns", inplace=True) + self.data["probability_map"].rename(str.upper, axis="rows", inplace=True) with pytest.raises(AtlasDensitiesError): self.create_densities()