diff --git a/.zenodo.json b/.zenodo.json
index 02d910a0..73e0a211 100644
--- a/.zenodo.json
+++ b/.zenodo.json
@@ -26,9 +26,8 @@
"orcid": "0000-0002-6402-9879"
},
{
- "name": "Austin Gerlt",
- "orcid": "0000-0002-2204-2055",
- "affiliation": "The Ohio State University"
+ "name": "Viljar Johan Femoen",
+ "affiliation": "Norwegian University of Science and Technology"
},
{
"name": "Anders Christian Mathisen",
@@ -45,16 +44,25 @@
"affiliation": "University of Wisconsin Madison"
},
{
- "name": "Simon Høgås"
+ "name": "Austin Gerlt",
+ "orcid": "0000-0002-2204-2055",
+ "affiliation": "The Ohio State University"
},
{
- "name": "Viljar Johan Femoen",
- "affiliation": "Norwegian University of Science and Technology"
+ "name": "Simon Høgås"
},
{
"name": "Alessandra da Silva",
"orcid": "0000-0003-0465-504X"
},
+ {
+ "name": "Ondrej Lexa",
+ "orcid": "0000-0003-4616-9154",
+ "affiliation": "Charles University, Prague"
+ },
+ {
+ "name": "Eric Prestat"
+ },
{
"name": "Alexander Clausen",
"orcid": "0000-0002-9555-7455",
diff --git a/orix/__init__.py b/orix/__init__.py
index 47a6f19a..87273fd4 100644
--- a/orix/__init__.py
+++ b/orix/__init__.py
@@ -25,14 +25,16 @@
"Ben Martineau",
"Paddy Harrison",
"Phillip Crout",
- "Austin Gerlt",
"Duncan Johnstone",
"Niels Cautaerts",
+ "Viljar Johan Femoen",
"Anders Christian Mathisen",
"Zhou Xu",
"Carter Francis",
+ "Austin Gerlt",
"Simon Høgås",
- "Viljar Johan Femoen",
"Alessandra da Silva",
+ "Ondrej Lexa",
+ "Eric Prestat",
"Alexander Clausen",
]
diff --git a/orix/io/plugins/ctf.py b/orix/io/plugins/ctf.py
index dfdc7994..08249b1b 100644
--- a/orix/io/plugins/ctf.py
+++ b/orix/io/plugins/ctf.py
@@ -1,4 +1,5 @@
-# Copyright 2018-2024 the orix developers
+#
+# Copyright 2019-2025 the orix developers
#
# This file is part of orix.
#
@@ -9,11 +10,12 @@
#
# orix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with orix. If not, see <http://www.gnu.org/licenses/>.
+# along with orix.  If not, see <http://www.gnu.org/licenses/>.
+#
"""Reader of a crystal map from a file in the Channel Text File (CTF)
format.
@@ -21,14 +23,14 @@
from io import TextIOWrapper
import re
-from typing import Dict, List, Tuple
+from typing import Any, Literal
from diffpy.structure import Lattice, Structure
import numpy as np
-from orix.crystal_map import CrystalMap, PhaseList
-from orix.crystal_map.crystal_map import _data_slices_from_coordinates
-from orix.quaternion import Rotation
+from orix.crystal_map.crystal_map import CrystalMap, _data_slices_from_coordinates
+from orix.crystal_map.phase_list import PhaseList
+from orix.quaternion.rotation import Rotation
__all__ = ["file_reader"]
@@ -45,8 +47,8 @@ def file_reader(filename: str) -> CrystalMap:
The map in the input is assumed to be 2D.
- Many vendors/programs produce a .ctf files. Files from the following
- vendors/programs are tested:
+ Many vendors/programs can write a .ctf file. Files from the
+ following vendors/programs are tested:
* Oxford Instruments AZtec
* Bruker Esprit
@@ -74,6 +76,15 @@ def file_reader(filename: str) -> CrystalMap:
deviation (MAD), band contrast (BC), and band slope (BS) renamed to
DP (dot product), OSM (orientation similarity metric), and IQ (image
quality), respectively.
+
+    Description of the error codes provided in the CTF file:
+ - 0: Success
+ - 1: Low BC
+ - 2: Low BS
+ - 3: No solution
+ - 4: High MAD
+ - 5: Not yet analyzed (job cancelled before point)
+ - 6: Unexpected error (exceptions etc.)
"""
with open(filename, "r") as f:
header, data_starting_row, vendor = _get_header(f)
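
For orientation, a brief usage sketch of the public entry point this plugin backs. The file name is hypothetical, and the available property columns depend on which vendor wrote the file.

    from orix import io

    # Hypothetical CTF file; io.load() dispatches to this reader based
    # on the .ctf file extension
    xmap = io.load("mymap.ctf")

    xmap.phases       # phase list parsed from the header
    xmap.prop.keys()  # e.g. "bands", "error", "MAD", "BC", "BS" for Oxford/Bruker
    xmap.rotations    # rotations built from the Euler angle columns
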
@@ -143,9 +154,11 @@ def file_reader(filename: str) -> CrystalMap:
return CrystalMap(**data_dict)
-def _get_header(file: TextIOWrapper) -> Tuple[List[str], int, List[str]]:
- """Return file header, row number of start of data in file, and the
- detected vendor(s).
+def _get_header(
+ file: TextIOWrapper,
+) -> tuple[list[str], int, Literal["oxford_or_bruker", "emsoft", "astar", "mtex"]]:
+ """Return file header, row number for the first line with data, and
+ the detected vendor.
Parameters
----------
@@ -155,17 +168,16 @@ def _get_header(file: TextIOWrapper) -> Tuple[List[str], int, List[str]]:
Returns
-------
header
- List with header lines as individual elements.
+ List with header lines.
data_starting_row
- The starting row number for the data lines
+ Row number for the first line with data.
vendor
- Vendor detected based on some header pattern. Default is to
- assume Oxford/Bruker, ``"oxford_or_bruker"`` (assuming no
- difference between the two vendor's CTF formats). Other options
- are ``"emsoft"``, ``"astar"``, and ``"mtex"``.
+ Detected vendor. Default is to assume Oxford or Bruker,
+ "oxford_or_bruker" (assuming identical CTF formatting). Other
+ options are "emsoft", "astar", and "mtex".
"""
vendor = []
- vendor_pattern = {
+ vendor_patterns = {
"emsoft": re.compile(
(
r"EMsoft v\. ([A-Za-z0-9]+(_[A-Za-z0-9]+)+); BANDS=pattern index, "
@@ -176,36 +188,40 @@ def _get_header(file: TextIOWrapper) -> Tuple[List[str], int, List[str]]:
"mtex": re.compile("(?<=)Created from mtex"),
}
+ # Keep header lines and any matching vendor patterns (potentially
+ # more than one)
header = []
line = file.readline()
- i = 0
- # Prevent endless loop by not reading past 1 000 lines
- while not line.startswith("Phase\tX\tY") and i < 1_000:
- for k, v in vendor_pattern.items():
- if v.search(line):
+ data_starting_row = 0
+ max_header_lines = 1_000
+ while data_starting_row < max_header_lines and not line.startswith("Phase\tX\tY"):
+ for k, v in vendor_patterns.items():
+ match = v.search(line)
+ if match is not None:
vendor.append(k)
header.append(line.rstrip())
- i += 1
+ data_starting_row += 1
line = file.readline()
- vendor = vendor[0] if len(vendor) == 1 else "oxford_or_bruker"
+ if len(vendor) == 1:
+ vendor = vendor[0]
+ else:
+ vendor = "oxford_or_bruker"
- return header, i + 1, vendor
+ return header, data_starting_row + 1, vendor
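
As an illustration of the reworked header scan, a small sketch of calling the private helper on an invented header; the lines below are made up solely to trigger the MTEX pattern and to show the returned row number, and StringIO stands in for the open file handle.

    from io import StringIO

    from orix.io.plugins.ctf import _get_header

    # Two invented header lines followed by the column header line that
    # terminates the scan
    fake_ctf = StringIO(
        "Channel Text File\n"
        "Prj\tCreated from mtex\n"
        "Phase\tX\tY\tBands\tError\tEuler1\tEuler2\tEuler3\tMAD\tBC\tBS\n"
    )
    header, data_starting_row, vendor = _get_header(fake_ctf)
    # header            -> ["Channel Text File", "Prj\tCreated from mtex"]
    # data_starting_row -> 3 (zero-based row of the first data line)
    # vendor            -> "mtex"
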
-def _get_phases_from_header(header: List[str]) -> dict:
- """Return phase names and symmetries detected in a .ctf file header.
+def _get_phases_from_header(header: list[str]) -> dict[str, list]:
+ """Return phase names and symmetries detected in a CTF file header.
Parameters
----------
header
- List with header lines as individual elements.
- vendor
- Vendor of the file.
+ List with header lines.
Returns
-------
- phase_dict
+ phases_dict
Dictionary with the following keys (and types): "ids" (int),
"names" (str), "space_groups" (int), "point_groups" (str),
"lattice_constants" (list of floats).
@@ -215,53 +231,52 @@ def _get_phases_from_header(header: List[str]) -> dict:
     This function has been tested with files from the following vendors'
     formats: Oxford AZtec HKL v5/v6 and EMsoft v4/v5.
"""
- phases = {
+ phases_dict = {
"ids": [],
"names": [],
- "point_groups": [],
"space_groups": [],
+ "point_groups": [],
"lattice_constants": [],
}
- for i, line in enumerate(header):
+ for line_number, line in enumerate(header):
if line.startswith("Phases"):
break
n_phases = int(line.split("\t")[1])
- # Laue classes
- laue_ids = [
- "-1",
- "2/m",
- "mmm",
- "4/m",
- "4/mmm",
- "-3",
- "-3m",
- "6/m",
- "6/mmm",
- "m3",
- "m-3m",
- ]
+ laue_ids = {
+ 1: "-1",
+ 2: "2/m",
+ 3: "mmm",
+ 4: "4/m",
+ 5: "4/mmm",
+ 6: "-3",
+ 7: "-3m",
+ 8: "6/m",
+ 9: "6/mmm",
+ 10: "m3",
+ 11: "m-3m",
+ }
- for j in range(n_phases):
- phase_data = header[i + 1 + j].split("\t")
- phases["ids"].append(j + 1)
+ for i in range(n_phases):
+ phase_data = header[line_number + 1 + i].split("\t")
+ phases_dict["ids"].append(i + 1)
abcABG = ";".join(phase_data[:2])
abcABG = abcABG.split(";")
- abcABG = [float(i.replace(",", ".")) for i in abcABG]
- phases["lattice_constants"].append(abcABG)
- phases["names"].append(phase_data[2])
+ abcABG = [float(lat.replace(",", ".")) for lat in abcABG]
+ phases_dict["lattice_constants"].append(abcABG)
+ phases_dict["names"].append(phase_data[2])
laue_id = int(phase_data[3])
- phases["point_groups"].append(laue_ids[laue_id - 1])
+ phases_dict["point_groups"].append(laue_ids[laue_id])
sg = int(phase_data[4])
if sg == 0:
sg = None
- phases["space_groups"].append(sg)
+ phases_dict["space_groups"].append(sg)
- return phases
+ return phases_dict
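
To make the new Laue-class lookup concrete, a sketch of what the helper would return for an invented single-phase header; the nickel values are illustrative, not taken from a real file.

    from orix.io.plugins.ctf import _get_phases_from_header

    header = [
        "Phases\t1",
        # a;b;c <tab> alpha;beta;gamma <tab> name <tab> Laue class <tab> space group
        "3.52;3.52;3.52\t90;90;90\tNickel\t11\t225",
    ]
    phases = _get_phases_from_header(header)
    # phases["ids"]               -> [1]
    # phases["names"]             -> ["Nickel"]
    # phases["point_groups"]      -> ["m-3m"]  (Laue class 11)
    # phases["space_groups"]      -> [225]
    # phases["lattice_constants"] -> [[3.52, 3.52, 3.52, 90.0, 90.0, 90.0]]
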
-def _fix_astar_coords(header: List[str], data_dict: dict) -> dict:
+def _fix_astar_coords(header: list[str], data_dict: dict[str, Any]) -> dict[str, Any]:
"""Return the data dictionary with coordinate arrays possibly fixed
for ASTAR Index files.
@@ -301,7 +316,7 @@ def _fix_astar_coords(header: List[str], data_dict: dict) -> dict:
return data_dict
-def _get_xy_step(header: List[str]) -> Dict[str, float]:
+def _get_xy_step(header: list[str]) -> dict[str, float]:
pattern_step = re.compile(r"(?<=[XY]Step[\t\s])(.*)")
steps = {"x": None, "y": None}
for line in header:
@@ -315,7 +330,7 @@ def _get_xy_step(header: List[str]) -> Dict[str, float]:
return steps
-def _get_xy_cells(header: List[str]) -> Dict[str, int]:
+def _get_xy_cells(header: list[str]) -> dict[str, int]:
pattern_cells = re.compile(r"(?<=[XY]Cells[\t\s])(.*)")
cells = {"x": None, "y": None}
for line in header:
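
Finally, a note on the lookbehind patterns used by _get_xy_step and _get_xy_cells: the fixed-width lookbehind matches the keyword and following separator without including them, so only the value remains in the match. The header line below is invented.

    import re

    pattern_cells = re.compile(r"(?<=[XY]Cells[\t\s])(.*)")

    line = "XCells\t115"  # invented header line
    match = pattern_cells.search(line)
    if match is not None:
        n_cells = int(match.group(0))  # -> 115
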