diff --git a/examples/quality_control.json b/examples/quality_control.json index 31717377..08d0585e 100644 --- a/examples/quality_control.json +++ b/examples/quality_control.json @@ -1,118 +1,175 @@ { "describedBy": "https://raw.githubusercontent.com/AllenNeuralDynamics/aind-data-schema/main/src/aind_data_schema/core/quality_control.py", "schema_version": "1.0.0", - "overall_status": [ - { - "evaluator": "Automated", - "status": "Pass", - "timestamp": "2022-11-22" - } - ], "evaluations": [ { - "evaluation_modality": { + "modality": { "name": "Extracellular electrophysiology", "abbreviation": "ecephys" }, - "evaluation_stage": "Processing", - "evaluation_name": "Drift map", - "evaluation_description": "Qualitative check that drift map shows minimal movement", - "qc_metrics": [ + "stage": "Raw data", + "name": "Drift map", + "description": "Qualitative check that drift map shows minimal movement", + "metrics": [ { "name": "Probe A drift", - "value": "High", + "value": { + "value": "", + "options": [ + "Low", + "Medium", + "High" + ], + "status": [ + "Pass", + "Fail", + "Fail" + ], + "type": "dropdown" + }, "description": null, - "reference": "ecephys-drift-map" + "reference": "ecephys-drift-map", + "status_history": [ + { + "evaluator": "", + "status": "Pending", + "timestamp": "2022-11-22T00:00:00Z" + } + ] }, { "name": "Probe B drift", - "value": "Low", + "value": { + "value": "", + "options": [ + "Drift visible in entire session", + "Drift visible in part of session", + "Sudden movement event" + ], + "status": [ + "Fail", + "Pass", + "Fail" + ], + "type": "checkbox" + }, "description": null, - "reference": "ecephys-drift-map" + "reference": "ecephys-drift-map", + "status_history": [ + { + "evaluator": "", + "status": "Pending", + "timestamp": "2022-11-22T00:00:00Z" + } + ] }, { "name": "Probe C drift", "value": "Low", "description": null, - "reference": "ecephys-drift-map" - } - ], - "evaluation_status": [ - { - "evaluator": "Fred Flintstone", - "status": "Fail", - "timestamp": "2022-11-22" + "reference": "ecephys-drift-map", + "status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00Z" + } + ] } ], - "notes": "Manually annotated: failed due to high drift on probe A" + "notes": "", + "allow_failed_metrics": false }, { - "evaluation_modality": { + "modality": { "name": "Behavior videos", "abbreviation": "behavior-videos" }, - "evaluation_stage": "Raw data", - "evaluation_name": "Video frame count check", - "evaluation_description": null, - "qc_metrics": [ + "stage": "Raw data", + "name": "Video frame count check", + "description": null, + "metrics": [ { "name": "video_1_num_frames", "value": 662, "description": null, - "reference": null + "reference": null, + "status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00Z" + } + ] }, { "name": "video_2_num_frames", "value": 662, "description": null, - "reference": null + "reference": null, + "status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00Z" + } + ] } ], - "evaluation_status": [ - { - "evaluator": "Fred Flintstone", - "status": "Fail", - "timestamp": "2022-11-22" - } - ], - "notes": "Pass when video_1_num_frames==video_2_num_frames" + "notes": "Pass when video_1_num_frames==video_2_num_frames", + "allow_failed_metrics": false }, { - "evaluation_modality": { + "modality": { "name": "Extracellular electrophysiology", "abbreviation": "ecephys" }, - "evaluation_stage": "Raw data", - 
"evaluation_name": "Probes present", - "evaluation_description": null, - "qc_metrics": [ + "stage": "Raw data", + "name": "Probes present", + "description": null, + "metrics": [ { "name": "ProbeA_success", "value": true, "description": null, - "reference": null + "reference": null, + "status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00Z" + } + ] }, { "name": "ProbeB_success", "value": true, "description": null, - "reference": null + "reference": null, + "status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00Z" + } + ] }, { "name": "ProbeC_success", "value": true, "description": null, - "reference": null - } - ], - "evaluation_status": [ - { - "evaluator": "Automated", - "status": "Pass", - "timestamp": "2022-11-22" + "reference": null, + "status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00Z" + } + ] } ], - "notes": null + "notes": null, + "allow_failed_metrics": false } ], "notes": null diff --git a/examples/quality_control.py b/examples/quality_control.py index 4ba3b754..df687d4d 100644 --- a/examples/quality_control.py +++ b/examples/quality_control.py @@ -1,51 +1,86 @@ """Example quality control processing""" -from datetime import date +from datetime import datetime, timezone from aind_data_schema_models.modalities import Modality from aind_data_schema.core.quality_control import QCEvaluation, QualityControl, QCMetric, Stage, Status, QCStatus -t = date(2022, 11, 22) +t = datetime(2022, 11, 22, 0, 0, 0, tzinfo=timezone.utc) + +s = QCStatus(evaluator="Automated", status=Status.PASS, timestamp=t) +sp = QCStatus(evaluator="", status=Status.PENDING, timestamp=t) + +# Example of how to use a dictionary to provide options a metric +drift_value_with_options = { + "value": "", + "options": ["Low", "Medium", "High"], + "status": [ + "Pass", + "Fail", + "Fail", + ], # when set, this field will be used to automatically parse the status, blank forces manual update + "type": "dropdown", # other type options: "checkbox" +} + +# Example of how to use a dictionary to provide multiple checkable flags, some of which will fail the metric +drift_value_with_flags = { + "value": "", + "options": ["Drift visible in entire session", "Drift visible in part of session", "Sudden movement event"], + "status": ["Fail", "Pass", "Fail"], + "type": "checkbox", +} eval0 = QCEvaluation( - evaluation_name="Drift map", - evaluation_description="Qualitative check that drift map shows minimal movement", - evaluation_modality=Modality.ECEPHYS, - evaluation_stage=Stage.PROCESSING, - evaluation_status=[QCStatus(evaluator="Fred Flintstone", timestamp=t, status=Status.FAIL)], - qc_metrics=[ - QCMetric(name="Probe A drift", value="High", reference="ecephys-drift-map"), - QCMetric(name="Probe B drift", value="Low", reference="ecephys-drift-map"), - QCMetric(name="Probe C drift", value="Low", reference="ecephys-drift-map"), + name="Drift map", + description="Qualitative check that drift map shows minimal movement", + modality=Modality.ECEPHYS, + stage=Stage.RAW, + metrics=[ + QCMetric( + name="Probe A drift", + value=drift_value_with_options, + reference="ecephys-drift-map", + status_history=[sp], + ), + QCMetric( + name="Probe B drift", + value=drift_value_with_flags, + reference="ecephys-drift-map", + status_history=[sp], + ), + QCMetric(name="Probe C drift", value="Low", reference="ecephys-drift-map", status_history=[s]), ], - notes="Manually annotated: failed due to 
high drift on probe A", + notes="", ) eval1 = QCEvaluation( - evaluation_name="Video frame count check", - evaluation_modality=Modality.BEHAVIOR_VIDEOS, - evaluation_stage=Stage.RAW, - evaluation_status=[QCStatus(evaluator="Fred Flintstone", timestamp=t, status=Status.FAIL)], - qc_metrics=[QCMetric(name="video_1_num_frames", value=662), QCMetric(name="video_2_num_frames", value=662)], + name="Video frame count check", + modality=Modality.BEHAVIOR_VIDEOS, + stage=Stage.RAW, + metrics=[ + QCMetric(name="video_1_num_frames", value=662, status_history=[s]), + QCMetric(name="video_2_num_frames", value=662, status_history=[s]), + ], notes="Pass when video_1_num_frames==video_2_num_frames", ) eval2 = QCEvaluation( - evaluation_name="Probes present", - evaluation_modality=Modality.ECEPHYS, - evaluation_stage=Stage.RAW, - evaluation_status=[QCStatus(evaluator="Automated", timestamp=t, status=Status.PASS)], - qc_metrics=[ - QCMetric(name="ProbeA_success", value=True), - QCMetric(name="ProbeB_success", value=True), - QCMetric(name="ProbeC_success", value=True), + name="Probes present", + modality=Modality.ECEPHYS, + stage=Stage.RAW, + metrics=[ + QCMetric(name="ProbeA_success", value=True, status_history=[s]), + QCMetric(name="ProbeB_success", value=True, status_history=[s]), + QCMetric(name="ProbeC_success", value=True, status_history=[s]), ], ) -q = QualityControl( - overall_status=[QCStatus(evaluator="Automated", timestamp=t, status=Status.PASS)], evaluations=[eval0, eval1, eval2] -) +q = QualityControl(evaluations=[eval0, eval1, eval2]) + +# This is a special call that needs to be made to populate the .overall_status and .evaluation_status properties +# Note that the timestamp is set here because of how examples testing works, in general you should not set the +# timestamp manually serialized = q.model_dump_json() deserialized = QualityControl.model_validate_json(serialized) diff --git a/quality_control.json b/quality_control.json index e8b55acc..5a345e2f 100644 --- a/quality_control.json +++ b/quality_control.json @@ -1,8 +1,6 @@ { "describedBy": "https://raw.githubusercontent.com/AllenNeuralDynamics/aind-data-schema/main/src/aind_data_schema/core/quality_control.py", "schema_version": "1.0.0", - "overall_status": "Pass", - "overall_status_date": "2022-11-22", "evaluations": [ { "evaluation_modality": { @@ -11,31 +9,56 @@ }, "evaluation_stage": "Processing", "evaluation_name": "Drift map", - "evaluation_desc": "Qualitative check that drift map shows minimal movement", - "evaluator": "Fred Flintstone", - "evaluation_date": "2022-11-22", + "evaluation_description": "Qualitative check that drift map shows minimal movement", "qc_metrics": [ { "name": "Probe A drift", "value": "High", "description": null, - "references": null + "reference": "ecephys-drift-map", + "metric_status_history": [ + { + "evaluator": "Bob", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00" + } + ] }, { "name": "Probe B drift", "value": "Low", "description": null, - "references": null + "reference": "ecephys-drift-map", + "metric_status_history": [ + { + "evaluator": "Bob", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00" + } + ] }, { "name": "Probe C drift", "value": "Low", "description": null, - "references": null + "reference": "ecephys-drift-map", + "metric_status_history": [ + { + "evaluator": "Bob", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00" + } + ] } ], - "stage_status": "Fail", - "notes": "Manually annotated: failed due to high drift on probe A" + "notes": "Manually annotated: failed due to 
high drift on probe A", + "evaluation_status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2024-09-23T14:04:03.094512" + } + ] }, { "evaluation_modality": { @@ -44,25 +67,43 @@ }, "evaluation_stage": "Raw data", "evaluation_name": "Video frame count check", - "evaluation_desc": null, - "evaluator": "Fred Flinstone", - "evaluation_date": "2022-11-22", + "evaluation_description": null, "qc_metrics": [ { "name": "video_1_num_frames", "value": 662, "description": null, - "references": null + "reference": null, + "metric_status_history": [ + { + "evaluator": "Bob", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00" + } + ] }, { "name": "video_2_num_frames", "value": 662, "description": null, - "references": null + "reference": null, + "metric_status_history": [ + { + "evaluator": "Bob", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00" + } + ] } ], - "stage_status": "Pass", - "notes": "Pass when video_1_num_frames==video_2_num_frames" + "notes": "Pass when video_1_num_frames==video_2_num_frames", + "evaluation_status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2024-09-23T14:04:03.094519" + } + ] }, { "evaluation_modality": { @@ -71,32 +112,64 @@ }, "evaluation_stage": "Raw data", "evaluation_name": "Probes present", - "evaluation_desc": null, - "evaluator": "Automated", - "evaluation_date": "2022-11-22", + "evaluation_description": null, "qc_metrics": [ { "name": "ProbeA_success", "value": true, "description": null, - "references": null + "reference": null, + "metric_status_history": [ + { + "evaluator": "Bob", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00" + } + ] }, { "name": "ProbeB_success", "value": true, "description": null, - "references": null + "reference": null, + "metric_status_history": [ + { + "evaluator": "Bob", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00" + } + ] }, { "name": "ProbeC_success", "value": true, "description": null, - "references": null + "reference": null, + "metric_status_history": [ + { + "evaluator": "Bob", + "status": "Pass", + "timestamp": "2022-11-22T00:00:00" + } + ] } ], - "stage_status": "Pass", - "notes": null + "notes": null, + "evaluation_status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2024-09-23T14:04:03.094522" + } + ] } ], - "notes": null + "notes": null, + "overall_status_history": [ + { + "evaluator": "Automated", + "status": "Pass", + "timestamp": "2024-09-23T14:04:03.094524" + } + ] } \ No newline at end of file diff --git a/src/aind_data_schema/core/quality_control.py b/src/aind_data_schema/core/quality_control.py index 38224df7..b4879ff4 100644 --- a/src/aind_data_schema/core/quality_control.py +++ b/src/aind_data_schema/core/quality_control.py @@ -1,15 +1,12 @@ """ Schemas for Quality Metrics """ -from __future__ import annotations - -from datetime import date from enum import Enum from typing import List, Literal, Optional, Any from aind_data_schema_models.modalities import Modality -from pydantic import Field, BaseModel +from pydantic import Field, BaseModel, field_validator -from aind_data_schema.base import AindCoreModel, AindModel +from aind_data_schema.base import AindCoreModel, AindModel, AwareDatetimeWithDefault class Status(str, Enum): @@ -36,7 +33,7 @@ class QCStatus(BaseModel): evaluator: str = Field(..., title="Status evaluator full name") status: Status = Field(..., title="Status") - timestamp: date = Field(..., title="Status date") + timestamp: AwareDatetimeWithDefault = Field(..., 
title="Status date") class QCMetric(BaseModel): @@ -46,18 +43,87 @@ class QCMetric(BaseModel): value: Any = Field(..., title="Metric value") description: Optional[str] = Field(default=None, title="Metric description") reference: Optional[str] = Field(default=None, title="Metric reference image URL or plot type") + status_history: List[QCStatus] = Field(default=[], title="Metric status history") + + @property + def status(self) -> QCStatus: + """Get the latest status object for this metric + + Returns + ------- + QCStatus + Most recent status object + """ + return self.status_history[-1] + + @field_validator("status_history") + def validate_status_history(cls, v): + """Ensure that at least one QCStatus object is provided""" + if len(v) == 0: + raise ValueError("At least one QCStatus object must be provided") + return v class QCEvaluation(AindModel): """Description of one evaluation stage, with one or more metrics""" - evaluation_modality: Modality.ONE_OF = Field(..., title="Modality") - evaluation_stage: Stage = Field(..., title="Evaluation stage") - evaluation_name: str = Field(..., title="Evaluation name") - evaluation_description: Optional[str] = Field(default=None, title="Evaluation description") - qc_metrics: List[QCMetric] = Field(..., title="QC metrics") - evaluation_status: List[QCStatus] = Field(..., title="Evaluation status") + modality: Modality.ONE_OF = Field(..., title="Modality") + stage: Stage = Field(..., title="Evaluation stage") + name: str = Field(..., title="Evaluation name") + description: Optional[str] = Field(default=None, title="Evaluation description") + metrics: List[QCMetric] = Field(..., title="QC metrics") notes: Optional[str] = Field(default=None, title="Notes") + allow_failed_metrics: bool = Field( + default=False, + title="Allow metrics to fail", + description=( + "Set to true for evaluations that are not critical to the overall state of QC for a data asset, this" + " will allow individual metrics to fail while still passing the evaluation." 
+ ), + ) + + @property + def status(self) -> Status: + """Loop through all metrics and return the evaluation's status + + Any fail -> FAIL + If no fails, then any pending -> PENDING + All PASS -> PASS + + Returns + ------- + Status + Current status of the evaluation + """ + latest_metric_statuses = [metric.status.status for metric in self.metrics] + + if (not self.allow_failed_metrics) and any(status == Status.FAIL for status in latest_metric_statuses): + return Status.FAIL + elif any(status == Status.PENDING for status in latest_metric_statuses): + return Status.PENDING + + return Status.PASS + + @property + def failed_metrics(self) -> Optional[List[QCMetric]]: + """Return any metrics that are failing + + Returns none if allow_failed_metrics is False + + Returns + ------- + list[QCMetric] + Metrics that fail + """ + if not self.allow_failed_metrics: + return None + else: + failing_metrics = [] + for metric in self.metrics: + if metric.status.status == Status.FAIL: + failing_metrics.append(metric) + + return failing_metrics class QualityControl(AindCoreModel): @@ -66,6 +132,22 @@ class QualityControl(AindCoreModel): _DESCRIBED_BY_URL = AindCoreModel._DESCRIBED_BY_BASE_URL.default + "aind_data_schema/core/quality_control.py" describedBy: str = Field(_DESCRIBED_BY_URL, json_schema_extra={"const": _DESCRIBED_BY_URL}) schema_version: Literal["1.0.0"] = Field("1.0.0") - overall_status: List[QCStatus] = Field(..., title="Overall status") evaluations: List[QCEvaluation] = Field(..., title="Evaluations") notes: Optional[str] = Field(default=None, title="Notes") + + @property + def status(self) -> Status: + """Loop through all evaluations and return the overall status + + Any FAIL -> FAIL + If no fails, then any PENDING -> PENDING + All PASS -> PASS + """ + eval_statuses = [evaluation.status for evaluation in self.evaluations] + + if any(status == Status.FAIL for status in eval_statuses): + return Status.FAIL + elif any(status == Status.PENDING for status in eval_statuses): + return Status.PENDING + + return Status.PASS diff --git a/tests/test_quality_control.py b/tests/test_quality_control.py index acccab46..1b287f05 100644 --- a/tests/test_quality_control.py +++ b/tests/test_quality_control.py @@ -1,7 +1,7 @@ """test quality metrics """ import unittest -from datetime import date +from datetime import datetime from aind_data_schema_models.modalities import Modality from pydantic import ValidationError @@ -19,30 +19,269 @@ def test_constructors(self): q = QualityControl() test_eval = QCEvaluation( - evaluation_name="Drift map", - evaluation_status=[ - QCStatus(evaluator="Fred Flintstone", timestamp=date.fromisoformat("2020-10-10"), status=Status.PASS) - ], - evaluation_modality=Modality.ECEPHYS, - evaluation_stage=Stage.PROCESSING, - qc_metrics=[ - QCMetric(name="Multiple values example", value={"stuff": "in_a_dict"}), + name="Drift map", + modality=Modality.ECEPHYS, + stage=Stage.PROCESSING, + metrics=[ + QCMetric( + name="Multiple values example", + value={"stuff": "in_a_dict"}, + status_history=[ + QCStatus(evaluator="Bob", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PASS) + ], + ), QCMetric( name="Drift map pass/fail", value=False, description="Manual evaluation of whether the drift map looks good", - references=["s3://some-data-somewhere"], + reference="s3://some-data-somewhere", + status_history=[ + QCStatus(evaluator="Bob", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PASS) + ], ), ], ) q = QualityControl( - overall_status=[QCStatus(evaluator="Bob", 
timestamp=date.fromisoformat("2020-10-10"), status=Status.PASS)], evaluations=[test_eval], ) assert q is not None + def test_overall_status(self): + """test that overall status goes to pass/pending/fail correctly""" + + test_eval = QCEvaluation( + name="Drift map", + modality=Modality.ECEPHYS, + stage=Stage.PROCESSING, + metrics=[ + QCMetric( + name="Multiple values example", + value={"stuff": "in_a_dict"}, + status_history=[ + QCStatus(evaluator="Bob", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PASS) + ], + ), + QCMetric( + name="Drift map pass/fail", + value=False, + description="Manual evaluation of whether the drift map looks good", + reference="s3://some-data-somewhere", + status_history=[ + QCStatus(evaluator="Bob", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PASS) + ], + ), + ], + ) + + # check that evaluation status gets auto-set if it has never been set before + self.assertEqual(test_eval.status, Status.PASS) + + q = QualityControl( + evaluations=[test_eval, test_eval], + ) + + # check that overall status gets auto-set if it has never been set before + self.assertEqual(q.status, Status.PASS) + + # Add a pending metric to the first evaluation + q.evaluations[0].metrics.append( + QCMetric( + name="Drift map pass/fail", + value=False, + description="Manual evaluation of whether the drift map looks good", + reference="s3://some-data-somewhere", + status_history=[ + QCStatus( + evaluator="Automated", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PENDING + ) + ], + ) + ) + + self.assertEqual(q.status, Status.PENDING) + + # Add a failing metric to the first evaluation + q.evaluations[0].metrics.append( + QCMetric( + name="Drift map pass/fail", + value=False, + description="Manual evaluation of whether the drift map looks good", + reference="s3://some-data-somewhere", + status_history=[ + QCStatus(evaluator="Automated", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.FAIL) + ], + ) + ) + + self.assertEqual(q.status, Status.FAIL) + + def test_evaluation_status(self): + """test that evaluation status goes to pass/pending/fail correctly""" + evaluation = QCEvaluation( + name="Drift map", + modality=Modality.ECEPHYS, + stage=Stage.PROCESSING, + metrics=[ + QCMetric( + name="Multiple values example", + value={"stuff": "in_a_dict"}, + status_history=[ + QCStatus( + evaluator="Automated", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PASS + ) + ], + ), + QCMetric( + name="Drift map pass/fail", + value=False, + description="Manual evaluation of whether the drift map looks good", + reference="s3://some-data-somewhere", + status_history=[ + QCStatus( + evaluator="Automated", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PASS + ) + ], + ), + ], + ) + + self.assertEqual(evaluation.status, Status.PASS) + + # Add a pending metric, evaluation should now evaluate to pending + evaluation.metrics.append( + QCMetric( + name="Drift map pass/fail", + value=False, + description="Manual evaluation of whether the drift map looks good", + reference="s3://some-data-somewhere", + status_history=[ + QCStatus( + evaluator="Automated", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PENDING + ) + ], + ) + ) + + self.assertEqual(evaluation.status, Status.PENDING) + + # Add a failing metric, evaluation should now evaluate to fail + evaluation.metrics.append( + QCMetric( + name="Drift map pass/fail", + value=False, + description="Manual evaluation of whether the drift map looks good", + 
reference="s3://some-data-somewhere", + status_history=[ + QCStatus(evaluator="Automated", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.FAIL) + ], + ) + ) + + self.assertEqual(evaluation.status, Status.FAIL) + + def test_allowed_failed_metrics(self): + """Test that if you set the flag to allow failures that evaluations pass""" + + metric2 = QCMetric( + name="Drift map pass/fail", + value=False, + description="Manual evaluation of whether the drift map looks good", + reference="s3://some-data-somewhere", + status_history=[ + QCStatus(evaluator="Automated", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PENDING) + ], + ) + + # First check that a pending evaluation still evaluates properly + evaluation = QCEvaluation( + name="Drift map", + modality=Modality.ECEPHYS, + stage=Stage.PROCESSING, + allow_failed_metrics=False, + metrics=[ + QCMetric( + name="Multiple values example", + value={"stuff": "in_a_dict"}, + status_history=[ + QCStatus( + evaluator="Automated", timestamp=datetime.fromisoformat("2020-10-10"), status=Status.PASS + ) + ], + ), + metric2, + ], + ) + + self.assertIsNone(evaluation.failed_metrics) + + evaluation.allow_failed_metrics = True + + self.assertEqual(evaluation.status, Status.PENDING) + + # Replace the pending evaluation with a fail, evaluation should not evaluate to pass + evaluation.metrics[1].status_history[0].status = Status.FAIL + + self.assertEqual(evaluation.status, Status.PASS) + + metric2.status_history[0].status = Status.FAIL + self.assertEqual(evaluation.failed_metrics, [metric2]) + + def test_metric_history_order(self): + """Test that the order of the metric status history list is preserved when dumping""" + t0 = datetime.fromisoformat("2020-10-10") + t1 = datetime.fromisoformat("2020-10-11") + t2 = datetime.fromisoformat("2020-10-12") + + evaluation = QCEvaluation( + name="Drift map", + modality=Modality.ECEPHYS, + stage=Stage.PROCESSING, + metrics=[ + QCMetric( + name="Multiple values example", + value={"stuff": "in_a_dict"}, + status_history=[ + QCStatus(evaluator="Automated", timestamp=t0, status=Status.PASS), + QCStatus(evaluator="Automated", timestamp=t1, status=Status.PASS), + QCStatus(evaluator="Automated", timestamp=t2, status=Status.PASS), + ], + ), + ], + ) + + # roundtrip to json to check that metric order is preserved + json = evaluation.model_dump_json() + evaluation_rebuild = QCEvaluation.model_validate_json(json) + + # because the actual model uses AwareDatetime objects we have to strip the timezone + roundtrip_t0 = evaluation_rebuild.metrics[0].status_history[0].timestamp + roundtrip_t1 = evaluation_rebuild.metrics[0].status_history[1].timestamp + roundtrip_t2 = evaluation_rebuild.metrics[0].status_history[2].timestamp + + roundtrip_t0 = roundtrip_t0.replace(tzinfo=None) + roundtrip_t1 = roundtrip_t1.replace(tzinfo=None) + roundtrip_t2 = roundtrip_t2.replace(tzinfo=None) + + self.assertEqual(roundtrip_t0, t0) + self.assertEqual(roundtrip_t1, t1) + self.assertEqual(roundtrip_t2, t2) + + def test_metric_status(self): + """Ensure that at least one status object exists for metric_status_history""" + + with self.assertRaises(ValueError) as context: + QCMetric( + name="Multiple values example", + value={"stuff": "in_a_dict"}, + status_history=[], + ) + + expected_exception = "At least one QCStatus object must be provided" + + self.assertTrue(expected_exception in repr(context.exception)) + if __name__ == "__main__": unittest.main()
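
A minimal usage sketch of the reworked API introduced by this diff, condensing what examples/quality_control.py and the tests above already demonstrate: statuses are no longer stored on the evaluation or the record, but are computed from each metric's status_history. The evaluation and metric names below are placeholders; everything else follows the schema as changed in this diff.

"""Minimal sketch of the reworked quality_control status roll-up (placeholder names/values)."""
from datetime import datetime, timezone

from aind_data_schema_models.modalities import Modality

from aind_data_schema.core.quality_control import (
    QCEvaluation,
    QCMetric,
    QCStatus,
    QualityControl,
    Stage,
    Status,
)

# Timestamps must be timezone-aware (AwareDatetimeWithDefault)
t = datetime(2022, 11, 22, tzinfo=timezone.utc)

evaluation = QCEvaluation(
    name="Example evaluation",
    modality=Modality.ECEPHYS,
    stage=Stage.RAW,
    allow_failed_metrics=True,  # failing metrics will not fail this evaluation
    metrics=[
        QCMetric(
            name="example_metric_pass",
            value=42,
            status_history=[QCStatus(evaluator="Automated", status=Status.PASS, timestamp=t)],
        ),
        QCMetric(
            name="example_metric_fail",
            value=False,
            status_history=[QCStatus(evaluator="Automated", status=Status.FAIL, timestamp=t)],
        ),
    ],
)

qc = QualityControl(evaluations=[evaluation])

# The latest entry in each metric's status_history rolls up to the evaluation,
# and evaluations roll up to the overall record (FAIL > PENDING > PASS).
print(evaluation.status)          # Status.PASS, because allow_failed_metrics=True
print(evaluation.failed_metrics)  # list containing the failing QCMetric
print(qc.status)                  # Status.PASS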