From 47974b847fe89ffa2d782858b9ffa847a31a720c Mon Sep 17 00:00:00 2001 From: Marina D'Amato Date: Wed, 29 Nov 2023 10:56:43 +0100 Subject: [PATCH] first upload of automatically updated bib file --- diag.bib | 12405 +++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 8771 insertions(+), 3634 deletions(-) diff --git a/diag.bib b/diag.bib index 305967f..7ea0361 100644 --- a/diag.bib +++ b/diag.bib @@ -25,7 +25,6 @@ @string{ACTOPHSCA @string{ACTOTT = _Acta_Orthopaedica_et_Traumatologica_Turcica_} @string{ACTP = _Acta_Paediatrica_} @string{ACTR = _Acta_Radiologica_} -@string{Acustica = _Acustica_} @string{ADVCI = _Advances_in_Computational_Intelligence_} @string{ADVDDR = _Advanced_Drug_Delivery_Reviews_} @string{ADVEMB = _Advances_in_Experimental_Medicine_and_Biology_} @@ -69,11 +68,9 @@ @string{AJRCCM @string{AJS = _American_Journal_of_Surgery_} @string{AJSP = _American_Journal_of_Surgical_Pathology_} @string{AKTRAD = _Aktuelle_Radiologie_} -@string{Algorithmica = _Algorithmica_} @string{AM = _Academic_Medicine_} @string{AMSTAT = _American_Statistician_} @string{ANALCHEM = _Analytical_Chemistry_} -@string{Angiogenesis = _Angiogenesis_} @string{ANIMRS = _Animal_Reproduction_Science_} @string{ANNREVPAT = _Annual_Review_of_Pathology_} @string{AOAMS = _Annals_of_the_Academy_of_Medicine_Singapore_} @@ -93,7 +90,6 @@ @string{AOSO @string{AOSURG = _Annals_of_Surgery_} @string{AOTS = _Annals_of_Thoracic_Surgery_} @string{AOVS = _Annals_of_Vascular_Surgery_} -@string{Apidologie = _Apidologie_} @string{APMR = _Applied_Magnetic_Resonance_} @string{APOPT = _Applied_Optics_} @string{APRAD = _Applied_Radiology_} @@ -117,40 +113,37 @@ @string{ARTR @string{ARVO = _Association_for_Research_in_Vision_and_Ophthalmology_} @string{ASTJ = _Astrophysical_Journal_} @string{ASTP = _Astroparticle_Physics_} -@string{Atherosclerosis = _Atherosclerosis_} @string{ATM = _Annals_of_Translational_Medicine_} @string{ATS = _American_Thoracic_Society_International_Conference_} @string{ATVB = _Arteriosclerosis_Thrombosis_and_Vascular_Biology_} @string{AUSNZJO = _Australian_and_New_Zealand_Journal_of_Ophthalmology_} @string{AUSPESM = _Australasian_Physical_and_Engineering_Sciences_in_Medicine_} @string{AUSRAD = _Australasian_Radiology_} +@string{Acustica = _Acustica_} +@string{Algorithmica = _Algorithmica_} +@string{Angiogenesis = _Angiogenesis_} +@string{Apidologie = _Apidologie_} +@string{Atherosclerosis = _Atherosclerosis_} @string{Automedica = _Automedica_} @string{BAMS = _Bulletin_of_the_Autralian_Mathematical_Society_} @string{BCRT = _Breast_Cancer_Research_and_Treatment_} @string{BDM = _BioData_Mining_} @string{BEJZ = _Belgian_Journal_of_Zoology_} -@string{Bildgebung = _Bildgebung_} @string{BIOBA = _Biochimica_et_Biophysica_Acta_} -@string{Biochemistry = _Biochemistry_} @string{BIOCJ = _Biochemical_Journal_} @string{BIOCST = _Biochemical_Society_Transactions_} @string{BIOD = _BioDrugs_} -@string{BioIm = _IEEE_International_Symposium_on_Biomedical_Imaging_} @string{BIOLC = _Biological_Cybernetics_} @string{BIOLN = _Biology_of_the_Neonate_} @string{BIOLP = _Biological_Psychiatry_} -@string{Biomaterials = _Biomaterials_} @string{BIOMDL = _Biomedical_Digital_Libraries_} @string{BIOMEO = _Biomedical_Engineering_Online_} -@string{Biometrics = _Biometrics_} -@string{Biometrika = _Biometrika_} @string{BIOMS = _Biomedical_Simulation_} @string{BIOMSI = _Biomedical_Sciences_Instrumentation_} @string{BIOMT = _Biomedizinische_Technik_} @string{BIOPC = _Biophysical_Chemistry_} @string{BIOPLJ = _Biophysical_Journal_} 
@string{BIOPSJ = _Biophysics_Journal_} -@string{Biotechniques = _Biotechniques_} @string{BJA = _British_Journal_of_Anaesthesia_} @string{BJC = _British_Journal_of_Cancer_} @string{BJCP = _British_Journal_of_Clinical_Pharmacology_} @@ -163,7 +156,6 @@ @string{BJP @string{BJR = _British_Journal_of_Radiology_} @string{BJS = _British_Journal_of_Surgery_} @string{BJUI = _British_Journal_of_Urology_International_} -@string{Blut = _Blut_} @string{BMCCAN = _BMC_Cancer_} @string{BMCID = _BMC_Infectious_Diseases_} @string{BMCIMM = _BMC_Immunology_} @@ -182,14 +174,11 @@ @string{BMJO @string{BMT = _Bone_Marrow_Transplantation_} @string{BMVC = _British_Machine_Vision_Conference_} @string{BOE = _Biomedical_Optics_Express_} -@string{Bone = _Bone_} @string{BRAI = _Brain_Injury_} -@string{Brain = _Brain_} @string{BRAP = _Brain_Pathology_} @string{BRAR = _Brain_Research_} @string{BRASF = _Brain_Structure_and_Function_} @string{BRAT = _Brain_Topography_} -@string{Breast = _Breast_} @string{BREC = _Breast_Cancer_} @string{BRECR = _Breast_Cancer_Research_} @string{BRECRT = _Breast_Cancer_Research_and_Treatment_} @@ -197,14 +186,24 @@ @string{BREJ @string{BSTJ = _Bell_System_Technical_Journal_} @string{BULMB = _Bulletin_of_Mathematical_Biology_} @string{BULWHO = _Bulletin_of_the_World_Health_Organization_} +@string{Bildgebung = _Bildgebung_} +@string{BioIm = _IEEE_International_Symposium_on_Biomedical_Imaging_} +@string{Biochemistry = _Biochemistry_} +@string{Biomaterials = _Biomaterials_} +@string{Biometrics = _Biometrics_} +@string{Biometrika = _Biometrika_} +@string{Biotechniques = _Biotechniques_} +@string{Blut = _Blut_} +@string{Bone = _Bone_} +@string{Brain = _Brain_} +@string{Breast = _Breast_} @string{CACJC = _CA_a_Cancer_Journal_for_Clinicians_} @string{CADES = _Computer-Aided_Design_} @string{CAJM = _Central_African_Journal_of_Medicine_} @string{CANARJ = _Canadian_Association_of_Radiologists_Journal_} @string{CANB = _Cancer_Biomarkers_} @string{CANBR = _Cancer_Biotherapy_and_Radiopharmaceuticals_} -@string{Cancer = _Cancer_} -@string{CANCERS = _Cancers_ } +@string{CANCERS = _Cancers_} @string{CANE = _Cancer_Epidemiology_} @string{CANEBP = _Cancer_Epidemiology_Biomarkers_and_Prevention_} @string{CANI = _Cancer_Imaging_} @@ -224,7 +223,6 @@ @string{CCT @string{CD = _Clinical_Diabetes_} @string{CDSR = _Cochrane_Database_of_Systematic_Reviews_} @string{CELBB = _Cell_Biochemistry_and_Biophysics_} -@string{Cell = _Cell_} @string{CELLO = _Cellular_Oncology_} @string{CELMLS = _Cellular_and_Molecular_Life_Sciences_} @string{CERVD = _Cerebrovascular_Diseases_} @@ -232,17 +230,12 @@ @string{CG @string{CGA = _IEEE_Computer_Graphics_and_Applications_} @string{CGC = _Clinical_Genitourinary_Cancer_} @string{CGIP = _Computer_Graphics_and_Image_Processing_} -@string{Chemphyschem = _Chemphyschem_} -@string{Chest = _Chest_} @string{CHIMJ = _Chinese_Medical_Journal_} @string{CHIMSJ = _Chinese_Medical_Sciences_Journal_} -@string{Chirurg = _Chirurg_} @string{CIRCCVI = _Circulation_Cardiovascular_Imaging_} @string{CIRCJ = _Circulation_Journal_} @string{CIRCRES = _Circulation_Research_} -@string{Circulation = _Circulation_} @string{CLEPCFJ = _Cleft_Palate-Craniofacial_Journal_} -@string{Climacteric = _Climacteric_} @string{CLINATH = _Clinical_and_Applied_Thrombosis/Hemostasis_} @string{CLINCC = _Clinical_Colorectal_Cancer_} @string{CLINCHEM = _Clinical_Chemistry_} @@ -271,7 +264,6 @@ @string{COMJ @string{COMMACM = _Communications_of_the_Association_for_Computing_Machinery_} @string{COMMMED = _Communications_Medicine_} 
@string{COMPAY = _Computational_Pathology_and_Ophthalmic_Medical_Image_Analysis_} -@string{Computing = _Computing_} @string{CONCLITRI = _Controlled_Clinical_Trials_} @string{CONMMI = _Contrast_Media_and_Molecular_Imaging_} @string{COPD = _COPD_} @@ -306,6 +298,14 @@ @string{CVGIP @string{CVIR = _Cardiovascular_and_Interventional_Radiology_} @string{CVIU = _Computer_Vision_and_Image_Understanding_} @string{CVPR = _Computer_Vision_and_Pattern_Recognition_} +@string{Cancer = _Cancer_} +@string{Cell = _Cell_} +@string{Chemphyschem = _Chemphyschem_} +@string{Chest = _Chest_} +@string{Chirurg = _Chirurg_} +@string{Circulation = _Circulation_} +@string{Climacteric = _Climacteric_} +@string{Computing = _Computing_} @string{Cytometry = _Cytometry_} @string{CytometryB = _Cytometry_Part_B-Clinical_Cytometry_} @string{DAM = _Discrete_Applied_Mathematics_} @@ -314,8 +314,6 @@ @string{DC @string{DDT = _Drug_Discovery_Today_} @string{DENTRAD = _Dentomaxillofacial_Radiology_} @string{DI = _Diagnostic_Imaging_} -@string{Diabetes = _Diabetes_} -@string{Diabetologia = _Diabetologia_} @string{DIE = _Diagnostic_Imaging_Europe_} @string{DIGD = _Digestive_Diseases_} @string{DIGDS = _Digestive_Diseases_and_Sciences_} @@ -328,11 +326,12 @@ @string{DMKD @string{DPR = _Delft_Progress_Report_} @string{DRCP = _Diabetes_Research_and_Clinical_Practice_} @string{DTT = _Diabetes_Technology_and_Therapeutics_} +@string{Diabetes = _Diabetes_} +@string{Diabetologia = _Diabetologia_} @string{EAU = _Annual_European_Association_of_Urology_Congress_} @string{EBCC = _European_Breast_Cancer_Conference_} @string{EBJ = _European_Biophysics_Journal_} @string{ECCV = _European_Conference_on_Computer_Vision_} -@string{Ecology = _Ecology_} @string{ECP = _European_Congress_of_Pathology_} @string{ECR = _European_Congress_of_Radiology_} @string{EER = _Experimental_Eye_Research_} @@ -372,16 +371,13 @@ @string{EMBS @string{EMEMCNA = _Emergency_Medicine_Clinics_of_North_America_} @string{EMEMJ = _Emergency_Medicine_Journal_} @string{EMERAD = _Emergency_Radiology_} -@string{Endocrine = _Endocrine_} @string{ENPP = _European_Neuropsychopharmacology_} @string{EOPT = _Expert_Opinion_on_Pharmacotherapy_} -@string{Epilepsia = _Epilepsia_} @string{EPJC = _European_Physical_Journal_C_-_Particles_and_Fields_} @string{EPL = _Europhysics_Letters_} @string{ER = _European_Radiology_} @string{ERACT = _Expert_Review_of_Anticancer_Therapy_} @string{ERAIT = _Expert_Review_of_Anti-Infective_Therapy_} -@string{Ergonomics = _Ergonomics_} @string{ERJ = _European_Respiratory_Journal_} @string{ERJS = _European_Respiratory_Journal_Supplement_} @string{ERMD = _Expert_Review_of_Medical_Devices_} @@ -399,27 +395,31 @@ @string{EURETINA @string{EXPBR = _Experimental_Brain_Research_} @string{EXPCR = _Experimental_Cell_Research_} @string{EXPGER = _Experimental_Gerontology_} +@string{Ecology = _Ecology_} +@string{Endocrine = _Endocrine_} +@string{Epilepsia = _Epilepsia_} +@string{Ergonomics = _Ergonomics_} @string{Eye = _Eye_} @string{FEBSL = _FEBS_Letters_} @string{FICT = _Frontiers_in_ICT_} @string{FTCGV = _Foundations_and_Trends_in_Computer_Graphics_and_Vision_} @string{GACEO = _Graefe_s_Archive_for_Clinical_and_Experimental_Ophthalmology_} +@string{GO = _Gynecologic_Oncology_} +@string{GROHIR = _Growth_Hormone_and_IGF_Research_} @string{Gamma = _Gamma_} @string{Gastroenterology = _Gastroenterology_} @string{GigaScience = _GigaScience_} -@string{GO = _Gynecologic_Oncology_} -@string{GROHIR = _Growth_Hormone_and_IGF_Research_} @string{Gut = _Gut_} @string{HEADN = 
_Head_and_Neck_} @string{HEARINGR = _Hearing_Research_} -@string{Hepatology = _Hepatology_} -@string{Herz = _Herz_} -@string{Histopathology = _Histopathology_} @string{HORMONER = _Hormone_Research_} @string{HTA = _Health_Technology_Assessment_} @string{HUMBM = _Human_Brain_Mapping_} @string{HUMMG = _Human_Molecular_Genetics_} @string{HUMP = _Human_Pathology_} +@string{Hepatology = _Hepatology_} +@string{Herz = _Herz_} +@string{Histopathology = _Histopathology_} @string{Hypertension = _Hypertension_} @string{Hystrix = _Hystrix_} @string{IAOEH = _International_Archives_of_Occupational_and_Environmental_Health_} @@ -475,7 +475,6 @@ @string{IJWOMHEA @string{INDAMAT = _Indagationes_Mathematicae_} @string{INDJO = _Indian_Journal_of_Ophthalmology_} @string{INDPED = _Indian_Pediatrics_} -@string{Infection = _Infection_} @string{INSI = _Insights_into_Imaging_} @string{INTMEDJ = _Internal_Medicine_Journal_} @string{INTNR = _Interventional_Neuroradiology_} @@ -494,6 +493,7 @@ @string{IUJPFD @string{IUN = _International_Urology_and_Nephrology_} @string{IUS = _IEEE_International_Ultrasonics_Symposium_} @string{IVC = _Image_and_Vision_Computing_} +@string{Infection = _Infection_} @string{JA = _Journal_of_Anatomy_} @string{JAAD = _Journal_of_the_American_Academy_of_Dermatology_} @string{JAAPOS = _Journal_of_the_American_Association_for_Pediatric_Ophthalmology_and_Strabismus_} @@ -701,20 +701,20 @@ @string{KJR @string{KLINF = _Klinische_Fysica_} @string{Knee = _Knee_} @string{LABINV = _Laboratory_Investigation_} -@string{Lancet = _Lancet_} @string{LANCETGH = _Lancet_Global_Health_} @string{LANCETID = _Lancet_Infectious_Diseases_} @string{LANCETN = _Lancet_Neurology_} @string{LANCETO = _Lancet_Oncology_} @string{LANCETRM = _Lancet_Respiratory_Medicine_} -@string{Laryngoscope = _Laryngoscope_} @string{LDA = _Lifetime_Data_Analysis_} @string{LHD = _Lipids_in_Health_and_Disease_} @string{LNAI = _Lecture_Notes_in_Artificial_Intelligence_} @string{LNCS = _Lecture_Notes_in_Computer_Science_} -@string{Lung = _Lung_} @string{LUNGC = _Lung_Cancer_} @string{LYMPHRESBIO = _Lymphatic_Research_and_Biology_} +@string{Lancet = _Lancet_} +@string{Laryngoscope = _Laryngoscope_} +@string{Lung = _Lung_} @string{MACD = _Monaldi_Archives_for_Chest_Disease_} @string{MAGMA = _MAGMA_} @string{MBEC = _Medical_and_Biological_Engineering_and_Computing_} @@ -724,7 +724,6 @@ @string{MDM @string{MEDCNA = _Medical_Clinics_of_North_America_} @string{MEDSCIMON = _Medical_Science_Monitor_} @string{MEDUS = _Medical_Ultrasonography_} -@string{Menopause = _Menopause_} @string{MEP = _Medical_Engineering_and_Physics_} @string{MGR = _Magnesium_Research_} @string{MI = _Medical_Imaging_} @@ -733,10 +732,8 @@ @string{MICAD @string{MICCAI = _Medical_Image_Computing_and_Computer-Assisted_Intervention_} @string{MIDL = _Medical_Imaging_with_Deep_Learning_} @string{MIM = _Methods_of_Information_in_Medicine_} -@string{Mind = _Mind_} @string{MINF = _Medical_Informatics_} @string{MIT = _Medical_Imaging_Technology_} -@string{Mitochondrion = _Mitochondrion_} @string{MJA = _Medical_Journal_of_Australia_} @string{ML = _Machine_Learning_} @string{MM = _Medica_Mundi_} @@ -759,8 +756,9 @@ @string{MRMPBM @string{MRMS = _Magnetic_Resonance_in_Medical_Sciences_} @string{MRP = _Medical_Radiography_and_Photography_} @string{MVA = _Machine_Vision_and_Applications_} -@string{Nanobiotechnology = _Nanobiotechnology_} -@string{Nanotechnology = _Nanotechnology_} +@string{Menopause = _Menopause_} +@string{Mind = _Mind_} +@string{Mitochondrion = _Mitochondrion_} 
@string{NATCAN = _Nature_Cancer_} @string{NATCOM = _Nature_Communications_} @string{NATMED = _Nature_Medicine_} @@ -772,13 +770,10 @@ @string{NATREVNEP @string{NATREVNEU = _Nature_Reviews_Neurology_} @string{NATREVU = _Nature_Reviews_Urology_} @string{NATSCIREP = _Nature_Scientific_Reports_} -@string{Nature = _Nature_} @string{NATUREBT = _Nature_Biotechnology_} @string{NBE = _Nature_Biomedical_Engineering_} @string{NDT = _Nephrology_Dialysis_Transplantation_} @string{NEJM = _New_England_Journal_of_Medicine_} -@string{Neoplasia = _Neoplasia_} -@string{Nervenarzt = _Nervenarzt_} @string{NETHHJ = _Netherlands_Heart_Journal_} @string{NETHJM = _Netherlands_Journal_of_Medicine_} @string{NEUBA = _Neurobiology_of_Aging_} @@ -793,13 +788,6 @@ @string{NEUN @string{NEUONC = _Neuro-oncology_} @string{NEURES = _Neurological_Research_} @string{NEURNR = _Neurorehabilitation_and_Neural_Repair_} -@string{Neurocomputing = _Neurocomputing_} -@string{Neurology = _Neurology_} -@string{Neuron = _Neuron_} -@string{Neuropediatrics = _Neuropediatrics_} -@string{Neuroradiology = _Neuroradiology_} -@string{Neuroreport = _Neuroreport_} -@string{Neurosurgery = _Neurosurgery_} @string{NEURUUD = _Neurourology_and_Urodynamics_} @string{NEUSF = _Neurosurgical_Focus_} @string{NEUSRC = _Neuroscience_Research_Communications_} @@ -812,21 +800,33 @@ @string{NRU @string{NTVG = _Nederlands_Tijdschrift_voor_Geneeskunde_} @string{NUCMB = _Nuclear_Medicine_and_Biology_} @string{NUCMC = _Nuclear_Medicine_Communications_} +@string{Nanobiotechnology = _Nanobiotechnology_} +@string{Nanotechnology = _Nanotechnology_} +@string{Nature = _Nature_} +@string{Neoplasia = _Neoplasia_} +@string{Nervenarzt = _Nervenarzt_} +@string{Neurocomputing = _Neurocomputing_} +@string{Neurology = _Neurology_} +@string{Neuron = _Neuron_} +@string{Neuropediatrics = _Neuropediatrics_} +@string{Neuroradiology = _Neuroradiology_} +@string{Neuroreport = _Neuroreport_} +@string{Neurosurgery = _Neurosurgery_} @string{Nuklearmedizin = _Nuklearmedizin_} @string{OE = _Optics_Express_} @string{OEM = _Occupational_and_Environmental_Medicine_} @string{OMS = _Optimization_Methods_and_Software_} +@string{OO = _Oral_Oncology_} +@string{OPTC = _Optics_Communications_} +@string{OPTE = _Optical_Engineering_} +@string{OTON = _Otology_and_Neurotology_} +@string{OVS = _Optometry_and_Vision_Science_} @string{Oncoimmunology = _Oncoimmunology_} @string{Oncologist = _Oncologist_} @string{Oncology = _Oncology_} @string{Oncotarget = _Oncotarget_} -@string{OO = _Oral_Oncology_} @string{Ophthalmologica = _Ophthalmologica_} @string{Ophthalmology = _Ophthalmology_} -@string{OPTC = _Optics_Communications_} -@string{OPTE = _Optical_Engineering_} -@string{OTON = _Otology_and_Neurotology_} -@string{OVS = _Optometry_and_Vision_Science_} @string{PAA = _Pattern_Analysis_and_Applications_} @string{PARC = _Parallel_Computing_} @string{PATHOB = _Pathobiology_} @@ -839,14 +839,11 @@ @string{PEDCAR @string{PEDCCM = _Pediatric_Critical_Care_Medicine_} @string{PEDCNA = _Pediatric_Clinics_of_North_America_} @string{PEDEC = _Pediatric_Emergency_Care_} -@string{Pediatrics = _Pediatrics_} @string{PEDNEP = _Pediatric_Nephrology_} @string{PEDNEU = _Pediatric_Neurology_} @string{PEDPUL = _Pediatric_Pulmonology_} @string{PEDRAD = _Pediatric_Radiology_} @string{PEDRES = _Pediatric_Research_} -@string{Peptides = _Peptides_} -@string{Perception = _Perception_} @string{PHA = _Public_Health_Action_} @string{PHYSMED = _Physica_Medica_} @string{PHYSREVA = _Physical_Review_A_} @@ -861,21 +858,24 @@ @string{PLOSMED 
@string{PLOSONE = _PLoS_One_} @string{PMB = _Physics_in_Medicine_and_Biology_} @string{PNAS = _Proceedings_of_the_National_Academy_of_Sciences_of_the_United_States_of_America_} -@string{Pneumologie = _Pneumologie_} @string{PR = _Pattern_Recognition_} @string{PRER = _Progress_in_Retinal_and_Eye_Research_} @string{PRIA = _Pattern_Recognition_and_Image_Analysis_} @string{PRJ = _PeerJ_} @string{PRL = _Pattern_Recognition_Letters_} @string{PROCBS = _Proceedings_Biological_Sciences_} -@string{Prostate = _Prostate_} -@string{Proteins = _Proteins_} @string{PSPOIE = _Proceedings_of_the_Society_of_Photo-Optical_Instrumentation_Engineers_} @string{PSR = _Photosynthesis_Research_} @string{PSYB = _Psychological_Bulletin_} @string{PSYCHIRES = _Psychiatry_Research_} @string{PSYM = _Psychological_Methods_} @string{PSYR = _Psychological_Research_} +@string{Pediatrics = _Pediatrics_} +@string{Peptides = _Peptides_} +@string{Perception = _Perception_} +@string{Pneumologie = _Pneumologie_} +@string{Prostate = _Prostate_} +@string{Proteins = _Proteins_} @string{QIMS = _Quantitative_Imaging_in_Medicine_and_Surgery_} @string{QJM = _QJM_Monthly_Journal_of_the_Association_of_Physicians_} @string{QJNUCMED = _Quarterly_Journal_of_Nuclear_Medicine_} @@ -883,10 +883,7 @@ @string{QJNUCMEDMOLIMA @string{RADBRA = _Radiologia_Brasileira_} @string{RADD = _RadiologenDagen_} @string{RADIATRES = _Radiation_Research_} -@string{Radiographics = _Radiographics_} @string{RADIOLMED = _La_Radiologia_Medica_} -@string{Radiologe = _Radiologe_} -@string{Radiology = _Radiology_} @string{RADIOLONC = _Radiology_and_Oncology_} @string{RADM = _Radiation_Medicine_} @string{RADONC = _Radiation_Oncology_} @@ -900,33 +897,33 @@ @string{REGTOXPHA @string{REPSCI = _Reproductive_Sciences_} @string{REPTOX = _Reproductive_Toxicology_} @string{RESPC = _Respiratory_Care_} -@string{Respiration = _Respiration_} -@string{Respirology = _Respirology_} @string{RESPM = _Respiratory_Medicine_} @string{RESPR = _Respiratory_Research_} -@string{Retina = _Retina_} @string{REVU = _Reviews_in_Urology_} @string{RHEINT = _Rheumatology_International_} @string{RHEUMADV = _Rheumatology_Advances_in_Practice_} -@string{Rheumatology = _Rheumatology_} @string{RHOISLMED = _Rhode_Island_Medicine_} @string{RIMTSP = _Revista_do_Instituto_de_Medicina_Tropical_de_Sao_Paulo_} @string{RM = _Respiratory_Medicine_} -@string{Robotica = _Robotica_} -@string{Rofo = _RoFo_} @string{RPD = _Radiation_Protection_Dosimetry_} @string{RPT = _Radiological_Physics_and_Technology_} @string{RSNA = _Annual_Meeting_of_the_Radiological_Society_of_North_America_} @string{RTO = _Radiotherapy_and_Oncology_} -@string{Sarcoma = _Sarcoma_} +@string{Radiographics = _Radiographics_} +@string{Radiologe = _Radiologe_} +@string{Radiology = _Radiology_} +@string{Respiration = _Respiration_} +@string{Respirology = _Respirology_} +@string{Retina = _Retina_} +@string{Rheumatology = _Rheumatology_} +@string{Robotica = _Robotica_} +@string{Rofo = _RoFo_} @string{SCACARJ = _Scandinavian_Cardiovascular_Journal_} @string{SCAJGAS = _Scandinavian_Journal_of_Gastroenterology_} @string{SCAJINFDIS = _Scandinavian_Journal_of_Infectious_Diseases_} @string{SCC = _Supportive_Care_in_Cancer_} @string{SCHBUL = _Schizophrenia_Bulletin_} -@string{Science = _Science_} @string{SCIENCETM = _Science_Translational_Medicine_} -@string{Scientometrics = _Scientometrics_} @string{SCIREP = _Scientific_Reports_} @string{SEMCARTVA = _Seminars_in_Cardiothoracic_and_Vascular_Anesthesia_} @string{SEMIC = _Seminars_in_Interventional_Cardiology_} @@ 
-939,7 +936,6 @@ @string{SEMTHOCVS @string{SEMUCM = _Seminars_in_Ultrasound_CT_and_MR_} @string{SEMVMSSA = _Seminars_in_Veterinary_Medicine_and_Surgery_Small_Animal_} @string{SEMVS = _Seminars_in_Vascular_Surgery_} -@string{Sensors = _Sensors_} @string{SHTI = _Studies_in_Health_Technology_and_Informatics_} @string{SIAMJCOM = _SIAM_Journal_on_Computing_} @string{SIAMJMAA = _SIAM_Journal_on_Matrix_Analysis_and_Applications_} @@ -949,23 +945,27 @@ @string{SIGPRO @string{SKER = _Skeletal_Radiology_} @string{SKIRESTEC = _Skin_Research_and_Technology_} @string{SM = _Statistics_in_Medicine_} -@string{Small = _Small_} @string{SMMR = _Statistical_Methods_in_Medical_Research_} @string{SOCSCIMED = _Social_Science_and_Medicine_} @string{SOUAFRMEDJ = _South_African_Medical_Journal_} @string{SP = _Signal_Processing_} @string{SPIE = _Proceedings_of_the_SPIE_} -@string{Spine = _Spine_} @string{SPM = _IEEE_Signal_Processing_Magazine_} @string{SRA = _Surgical_and_Radiologic_Anatomy_} @string{STACOM = _Statistics_and_Computing_} @string{STASUR = _Statistics_Surveys_} -@string{Stroke = _Stroke_} @string{STRONK = _Strahlentherapie_und_Onkologie_} @string{SUBCELBIO = _Subcellular_Biochemistry_} -@string{Surgery = _Surgery_} @string{SUROPH = _Survey_of_Ophthalmology_} @string{SYMCULSCI = _Symmetry_Culture_and_Science_} +@string{Sarcoma = _Sarcoma_} +@string{Science = _Science_} +@string{Scientometrics = _Scientometrics_} +@string{Sensors = _Sensors_} +@string{Small = _Small_} +@string{Spine = _Spine_} +@string{Stroke = _Stroke_} +@string{Surgery = _Surgery_} @string{Symmetry = _Symmetry_} @string{TAI = _IEEE_Transactions_on_Artificial_Intelligence_} @string{TARONC = _Targeted_Oncology_} @@ -978,13 +978,11 @@ @string{TCR @string{TCS = _Theory_of_Computing_Systems_} @string{TECHCRT = _Technology_in_Cancer_Research_and_Treatment_} @string{TECHHC = _Technology_and_Health_Care_} -@string{Technometrics = _Technometrics_} @string{TEH = _Telemedicine_and_e-health_} @string{TEXHIJ = _Texas_Heart_Institute_Journal_} @string{TFS = _IEEE_Transactions_on_Fuzzy_Systems_} @string{THEDRUMON = _Therapeutic_Drug_Monitoring_} @string{THOCVS = _Thoracic_and_Cardiovascular_Surgeon_} -@string{Thorax = _Thorax_} @string{THOSC = _Thoracic_Surgery_Clinics_} @string{THRHAE = _Thrombosis_and_Haemostasis_} @string{THRRES = _Thrombosis_Research_} @@ -1009,46 +1007,48 @@ @string{TOXAP @string{TP = _Toxicologic_Pathology_} @string{TPAMI = _IEEE_Transactions_on_Pattern_Analysis_and_Machine_Intelligence_} @string{TRAMON = _Trauma_Monthly_} -@string{Transfusion = _Transfusion_} @string{TRAP = _Transplantation_Proceedings_} @string{TREBT = _Trends_in_Biotechnology_} @string{TRECVM = _Trends_in_Cardiovascular_Medicine_} @string{TREEM = _Trends_in_Endocrinology_and_Metabolism_} @string{TREMM = _Trends_in_Molecular_Medicine_} -@string{Trials = _Trials_} @string{TROD = _Tropical_Doctor_} @string{TRSTMH = _Transactions_of_the_Royal_Society_of_Tropical_Medicine_and_Hygiene_} @string{TSAP = _IEEE_Transactions_on_Speech_and_Audio_Processing_} @string{TSMC = _IEEE_Transactions_on_Systems_Man_and_Cybernetics_} @string{TSP = _IEEE_Transactions_on_Signal_Processing_} -@string{Tuberculosis = _Tuberculosis_} @string{TUBLUNDIS = _Tubercle_and_Lung_Disease_} @string{TUFF = _IEEE_Transactions_on_Ultrasonics_Ferroelectrics_and_Frequency_Control_} @string{TUMBIO = _Tumor_Biology_} -@string{Tumori = _Tumori_} @string{TUR = _Tijdschrift_voor_Urologie_} @string{TVCG = _IEEE_Transactions_on_Visualization_and_Computer_Graphics_} +@string{Technometrics = _Technometrics_} 
+@string{Thorax = _Thorax_} +@string{Transfusion = _Transfusion_} +@string{Trials = _Trials_} +@string{Tuberculosis = _Tuberculosis_} +@string{Tumori = _Tumori_} @string{UI = _Ultrasonic_Imaging_} @string{UJMS = _Upsala_Journal_of_Medical_Sciences_} -@string{Ultramicroscopy = _Ultramicroscopy_} -@string{Ultrasonics = _Ultrasonics_} @string{UMB = _Ultrasound_in_Medicine_and_Biology_} -@string{Unfallchirurg = _Unfallchirurg_} @string{UOG = _Ultrasound_in_Obstetrics_and_Gynecology_} @string{UPDCANTHE = _Update_on_Cancer_Therapeutics_} @string{UROCLINA = _Urologic_Clinics_of_North_America_} -@string{Urologe = _Urologe_} -@string{Urology = _Urology_} @string{UROONC = _Urologic_Oncology_} @string{URORES = _Urological_Research_} @string{USM = _Ultraschall_in_der_Medizin_} -@string{Vascular = _Vascular_} +@string{Ultramicroscopy = _Ultramicroscopy_} +@string{Ultrasonics = _Ultrasonics_} +@string{Unfallchirurg = _Unfallchirurg_} +@string{Urologe = _Urologe_} +@string{Urology = _Urology_} @string{VETJ = _Veterinary_Journal_} @string{VIRA = _Virchows_Archiv_} @string{VISCOM = _Visual_Computer_} @string{VISR = _Vision_Research_} @string{VOPONK = _Voprosy_Onkologii_} @string{VRU = _Veterinary_Radiology_and_Ultrasound_} +@string{Vascular = _Vascular_} @string{WIENKW = _Wiener_Klinische_Wochenschrift_} @string{WIENKWS = _Wiener_Klinische_Wochenschrift_Supplementum_} @string{WIENMW = _Wiener_Medizinische_Wochenschrift_} @@ -1086,16 +1086,6 @@ @article{Abas05 gscites = {233}, } -@conference{Genu22, - author = {E. A. J. van Genugten and B. Piet and G. Schreibelt and T. van Oorschot and G. van den Heuvel and F. Ciompi and C. Jacobs and J. de Vries and M. M. van den Heuvel and E. Aarntzen}, - title = {Imaging tumor-infiltrating CD8 (+) T-cells in non-small cell lung cancer patients upon neo-adjuvant treatment with durvalumab}, - booktitle = {European Molecular Imaging Meeting}, - year = {2022}, - abstract = {INTRODUCTION: Immune checkpoint inhibitors (ICI), like targeting programmed death receptor ligand 1 (PD-L1), have revolutionized anti-cancer treatments, including non-small cell lung cancer (NSCLC) [1, 2]. Assessment of PD-L1 expression on tumor biopsies is current practice, but there is a need for additional biomarkers correlating to the complex mechanism of action of ICI. The presence of tumor-infiltrating CD8+ T-cells (TILs) is a robust biomarker associated with immune therapy success [3-6]. Tools to track TILs in patients during ICI treatment would allow further development of immune-oncology drugs. METHODS: This ongoing single-center prospective study (NCT03853187) includes patients with histologically proven T1b-3N0-1M0 NSCLC eligible for resection. Exclusion criteria are previous anti-cancer therapy <6 months and immune disorders or suppression. Patients receive two courses neo-adjuvant durvalumab (750mg Q2W) and consecutive TIL imaging. Cohort 1 underwent apheresis and magnetic-activated cells sorting (CliniMACS) to isolate 100 x10e6 autologous CD8+ T-cells for ex vivo labeling with 111In-oxine. Re-injection was followed by 4h post-injection (p.i.) planar imaging, 70h p.i. SPECT imaging, standard-of-care surgery and 78h p.i. uSPECT of the resected lobe. Patients in cohort 2 (ongoing) receive 1.5mg 89Zr-Df-crefmirlimab followed by PET/CT 24h p.i. and 74h p.i. uPET/CT of the resected lobe. RESULTS/DISCUSSION: In cohort 1, 8/10 patients completed TIL imaging; one procedure was withdrawn due to COVID-19 restrictions and one due to unsuccessful T-cell isolation. 
CliniMACS yield ranged 240-714 x10e6 CD8+ T-cells, purity 84-97% and cell viability 92-100%. Labeling efficacy of 100 x10e6 cells for re-injection ranged 42-64% with injected activity of 22,4-36,7 MBq In-111. TIL imaging was completed by 2/3 patients in cohort 2, one subject discontinued neo-adjuvant treatment due to pneumonia. To determine the potential for visual assessment of TILs, we analyzed ratios between tumor uptake and contralateral lung. We observed large variations within cohort 1, dependent on tumor localization. Ratios between tumor and bloodpool activity were determined to quantify specific accumulation in the tumor. Our results favor quantification of T-cells on PET over SPECT given its higher sensitivity and spatial resolution. Correlation of imaging with CD8+ T-cells in the resected tumor is ongoing (will be presented). CONCLUSIONS: We implemented two methods for tracking CD8+ T-cells in early-stage NSCLC patients after neo-adjuvant durvalumab. Although ex vivo cell labeling perhaps more specifically targets migrating TILs into the tumor, 89Zr-Df-crefmirlimab has the potential to also target residing cells. Quantitative correlation with presence of TILs in the resected tumor will help to determine the role of these imaging tools in the development of immune-oncology drugs.}, - optnote = {DIAG, RADIOLOGY}, -} - - @inproceedings{Abas05a, author = {D. Ab\'{a}solo and C. G\'{o}mez and J. Poza and M. Garc\'{i}a and C. I. S\'{a}nchez and M. L\'{o}pez}, title = {{EEG} background activity analysis in {A}lzheimer's disease patients with sample entropy}, @@ -1142,6 +1132,42 @@ @article{Abra08a month = {11}, gsid = {14186889695192659355}, gscites = {287}, + ss_id = {0445a1a0aa30c9ca41a8ab31463d617b849a0814}, + all_ss_ids = {['0445a1a0aa30c9ca41a8ab31463d617b849a0814']}, +} + +@article{Adam22a, + author = {Adams, Lisa C. and Makowski, Marcus R. and Engel, Gunther and Rattunde, Maximilian and Busch, Felix and Asbach, Patrick and Niehues, Stefan M. and Vinayahalingam, Shankeeth and van Ginneken, Bram and Litjens, Geert and Bressem, Keno K.}, + title = {Dataset of prostate MRI annotated for anatomical zones and cancer.}, + doi = {10.1016/j.dib.2022.108739}, + pages = {108739}, + volume = {45}, + abstract = {In the present work, we present a publicly available, expert-segmented representative dataset of 158 3.0 Tesla biparametric MRIs [1]. There is an increasing number of studies investigating prostate and prostate carcinoma segmentation using deep learning (DL) with 3D architectures [2], [3], [4], [5], [6], [7]. The development of robust and data-driven DL models for prostate segmentation and assessment is currently limited by the availability of openly available expert-annotated datasets [8], [9], [10]. The dataset contains 3.0 Tesla MRI images of the prostate of patients with suspected prostate cancer. Patients over 50 years of age who had a 3.0 Tesla MRI scan of the prostate that met PI-RADS version 2.1 technical standards were included. All patients received a subsequent biopsy or surgery so that the MRI diagnosis could be verified/matched with the histopathologic diagnosis. For patients who had undergone multiple MRIs, the last MRI, which was less than six months before biopsy/surgery, was included. All patients were examined at a German university hospital (Charite Universitatsmedizin Berlin) between 02/2016 and 01/2020. All MRI were acquired with two 3.0 Tesla MRI scanners (Siemens VIDA and Skyra, Siemens Healthineers, Erlangen, Germany). 
Axial T2W sequences and axial diffusion-weighted sequences (DWI) with apparent diffusion coefficient maps (ADC) were included in the data set. T2W sequences and ADC maps were annotated by two board-certified radiologists with 6 and 8 years of experience, respectively. For T2W sequences, the central gland (central zone and transitional zone) and peripheral zone were segmented. If areas of suspected prostate cancer (PIRADS score of >= 4) were identified on examination, they were segmented in both the T2W sequences and ADC maps. Because restricted diffusion is best seen in DWI images with high b-values, only these images were selected and all images with low b-values were discarded. Data were then anonymized and converted to NIfTI (Neuroimaging Informatics Technology Initiative) format.}, + file = {Adam22a.pdf:pdf\\Adam22a.pdf:PDF}, + journal = {Data in brief}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36426089}, + year = {2022}, + ss_id = {516b2dc6c3761458c8fa6f5759295673e86a42db}, + all_ss_ids = {['516b2dc6c3761458c8fa6f5759295673e86a42db']}, + gscites = {1}, +} + +@article{Adam22b, + author = {Adams, Lisa C. and Makowski, Marcus R. and Engel, Gunther and Rattunde, Maximilian and Busch, Felix and Asbach, Patrick and Niehues, Stefan M. and Vinayahalingam, Shankeeth and van Ginneken, Bram and Litjens, Geert and Bressem, Keno K.}, + title = {Prostate158 - An expert-annotated 3T MRI dataset and algorithm for prostate cancer detection.}, + doi = {10.1016/j.compbiomed.2022.105817}, + pages = {105817}, + volume = {148}, + abstract = {The development of deep learning (DL) models for prostate segmentation on magnetic resonance imaging (MRI) depends on expert-annotated data and reliable baselines, which are often not publicly available. This limits both reproducibility and comparability. Prostate158 consists of 158 expert annotated biparametric 3T prostate MRIs comprising T2w sequences and diffusion-weighted sequences with apparent diffusion coefficient maps. Two U-ResNets trained for segmentation of anatomy (central gland, peripheral zone) and suspicious lesions for prostate cancer (PCa) with a PI-RADS score of >=4 served as baseline algorithms. Segmentation performance was evaluated using the Dice similarity coefficient (DSC), the Hausdorff distance (HD), and the average surface distance (ASD). The Wilcoxon test with Bonferroni correction was used to evaluate differences in performance. The generalizability of the baseline model was assessed using the open datasets Medical Segmentation Decathlon and PROSTATEx. Compared to Reader 1, the models achieved a DSC/HD/ASD of 0.88/18.3/2.2 for the central gland, 0.75/22.8/1.9 for the peripheral zone, and 0.45/36.7/17.4 for PCa. Compared with Reader 2, the DSC/HD/ASD were 0.88/17.5/2.6 for the central gland, 0.73/33.2/1.9 for the peripheral zone, and 0.4/39.5/19.1 for PCa. Interrater agreement measured in DSC/HD/ASD was 0.87/11.1/1.0 for the central gland, 0.75/15.8/0.74 for the peripheral zone, and 0.6/18.8/5.5 for PCa. Segmentation performances on the Medical Segmentation Decathlon and PROSTATEx were 0.82/22.5/3.4; 0.86/18.6/2.5 for the central gland, and 0.64/29.2/4.7; 0.71/26.3/2.2 for the peripheral zone. 
We provide an openly accessible, expert-annotated 3T dataset of prostate MRI and a reproducible benchmark to foster the development of prostate segmentation algorithms.}, + file = {Adam22b.pdf:pdf\\Adam22b.pdf:PDF}, + journal = {Computers in biology and medicine}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35841780}, + year = {2022}, + ss_id = {c661857719657aac571718e3d48cb66b8fc1e941}, + all_ss_ids = {['c661857719657aac571718e3d48cb66b8fc1e941']}, + gscites = {11}, +} @article{Adri09, @@ -1193,7 +1219,9 @@ @article{Adri10 number = {3}, pmid = {20548802}, gsid = {7949021548853267706}, - gscites = {5}, + gscites = {7}, + ss_id = {3da4ab4d50211eb7a0148c4ed74729b2d4e30ba9}, + all_ss_ids = {['3da4ab4d50211eb7a0148c4ed74729b2d4e30ba9']}, } @article{Adri11, @@ -1211,7 +1239,9 @@ pmid = {21459371}, month = {7}, gsid = {15610164965312020379}, - gscites = {77}, + gscites = {83}, + ss_id = {691c90e68eab1dcae5861fba0eab73ba626e6777}, + all_ss_ids = {['691c90e68eab1dcae5861fba0eab73ba626e6777']}, } @phdthesis{Adri11a, @@ -1243,7 +1273,9 @@ @article{Adri11b pmid = {21217088}, month = {1}, gsid = {4087773643446117872}, - gscites = {3}, + gscites = {7}, + ss_id = {c738da78dc1fb336a8dff5a8047059d52a6de939}, + all_ss_ids = {['c738da78dc1fb336a8dff5a8047059d52a6de939']}, } @article{Adri11c, @@ -1261,7 +1293,26 @@ @article{Adri11c pmid = {20921911}, month = {2}, gsid = {11770667656434269892}, - gscites = {3}, + gscites = {6}, + ss_id = {9fabf1bc1c126702a051ec56474af2d1518ad0ba}, + all_ss_ids = {['9fabf1bc1c126702a051ec56474af2d1518ad0ba']}, +} + +@article{Afsh21, + author = {Afshar-Oromieh, Ali and Prosch, Helmut and Schaefer-Prokop, Cornelia and Bohn, Karl Peter and Alberts, Ian and Mingels, Clemens and Thurnher, Majda and Cumming, Paul and Shi, Kuangyu and Peters, Alan and Geleff, Silvana and Lan, Xiaoli and Wang, Feng and Huber, Adrian and Gr\"{a}ni, Christoph and Heverhagen, Johannes T. and Rominger, Axel and Fontanellaz, Matthias and Sch\"{o}der, Heiko and Christe, Andreas and Mougiakakou, Stavroula and Ebner, Lukas}, + title = {A comprehensive review of imaging findings in COVID-19 - status in early 2021}, + doi = {10.1007/s00259-021-05375-3}, + year = {2021}, + abstract = {Medical imaging methods are assuming a greater role in the workup of patients with COVID-19, mainly in relation to the primary manifestation of pulmonary disease and the tissue distribution of the angiotensin-converting-enzyme 2 (ACE 2) receptor. However, the field is so new that no consensus view has emerged guiding clinical decisions to employ imaging procedures such as radiography, computer tomography (CT), positron emission tomography (PET), and magnetic resonance imaging, and in what measure the risk of exposure of staff to possible infection could be justified by the knowledge gained. The insensitivity of current RT-PCR methods for positive diagnosis is part of the rationale for resorting to imaging procedures. While CT is more sensitive than genetic testing in hospitalized patients, positive findings of ground glass opacities depend on the disease stage. There is sparse reporting on PET/CT with [18F]-FDG in COVID-19, but available results are congruent with the earlier literature on viral pneumonias. There is a high incidence of cerebral findings in COVID-19, and likewise evidence of gastrointestinal involvement.
Artificial intelligence, notably machine learning, is emerging as an effective method for diagnostic image analysis, with performance in the discriminative diagnosis of COVID-19 pneumonia comparable to that of human practitioners.}, + url = {http://dx.doi.org/10.1007/s00259-021-05375-3}, + file = {Afsh21.pdf:pdf\\Afsh21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Nuclear Medicine and Molecular Imaging}, + automatic = {yes}, + citation-count = {31}, + pages = {2500-2524}, + volume = {48}, + pmid = {33932183}, +} @inproceedings{Albe11, @@ -1308,6 +1359,39 @@ @mastersthesis{Alee18 journal = {Master thesis}, } +@article{Alex21, + author = {Alexander, Barbara D and Lamoth, Fr\'{e}d\'{e}ric and Heussel, Claus Peter and Prokop, Cornelia Schaefer and Desai, Sujal R and Morrissey, C Orla and Baddley, John W}, + title = {Guidance on Imaging for Invasive Pulmonary Aspergillosis and Mucormycosis: From the Imaging Working Group for the Revision and Update of the Consensus Definitions of Fungal Disease from the EORTC/MSGERC}, + doi = {10.1093/cid/ciaa1855}, + year = {2021}, + abstract = {Background + Clinical imaging in suspected invasive fungal disease (IFD) has a significant role in early detection of disease and helps direct further testing and treatment. Revised definitions of IFD from the EORTC/MSGERC were recently published and provide clarity on the role of imaging for the definition of IFD. Here, we provide evidence to support these revised diagnostic guidelines. + + + Methods + We reviewed data on imaging modalities and techniques used to characterize IFDs. + + + Results + Volumetric high-resolution computed tomography (CT) is the method of choice for lung imaging. Although no CT radiologic pattern is pathognomonic of IFD, the halo sign, in the appropriate clinical setting, is highly suggestive of invasive pulmonary aspergillosis (IPA) and associated with specific stages of the disease. The ACS is not specific for IFD and occurs in the later stages of infection. By contrast, the reversed halo sign and the hypodense sign are typical of pulmonary mucormycosis but occur less frequently. In noncancer populations, both invasive pulmonary aspergillosis and mucormycosis are associated with "atypical" nonnodular presentations, including consolidation and ground-glass opacities. + + + Conclusions + A uniform definition of IFD could improve the quality of clinical studies and aid in differentiating IFD from other pathology in clinical practice. Radiologic assessment of the lung is an important component of the diagnostic work-up and management of IFD. Periodic review of imaging studies that characterize findings in patients with IFD will inform future diagnostic guidelines. + }, + url = {http://dx.doi.org/10.1093/cid/ciaa1855}, + file = {Alex21.pdf:pdf\\Alex21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Clinical Infectious Diseases}, + automatic = {yes}, + citation-count = {31}, + pages = {S79-S88}, + volume = {72}, + pmid = {33709131}, +} + @inproceedings{Altu20, author = {Altun, Hidir Cem and Chlebus, Grzegorz and Jacobs, Colin and Meine, Hans and van Ginneken, Bram and Hahn, Horst K.}, booktitle = MI, @@ -1320,38 +1404,77 @@ @inproceedings{Altu20 optnote = {DIAG, RADIOLOGY}, year = {2020}, month = {3}, + ss_id = {90a870cbb9897124193ba18c5358fe45b6260621}, + all_ss_ids = {['90a870cbb9897124193ba18c5358fe45b6260621']}, + gscites = {3}, +} + +@article{Alve23, + author = {Alves, Nat\'{a}lia and Bosma, Joeran S. and Venkadesh, Kiran V.
and Jacobs, Colin and Saghir, Zaigham and de Rooij, Maarten and Hermans, John and Huisman, Henkjan}, + title = {Prediction Variability to Identify Reduced AI Performance in Cancer Diagnosis at MRI and CT}, + doi = {10.1148/radiol.230275}, + year = {2023}, + abstract = {Background: A priori identification of patients at risk of artificial intelligence (AI) failure in diagnosing cancer would contribute to the safer clinical integration of diagnostic algorithms. + Purpose: To evaluate AI prediction variability as an uncertainty quantification (UQ) metric for identifying cases at risk of AI failure in diagnosing cancer at MRI and CT across different cancer types, data sets, and algorithms. + Materials and Methods: Multicenter data sets and publicly available AI algorithms from three previous studies that evaluated detection of pancreatic cancer on contrast-enhanced CT images, detection of prostate cancer on MRI scans, and prediction of pulmonary nodule malignancy on low-dose CT images were analyzed retrospectively. Each task's algorithm was extended to generate an uncertainty score based on ensemble prediction variability. AI accuracy percentage and partial area under the receiver operating characteristic curve (pAUC) were compared between certain and uncertain patient groups in a range of percentile thresholds (10%-90%) for the uncertainty score using permutation tests for statistical significance. The pulmonary nodule malignancy prediction algorithm was compared with 11 clinical readers for the certain group (CG) and uncertain group (UG). + Results: In total, 18 022 images were used for training and 838 images were used for testing. AI diagnostic accuracy was higher for the cases in the CG across all tasks (P < .001). At an 80% threshold of certain predictions, accuracy in the CG was 21%-29% higher than in the UG and 4%-6% higher than in the overall test data sets. The lesion-level pAUC in the CG was 0.25-0.39 higher than in the UG and 0.05-0.08 higher than in the overall test data sets (P < .001). For pulmonary nodule malignancy prediction, accuracy of AI was on par with clinicians for cases in the CG (AI results vs clinician results, 80% [95% CI: 76, 85] vs 78% [95% CI: 70, 87]; P = .07) but worse for cases in the UG (AI results vs clinician results, 50% [95% CI: 37, 64] vs 68% [95% CI: 60, 76]; P < .001). + Conclusion: An AI-prediction UQ metric consistently identified reduced performance of AI in cancer diagnosis.}, + url = {http://dx.doi.org/10.1148/radiol.230275}, + file = {Alve23.pdf:pdf\\Alve23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + citation-count = {1}, + automatic = {yes}, + volume = {308}, + all_ss_ids = {740d5d34fcd714870ddf0073fd8956db023319f0}, + pmid = {37724961}, + gscites = {1}, } -@Conference{Alves21a, - author = {Alves, Nat\'{a}lia and Hermans, John and Huisman, Henkjan}, booktitle = RSNA, - title = {CT-based Deep Learning Towards Early Detection Of Pancreatic Ductal Adenocarcinoma}, - abstract = {Purpose: To investigate the performance of a 3D nnUnet based algorithm for pancreatic ductal adenocarcinoma (PDAC)detection and assess the potential of the model for early diagnosis by conducting a subgroup analysis on small (size <2cm) tumors.
Methods and Materials: Portal-venous phase contrast-enhanced computed tomography (CE-CT) scans from a cohort of119 patients with pathology-proven PDAC and 122 consecutive patients with normal pancreas were included in thisretrospective study. For the PDAC cohort, expert segmentations of the pancreas and tumor volumes were available, alongwith the tumor sizes measured on the CT scan. For the non-PDAC cohort, the pancreas segmentations were obtained usinga pre-trained deep learning segmentation model. The pancreas segmentation determined a region of interest from the fullCE-CT as input to the 3D nnUnet. - The network was trained for 1000 epochs with 5-fold cross-validation to differentiatebetween tumor and normal voxels. The predicted heatmaps were thresholded at 0.1. An image was considered a positivecase of PDAC if the predicted tumor volume was greater than 100 mm3. Results: The median tumor size on the PDAC cohort was 2.8 cm (range 1.2 cm - 9.3 cm). The detection task achieved anaverage sensitivity of 0.93 +- 0.04 (111/119), specificity of 0.98 +- 0.02 (119/122) and area under the receiver operatingcharacteristic curve of 0.96 +- 0.04. The median DICE score between the expert and the network tumor segmentations was0.68 +- 0.18. In 2 of the 3 false positive cases the network wrongly detected a hypodense region of the normal pancreas,which could be originated by fat accumulation or natural perfusion differences. The mean sensitivity in the sub-group oftumors with size smaller than 2 cm was 0.92 +- 0.1 (21/23), and the median DICE score in this sub-group was 0.56 +- 0.20. Conclusions: These preliminary results indicate that a 3D nnUnet based algorithm can accurately detect small tumors,suggesting that it could be useful at assisting in early PDAC diagnosis. Clinical Relevance/Application: Early diagnosis improves pancreatic cancer prognosis but requires significant expertise.An automatic tool for the detection of early-stage tumors would reduce expertise requirements.}, - optnote = {DIAG, RADIOLOGY}, - year = {2021}, -} - -@Article{Alves22a, - author = {Alves, Nat\'{a}lia and Schuurmans, Megan and Litjens, Geke and Bosma, Joeran S. and Hermans, John and Huisman, Henkjan}, - title = {Fully Automatic Deep Learning Framework for Pancreatic Ductal Adenocarcinoma Detection on Computed Tomography}, - doi = {https://doi.org/10.3390/cancers14020376}, - pages = {376}, - url = {https://www.mdpi.com/2072-6694/14/2/376/htm}, - abstract = {Early detection improves prognosis in pancreatic ductal adenocarcinoma (PDAC) but is challenging as lesions are often small and poorly defined on contrast-enhanced computed tomography scans (CE-CT). Deep learning can facilitate PDAC diagnosis, however current models still fail to identify small (<2cm) lesions. In this study, state-of-the-art deep learning models were used to develop an automatic framework for PDAC detection, focusing on small lesions. Additionally, the impact of integrating surrounding anatomy was investigated. CE-CT scans from a cohort of 119 pathology-proven PDAC patients and a cohort of 123 patients without PDAC were used to train a nnUnet for automatic lesion detection and segmentation (nnUnet_T). Two additional nnUnets were trained to investigate the impact of anatomy integration: (1) segmenting the pancreas and tumor (nnUnet_TP), (2) segmenting the pancreas, tumor, and multiple surrounding anatomical structures (nnUnet_MS). An external, publicly available test set was used to compare the performance of the three networks. 
The nnUnet_MS achieved the best performance, with an area under the receiver operating characteristic curve of 0.91 for the whole test set and 0.88 for tumors <2cm, showing that state-of-the-art deep learning can detect small PDAC and benefits from anatomy information.}, - journal = {Cancers}, - optnote = {DIAG, RADIOLOGY}, +@conference{Alves21a, + author = {Alves, Nat\'{a}lia and Hermans, John and Huisman, Henkjan}, booktitle = RSNA, + title = {CT-based Deep Learning Towards Early Detection Of Pancreatic Ductal Adenocarcinoma}, + abstract = {Purpose: To investigate the performance of a 3D nnUnet based algorithm for pancreatic ductal adenocarcinoma (PDAC) detection and assess the potential of the model for early diagnosis by conducting a subgroup analysis on small (size <2cm) tumors. Methods and Materials: Portal-venous phase contrast-enhanced computed tomography (CE-CT) scans from a cohort of 119 patients with pathology-proven PDAC and 122 consecutive patients with normal pancreas were included in this retrospective study. For the PDAC cohort, expert segmentations of the pancreas and tumor volumes were available, along with the tumor sizes measured on the CT scan. For the non-PDAC cohort, the pancreas segmentations were obtained using a pre-trained deep learning segmentation model. The pancreas segmentation determined a region of interest from the full CE-CT as input to the 3D nnUnet.
 + The network was trained for 1000 epochs with 5-fold cross-validation to differentiate between tumor and normal voxels. The predicted heatmaps were thresholded at 0.1. An image was considered a positive case of PDAC if the predicted tumor volume was greater than 100 mm3. Results: The median tumor size on the PDAC cohort was 2.8 cm (range 1.2 cm - 9.3 cm). The detection task achieved an average sensitivity of 0.93 +- 0.04 (111/119), specificity of 0.98 +- 0.02 (119/122) and area under the receiver operating characteristic curve of 0.96 +- 0.04. The median DICE score between the expert and the network tumor segmentations was 0.68 +- 0.18. In 2 of the 3 false positive cases the network wrongly detected a hypodense region of the normal pancreas, which could be originated by fat accumulation or natural perfusion differences. The mean sensitivity in the sub-group of tumors with size smaller than 2 cm was 0.92 +- 0.1 (21/23), and the median DICE score in this sub-group was 0.56 +- 0.20. Conclusions: These preliminary results indicate that a 3D nnUnet based algorithm can accurately detect small tumors, suggesting that it could be useful at assisting in early PDAC diagnosis. Clinical Relevance/Application: Early diagnosis improves pancreatic cancer prognosis but requires significant expertise. An automatic tool for the detection of early-stage tumors would reduce expertise requirements.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} + +@article{Alves22a, + author = {Alves, Nat\'{a}lia and Schuurmans, Megan and Litjens, Geke and Bosma, Joeran S. and Hermans, John and Huisman, Henkjan}, + title = {Fully Automatic Deep Learning Framework for Pancreatic Ductal Adenocarcinoma Detection on Computed Tomography}, + doi = {https://doi.org/10.3390/cancers14020376}, + pages = {376}, + url = {https://www.mdpi.com/2072-6694/14/2/376/htm}, + abstract = {Early detection improves prognosis in pancreatic ductal adenocarcinoma (PDAC) but is challenging as lesions are often small and poorly defined on contrast-enhanced computed tomography scans (CE-CT). Deep learning can facilitate PDAC diagnosis, however current models still fail to identify small (<2cm) lesions.
In this study, state-of-the-art deep learning models were used to develop an automatic framework for PDAC detection, focusing on small lesions. Additionally, the impact of integrating surrounding anatomy was investigated. CE-CT scans from a cohort of 119 pathology-proven PDAC patients and a cohort of 123 patients without PDAC were used to train a nnUnet for automatic lesion detection and segmentation (nnUnet_T). Two additional nnUnets were trained to investigate the impact of anatomy integration: (1) segmenting the pancreas and tumor (nnUnet_TP), (2) segmenting the pancreas, tumor, and multiple surrounding anatomical structures (nnUnet_MS). An external, publicly available test set was used to compare the performance of the three networks. The nnUnet_MS achieved the best performance, with an area under the receiver operating characteristic curve of 0.91 for the whole test set and 0.88 for tumors <2cm, showing that state-of-the-art deep learning can detect small PDAC and benefits from anatomy information.}, + journal = {Cancers}, + optnote = {DIAG, RADIOLOGY}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/247178}, - year = {2022}, + year = {2022}, + ss_id = {3a84e2a533df9a1351f7442735c961bc80b5892e}, + all_ss_ids = {['3a84e2a533df9a1351f7442735c961bc80b5892e']}, + gscites = {24}, } -@Conference{Alves22b, - author = {Alves, Nat\'{a}lia and Bosma, Joeran S. and Huisman, Henkjan}, +@conference{Alves22b, + author = {Alves, Nat\'{a}lia and Bosma, Joeran S. and Huisman, Henkjan}, booktitle = RSNA, - title = {Towards Safe Clinical Use of Artificial Intelligence for Cancer Detection Through Uncertainty Quantification}, - abstract = {Purpose: Investigate whether quantifying deep learning (DL) models' uncertainty can help identify low performance cases that require expert attention. Materials and Methods: This retrospective study included two use cases: pancreatic cancer detection on contrast-enhanced computed tomography and clinically significant prostate cancer (csPCa) detection on biparametric magnetic resonance imaging. The pancreatic cohort consisted of 242 (119 cancer) in-house cases for training and 361 cases (80 healthy, 281 cancer) from two external, public datasets for testing. The csPCa cohort consisted of 7756 (3022 csPCa) in-house examinations for training and 300 cases (88 csPCa) from an external center for testing. All tumor cases in the independent test sets were histopathology confirmed. The uncertainty of the proposed automatic cancer detection algorithms was computed using model ensembling. Fifteen DL models were trained with the nnUNet framework and integrated into previously established pipelines for each use case. The models were applied independently to the test sets and uncertainty was quantified in a case level as the standard deviation (sd) of the ensemble. Cases with sd lower than 10% were classified as having low prediction uncertainty, while the remaining were classified as having high prediction uncertainty. The mean and 95% confidence intervals (CI) of the area under the receiver operating characteristic curves (AUC) for the high and low uncertainty groups were calculated. The permutation test was used to assess statistical significance. Results: The DL frameworks' performances for the uncertain groups were significantly lower than for the certain groups for both use cases. For pancreatic cancer detection, the mean AUC dropped from 98.0% (95%CI: 96.2%-99.8%) for the low uncertainty group to 78.0% (95%CI: 68.2%-87.8%) for the high uncertainty group (p<10-4). 
For csPCa, the mean AUC dropped from 92.4% (95%CI: 90.4%-94.4%) for the low uncertainty group to 65.7% (95%CI: 54.7%-76.7%) for the high uncertainty group (p<10-4). The low uncertainty groups included 41% of the pancreatic and 78% of the csPCa test sets. Conclusions: The proposed ensembling method can be used to identify cases where AI models' predictions are uncertain and show low performance. Clinical Relevance Statement: To be safely integrated in the clinic, AI can predict uncertainty and identify uncertain cases with lower performance that should be handled with extra care}, - optnote = {DIAG, RADIOLOGY}, - year = {2022}, +@conference{Alves22b, + author = {Alves, Nat\'{a}lia and Bosma, Joeran S. and Huisman, Henkjan}, booktitle = RSNA, + title = {Towards Safe Clinical Use of Artificial Intelligence for Cancer Detection Through Uncertainty Quantification}, + abstract = {Purpose: Investigate whether quantifying deep learning (DL) models' uncertainty can help identify low performance cases that require expert attention. Materials and Methods: This retrospective study included two use cases: pancreatic cancer detection on contrast-enhanced computed tomography and clinically significant prostate cancer (csPCa) detection on biparametric magnetic resonance imaging. The pancreatic cohort consisted of 242 (119 cancer) in-house cases for training and 361 cases (80 healthy, 281 cancer) from two external, public datasets for testing. The csPCa cohort consisted of 7756 (3022 csPCa) in-house examinations for training and 300 cases (88 csPCa) from an external center for testing. All tumor cases in the independent test sets were histopathology confirmed. The uncertainty of the proposed automatic cancer detection algorithms was computed using model ensembling. Fifteen DL models were trained with the nnUNet framework and integrated into previously established pipelines for each use case. The models were applied independently to the test sets and uncertainty was quantified on a case level as the standard deviation (sd) of the ensemble. Cases with sd lower than 10% were classified as having low prediction uncertainty, while the remaining were classified as having high prediction uncertainty. The mean and 95% confidence intervals (CI) of the area under the receiver operating characteristic curves (AUC) for the high and low uncertainty groups were calculated. The permutation test was used to assess statistical significance. Results: The DL frameworks' performances for the uncertain groups were significantly lower than for the certain groups for both use cases. For pancreatic cancer detection, the mean AUC dropped from 98.0% (95%CI: 96.2%-99.8%) for the low uncertainty group to 78.0% (95%CI: 68.2%-87.8%) for the high uncertainty group (p<10^-4). For csPCa, the mean AUC dropped from 92.4% (95%CI: 90.4%-94.4%) for the low uncertainty group to 65.7% (95%CI: 54.7%-76.7%) for the high uncertainty group (p<10^-4). The low uncertainty groups included 41% of the pancreatic and 78% of the csPCa test sets. Conclusions: The proposed ensembling method can be used to identify cases where AI models' predictions are uncertain and show low performance.
Clinical Relevance Statement: To be safely integrated in the clinic, AI can predict uncertainty and identify uncertain cases with lower performance that should be handled with extra care.},
+  optnote = {DIAG, RADIOLOGY},
+  year = {2022},
+}
+
+@inproceedings{Alves22c,
+  author = {Alves, Nat{\'a}lia and de Wilde, Bram},
+  booktitle = {Fast and Low-Resource Semi-supervised Abdominal Organ Segmentation},
+  title = {Uncertainty-Guided Self-learning Framework for Semi-supervised Multi-organ Segmentation},
+  doi = {10.1007/978-3-031-23911-3_11},
+  pages = {116--127},
+  publisher = {Springer Nature Switzerland},
+  optnote = {DIAG, RADIOLOGY},
+  year = {2022},
}

@article{Amga20,
@@ -1367,8 +1490,9 @@ @article{Amga20
  file = {:pdf/Amga20.pdf:PDF},
  optnote = {DIAG},
  gsid = {2197993663136428528},
-  gscites = {2},
+  gscites = {87},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/220833},
+  all_ss_ids = {['69999230b02054b82254684a73bb8a4c83878d28', 'b4c4c3dc91d42114023b0575c3e2273b87446ff7']},
}

@conference{Amin11,
@@ -1391,20 +1515,50 @@ @article{Anto21
  month = jun,
  optnote = {DIAG, PATHOLOGY, RADIOLOGY},
  year = {2021},
+  all_ss_ids = {['979a9f247700d00ff2c3f0612d5eb001379f93c8', 'c397c6f1480ac8e3ed875adad96e9b3e00c37f26']},
+  gscites = {394},
}

-@Article{Anto22,
-  author = {Antonelli, Michela and Reinke, Annika and Bakas, Spyridon and Farahani, Keyvan and Kopp-Schneider, Annette and Landman, Bennett A. and Litjens, Geert and Menze, Bjoern and Ronneberger, Olaf and Summers, Ronald M. and van Ginneken, Bram and Bilello, Michel and Bilic, Patrick and Christ, Patrick F. and Do, Richard K. G. and Gollub, Marc J. and Heckers, Stephan H. and Huisman, Henkjan and Jarnagin, William R. and McHugo, Maureen K. and Napel, Sandy and Pernicka, Jennifer S. Golia and Rhode, Kawal and Tobon-Gomez, Catalina and Vorontsov, Eugene and Meakin, James A.
and Ourselin, Sebastien and Wiesenfarth, Manuel and Arbelaez, Pablo and Bae, Byeonguk and Chen, Sihong and Daza, Laura and Feng, Jianjiang and He, Baochun and Isensee, Fabian and Ji, Yuanfeng and Jia, Fucang and Kim, Ildoo and Maier-Hein, Klaus and Merhof, Dorit and Pai, Akshay and Park, Beomhee and Perslev, Mathias and Rezaiifar, Ramin and Rippel, Oliver and Sarasua, Ignacio and Shen, Wei and Son, Jaemin and Wachinger, Christian and Wang, Liansheng and Wang, Yan and Xia, Yingda and Xu, Daguang and Xu, Zhanwei and Zheng, Yefeng and Simpson, Amber L. and Maier-Hein, Lena and Cardoso, M. Jorge},
+  title = {The {Medical} {Segmentation} {Decathlon}},
+  doi = {10.1038/s41467-022-30695-9},
+  number = {1},
+  pages = {4128},
+  volume = {13},
  abstract = {International challenges have become the de facto standard for comparative assessment of image analysis algorithms. Although segmentation is the most widely investigated medical image processing task, the various challenges have been organized to focus only on specific clinical tasks. We organized the Medical Segmentation Decathlon (MSD)-a biomedical image analysis challenge, in which algorithms compete in a multitude of both tasks and modalities to investigate the hypothesis that a method capable of performing well on multiple tasks will generalize well to a previously unseen task and potentially outperform a custom-designed solution. MSD results confirmed this hypothesis, moreover, MSD winner continued generalizing well to a wide range of other clinical problems for the next two years. Three main conclusions can be drawn from this study: (1) state-of-the-art image segmentation algorithms generalize well when retrained on unseen tasks; (2) consistent algorithmic performance across multiple tasks is a strong surrogate of algorithmic generalizability; (3) the training of accurate AI segmentation models is now commoditized to scientists that are not versed in AI model training.},
+  file = {:pdf/Anto22.pdf:PDF},
+  journal = NATCOM,
+  pmid = {35840566},
+  year = {2022},
+  all_ss_ids = {['979a9f247700d00ff2c3f0612d5eb001379f93c8', 'c397c6f1480ac8e3ed875adad96e9b3e00c37f26']},
+  gscites = {394},
+}
+
+@conference{Anto23,
+  author = {Antonissen, N and Venkadesh, K and Gietema, H and Vliegenthart, R and Saghir, Z and Scholten, E. Th and Prokop, M and Schaefer-Prokop, C and Jacobs, C},
+  booktitle = ECR,
+  title = {Retrospective validation of nodule management based on deep learning-based malignancy thresholds in lung cancer screening},
+  abstract = {Purpose: We previously developed and validated a deep learning (DL) algorithm for malignancy risk estimation of screen-detected nodules. The nodule risk cut-off for a positive screen, triggering more intensive follow-up (either short-term follow-up, PET-CT or biopsy), varies in existing nodule management protocols: 1-2% for Lung-RADS (category 3), 6% for PanCan2b (category 3). In this study, we investigated two DL-based malignancy thresholds to define a positive screen, compared to existing nodule management protocols.
+  Methods and materials: All baseline CT-scans from the Danish Lung Cancer Screening Trial were linked to lung cancer diagnosis within 2 years, resulting in 2,019 non-cancer and 18 cancer cases. The DL-based malignancy risk was computed for all screen-detected nodules using two malignancy risk cut-off points (6% and 10%) as thresholds for a positive screen.
For both Lung-RADS and PanCan2b, we used the published nodule-risk cut-offs for a positive screen. Sensitivity and False Positive Rate (FPR) were calculated for all baseline scans (n=2,037) using the risk dominant nodule per scan.
+  Results: At a threshold of 6%, DL achieved the highest sensitivity with 88.9%, compared to 83.3% with Lung-RADS and 77.8% with PanCan2b. DL and PanCan2b yielded comparable FPRs of 3.6% and 4.1%, respectively, while Lung-RADS had a higher FPR of 8.7%. Increasing the DL threshold to >=10% resulted in a sensitivity of 88.9% and an FPR of 2.5%.
+  Conclusion: DL-based nodule risk cut-offs achieved the highest sensitivity and lowest FPR for defining a positive screen, triggering more intense diagnostic work-up. Increasing the risk cut-off from 6% to 10% further decreased the FPR without alteration of sensitivity.
+  Limitations: This study is a retrospective analysis on data from one screening trial and one screening round. More external validation is needed, including validation for incidence screenings.},
+  optnote = {DIAG, RADIOLOGY},
+  year = {2023},
+}
+
+@conference{Anto23a,
+  author = {Antonissen, N and Venkadesh, K and Gietema, H and Vliegenthart, R and Saghir, Z and Silva, M and Pastorino, E and Scholten, E. Th and Prokop, M and Schaefer-Prokop, C and Jacobs, C},
+  booktitle = ESTI,
+  title = {Retrospective identification of low-risk individuals eligible for biennial lung cancer screening using PanCan-based and deep learning-based risk thresholds},
+  abstract = {Purpose: Current nodule management protocols for managing negative screening results have varying follow-up intervals; Lung-RADS recommends a 1-year screening interval for all negative screens (category 1/2), while the International Lung Screen Trial (ILST) protocol recommends a 1-year interval for participants with indeterminate nodules (PanCan score 1.5% - 6%) and a 2-year interval for participants with no or very low risk nodules (PanCan score < 1.5%). In this study, we retrospectively evaluated the use of PanCan and DL-based malignancy thresholds to identify individuals eligible for biennial screening, aiming to reduce screening-related harms and enhance cost-effectiveness without causing potential delay of cancer diagnosis.
+  Methods and materials: All baseline CT-scans from the Danish Lung Cancer Screening Trial (DLCST) and Multicentric Italian Lung Detection (MILD) were pooled and linked to a lung cancer diagnosis within 2 years, resulting in 4,157 non-cancer and 53 cancer cases. PanCan1a and DL-based malignancy risk scores were calculated for all screen-annotated nodules. For cases with no screen-annotated nodules, the risk score for participants was set to 0%. For both risk calculators, we used a nodule-risk cut-off of < 1.5% to identify low-risk participants for biennial follow-up, based on the ILST protocol. We computed the number of low-risk participants eligible for biennial screening for all included baseline scans (n=4,210) using the risk dominant nodule per scan and calculated the number of cancer cases in the biennial group.
+  Results: The DL-based and PanCan-based risk thresholds < 1.5% identified 3,729 and 3,720 individuals, respectively, meeting the criteria for biennial screening. This would result in a reduction of 88.6% and 88.4% of the scans in the second screening round, respectively. The group referred for biennial screening included 14 and 16 cancers with DL and PanCan-based risk scores <1.5%, respectively.
Most of the cancer cases (n=13) had no nodule annotated at baseline CT, leading to a 0% risk score at baseline. Retrospectively, 4 of the 13 cancers were visible in the baseline scan, yet primarily not annotated by the screening radiologist.
+  Conclusion: Risk threshold-based identification of low-risk subjects for biennial screening largely reduces the number of 1-year follow-up scans. DL and PanCan for risk assessment performed very similarly, indicating the potential of DL for readily available risk assessment of baseline scans. A risk threshold of < 1.5%, as implemented in the ILST protocol, leads to delayed diagnosis of cancers either primarily missed during baseline or developing as interval cancers. More research is needed to study the type of cancers with delayed diagnosis and whether such delay leads to diagnostic stage shift.
+  Limitations: This study is a retrospective analysis on data from two screening trials, restricted to the baseline round.},
+  optnote = {DIAG, RADIOLOGY},
+  year = {2023},
}

@article{Apru19,
@@ -1421,21 +1575,23 @@ @article{Apru19
  pmid = {30993030},
  month = {4},
  gsid = {10979800637142162997},
-  gscites = {3},
+  gscites = {19},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/202849},
+  ss_id = {0c44949334771d4e331d792bc0db77c7fe708ca2},
+  all_ss_ids = {['0c44949334771d4e331d792bc0db77c7fe708ca2']},
}

-@MastersThesis{Arch22,
-  author = {Anwai Archit and Bram van Ginneken},
-  title = {Automated Abdominal Aortic Aneurysm Detection on CT Scans},
  abstract = {Computed tomography (CT) scans enable the detection of local enlargements in the abdominal aorta (AA), resulting to straight-forward quantitative and qualitative understandings, typically instated as abdominal aortic aneurysm (AAA). Although, the segmentation of aorta is disposed to stall in presence of expanded lumen or intraluminal thrombus as a result of insufficient spiteful examples, raising the susceptibility for uneventful outcomes of an aortic rupture.
-  The motion of this research proposes to develop and validate a fully automated deep learning algorithm to segment and measure AAAs on abdominal CT scans. The computer-aided detection (CAD) model is steered by a self-configuring convolutional neural network (CNN), which plumps for essential decisions in a standardised environment to design the 3D segmentation pipeline, regardless of the dataset diversity in the domain. It uses an additional 3D instance-based vertebral segmentation software bundle for independent vertebrae labelling. It coheres with a post-processing routine to perceive the growth patterns by investigation across the aortic centerline around strong anatomical landmarks. It benefits from supplementary measurement techniques of the maximal diameter and cross-section area for gaining extensive insights of the main characteristics of AAA. The system evaluates the relationship between the AA and vertebra level surface features. Conclusively, it generates a portable document, devised to group the anticipated aneurysmal information.
-  The 3D CAD system agrees with expert's suggestions about the existence of the aneurysm in 398 institutional images, exhibiting a high capacity to generalize across genders and portions of a full body CT scan using solely radiologist-supported quantitative speculations from the radiology reports.
The end-to-end routine achieves an 95.7% dice score coefficient (DSC) on the validation subset for patient-specific cases, indicating a modest agreement with radiologists within an average difference of 0.3 cm in the relative measurement of maximal AAA diameter, thus justifying the possibility of generalizing to the detection of aneurysms using report-based textual information only.},
-  file = {Arch22.pdf:pdf\\Arch22.pdf:PDF},
-  journal = {Master thesis},
-  optnote = {DIAG},
-  school = {Radboud University Medical Center},
-  year = {2022},
+  The motion of this research proposes to develop and validate a fully automated deep learning algorithm to segment and measure AAAs on abdominal CT scans. The computer-aided detection (CAD) model is steered by a self-configuring convolutional neural network (CNN), which plumps for essential decisions in a standardised environment to design the 3D segmentation pipeline, regardless of the dataset diversity in the domain. It uses an additional 3D instance-based vertebral segmentation software bundle for independent vertebrae labelling. It coheres with a post-processing routine to perceive the growth patterns by investigation across the aortic centerline around strong anatomical landmarks. It benefits from supplementary measurement techniques of the maximal diameter and cross-section area for gaining extensive insights of the main characteristics of AAA. The system evaluates the relationship between the AA and vertebra level surface features. Conclusively, it generates a portable document, devised to group the anticipated aneurysmal information.
+  The 3D CAD system agrees with expert's suggestions about the existence of the aneurysm in 398 institutional images, exhibiting a high capacity to generalize across genders and portions of a full body CT scan using solely radiologist-supported quantitative speculations from the radiology reports. The end-to-end routine achieves a 95.7% dice score coefficient (DSC) on the validation subset for patient-specific cases, indicating a modest agreement with radiologists within an average difference of 0.3 cm in the relative measurement of maximal AAA diameter, thus justifying the possibility of generalizing to the detection of aneurysms using report-based textual information only.},
+  file = {Arch22.pdf:pdf\\Arch22.pdf:PDF},
+  journal = {Master thesis},
+  optnote = {DIAG},
+  school = {Radboud University Medical Center},
+  year = {2022},
}

@conference{Ardu20,
@@ -1444,17 +1600,17 @@ @conference{Ardu20
  title = {Artificial Intelligence for the Classification and Quantification of Reticular Pseudodrusen in Multimodal Retinal Images},
  url = {https://www.euretina.org/congress/amsterdam-2020/virtual-2020-freepapers/},
  abstract = {Purpose:
-  Reticular pseudodrusen (RPD) are retinal lesions highly correlated with the risk of developing end-stage age-related macular degeneration (AMD) and, therefore, relevant biomarkers for understanding the progression of AMD. Due to the subtle features characterizing RPD, multiple imaging modalities are often necessary to confirm the presence and extension of RPD, considerably increasing the workload of the expert graders. We propose a deep neural network (DNN) architecture that classifies and quantifies RPD using multimodal retinal images.
-  Setting:
-  A cross-sectional study that compares the performance of three expert graders with a DNN trained for identifying and quantifying RPD.
Conducted on retinal images drawn from the Rotterdam Study, a population-based cohort, in three modalities: color fundus photographs (CFP), fundus autofluorescence images (FAF) and near-infrared reflectance images (NIR). - Methods: - Multimodal images of 278 eyes of 230 patients were retrieved from the Rotterdam Study database. Of those, 72 eyes showed presence of RPD, 108 had soft distinct/indistinct drusen, and 98 had no signs of drusen as confirmed by the Rotterdam Study graders. Delineations of the areas affected with RPD were made in consensus by two human experts using CFP and NIR images simultaneously and were used as reference standard (RS) for RPD area quantification. The data was randomly divided, patient-wise, in training (243) and test (35) sets for model development and evaluation. A DNN was developed for RPD classification and quantification. The proposed DNN is based on an encoder-decoder architecture. The model jointly inputs a set of co-registered retinal image modalities (CFP, NIR, FAF) and outputs a heatmap image containing, per pixel, the likelihood of RPD presence. The 99th percentile of the values contained in this heatmap measures the likelihood of RPD presence. Three independent graders manually delineated RPD in all eyes of the test set based on the CFP and NIR and their performance was compared with the DNN in the tasks of RPD classification and quantification. - Results: - The proposed DNN obtained an area under the receiver operating characteristic curve (AUROC) with 95% confidence interval (CI) of 0.939[0.818-1.0], a sensitivity (SE) of 0.928 and specificity (SP) of 0.809 for the detection of RPD in multimodal imaging. For RPD quantification, the DNN achieved a mean Dice coefficient (DSC) of 0.632+-0.261 and an intra-class correlation (ICC) of 0.676[0.294-0.999]. Comparably, for RPD classification, grader 1 obtained SE/SP pairs of 1.0/0.785, grader 2 of 1.0/0.5 and grader 3 of 1.0/0.785. For RPD quantification, the graders obtained mean DSC of 0.619+-0.196, 0.573+-0.170 and 0.697+-0.157, respectively, and an ICC of 0.721[0.340-0.999], 0.597[0.288-0.999], 0.751[0.294-0.999], respectively. Of the DNN's three false negatives, none of them was correctly classified by the three graders. The model correctly classified RPD in three of the six eyes where graders disagreed and in the only eye where none of the graders found RPD. Overall, 65.1% of the area indicated as RPD by the reference was delineated by at least one grader and only 26.5% of the total was graded as RPD by all experts. The DNN only missed 23.2% of the areas that all three graders identified correctly. - Conclusions: - The proposed DNN showed promising capacities in the tasks of classifying and quantifying RPD lesions on multimodal retinal images. The results show that the model is able to correctly classify and quantify RPD on eyes where lesions are difficult to spot. The probabilistic output of the model allows for the classification of RPD at different levels of confidence and indicates what retinal areas are most likely affected. This is in line with the manual assessment done by the graders. To this point, the model is developed to classify and quantify RPD only on CFP, FAF and NIR. However, introducing other imaging modalities, such as OCT, might help diminish ambiguities in the classification and quantification of this abnormality. Therefore, a future direction for improving the proposed method is to include OCT scans as an additional input to the model. 
Automatic classification and quantification of RPD using deep learning on multimodal images will enable the automatic and accurate analysis of increasingly large amounts of data for clinical studies and will facilitate AMD screening in the elderly by decreasing the workload of the expert graders. - Financial Disclosure: - None}, + Reticular pseudodrusen (RPD) are retinal lesions highly correlated with the risk of developing end-stage age-related macular degeneration (AMD) and, therefore, relevant biomarkers for understanding the progression of AMD. Due to the subtle features characterizing RPD, multiple imaging modalities are often necessary to confirm the presence and extension of RPD, considerably increasing the workload of the expert graders. We propose a deep neural network (DNN) architecture that classifies and quantifies RPD using multimodal retinal images. + Setting: + A cross-sectional study that compares the performance of three expert graders with a DNN trained for identifying and quantifying RPD. Conducted on retinal images drawn from the Rotterdam Study, a population-based cohort, in three modalities: color fundus photographs (CFP), fundus autofluorescence images (FAF) and near-infrared reflectance images (NIR). + Methods: + Multimodal images of 278 eyes of 230 patients were retrieved from the Rotterdam Study database. Of those, 72 eyes showed presence of RPD, 108 had soft distinct/indistinct drusen, and 98 had no signs of drusen as confirmed by the Rotterdam Study graders. Delineations of the areas affected with RPD were made in consensus by two human experts using CFP and NIR images simultaneously and were used as reference standard (RS) for RPD area quantification. The data was randomly divided, patient-wise, in training (243) and test (35) sets for model development and evaluation. A DNN was developed for RPD classification and quantification. The proposed DNN is based on an encoder-decoder architecture. The model jointly inputs a set of co-registered retinal image modalities (CFP, NIR, FAF) and outputs a heatmap image containing, per pixel, the likelihood of RPD presence. The 99th percentile of the values contained in this heatmap measures the likelihood of RPD presence. Three independent graders manually delineated RPD in all eyes of the test set based on the CFP and NIR and their performance was compared with the DNN in the tasks of RPD classification and quantification. + Results: + The proposed DNN obtained an area under the receiver operating characteristic curve (AUROC) with 95% confidence interval (CI) of 0.939[0.818-1.0], a sensitivity (SE) of 0.928 and specificity (SP) of 0.809 for the detection of RPD in multimodal imaging. For RPD quantification, the DNN achieved a mean Dice coefficient (DSC) of 0.632+-0.261 and an intra-class correlation (ICC) of 0.676[0.294-0.999]. Comparably, for RPD classification, grader 1 obtained SE/SP pairs of 1.0/0.785, grader 2 of 1.0/0.5 and grader 3 of 1.0/0.785. For RPD quantification, the graders obtained mean DSC of 0.619+-0.196, 0.573+-0.170 and 0.697+-0.157, respectively, and an ICC of 0.721[0.340-0.999], 0.597[0.288-0.999], 0.751[0.294-0.999], respectively. Of the DNN's three false negatives, none of them was correctly classified by the three graders. The model correctly classified RPD in three of the six eyes where graders disagreed and in the only eye where none of the graders found RPD. 
Overall, 65.1% of the area indicated as RPD by the reference was delineated by at least one grader and only 26.5% of the total was graded as RPD by all experts. The DNN only missed 23.2% of the areas that all three graders identified correctly. + Conclusions: + The proposed DNN showed promising capacities in the tasks of classifying and quantifying RPD lesions on multimodal retinal images. The results show that the model is able to correctly classify and quantify RPD on eyes where lesions are difficult to spot. The probabilistic output of the model allows for the classification of RPD at different levels of confidence and indicates what retinal areas are most likely affected. This is in line with the manual assessment done by the graders. To this point, the model is developed to classify and quantify RPD only on CFP, FAF and NIR. However, introducing other imaging modalities, such as OCT, might help diminish ambiguities in the classification and quantification of this abnormality. Therefore, a future direction for improving the proposed method is to include OCT scans as an additional input to the model. Automatic classification and quantification of RPD using deep learning on multimodal images will enable the automatic and accurate analysis of increasingly large amounts of data for clinical studies and will facilitate AMD screening in the elderly by decreasing the workload of the expert graders. + Financial Disclosure: + None}, month = {9}, optnote = {DIAG, RADIOLOGY}, year = {2020}, @@ -1468,6 +1624,8 @@ @article{Ares18 abstract = {We propose iW-Net, a deep learning model that allows for both automatic and interactive segmentation of lung nodules in computed tomography images. iW-Net is composed of two blocks: the first one provides an automatic segmentation and the second one allows to correct it by analyzing 2 points introduced by the user in the nodule's boundary. For this purpose, a physics inspired weight map that takes the user input into account is proposed, which is used both as a feature map and in the system's loss function. Our approach is extensively evaluated on the public LIDC-IDRI dataset, where we achieve a state-of-the-art performance of 0.55 intersection over union vs the 0.59 inter-observer agreement. 
Also, we show that iW-Net allows to correct the segmentation of small nodules, essential for proper patient referral decision, as well as improve the segmentation of the challenging non-solid nodules and thus may be an important tool for increasing the early diagnosis of lung cancer.}, optnote = {DIAG}, month = {11}, + all_ss_ids = {['b3dc561dd990cebc626e10318b8582a198aa3571']}, + gscites = {63}, } @inproceedings{Ares18a, @@ -1482,7 +1640,9 @@ @inproceedings{Ares18a file = {:pdf\\Ares18a.pdf:PDF}, optnote = {DIAG}, gsid = {5919517379453999457}, - gscites = {7}, + gscites = {15}, + ss_id = {ab99f176fa295a776d6a1fa98d5a5c0156fd4302}, + all_ss_ids = {['ab99f176fa295a776d6a1fa98d5a5c0156fd4302']}, } @article{Ares19, @@ -1501,8 +1661,10 @@ @article{Ares19 pmid = {31406194}, month = {8}, gsid = {17366457829799450871}, - gscites = {8}, + gscites = {63}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/207075}, + ss_id = {b3dc561dd990cebc626e10318b8582a198aa3571}, + all_ss_ids = {['b3dc561dd990cebc626e10318b8582a198aa3571']}, } @article{Argu19, @@ -1515,6 +1677,8 @@ @article{Argu19 file = {Argu19.pdf:pdf\\Argu19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {9}, + all_ss_ids = {['a9a9dfdccb4e656e7ff2b88fc88666415ab7e189', 'cc7605f2b7e61723f12839fabc1066da0cc8744b', '6425e3f4c37f8deb9e8dc933e34d49aa843635b9']}, + gscites = {0}, } @article{Arma08, @@ -1549,6 +1713,9 @@ @article{Arma18 optnote = {DIAG, RADIOLOGY}, pmid = {30840739}, month = {11}, + ss_id = {86cfa3c1324e704263cd7e39d9bed566c47f556e}, + all_ss_ids = {['86cfa3c1324e704263cd7e39d9bed566c47f556e']}, + gscites = {105}, } @conference{Arnt15, @@ -1566,15 +1733,17 @@ @article{Arnt16 year = {2016}, doi = {10.1212/WNL.0000000000003123}, abstract = {Objective: To study the long-term prevalence of small vessel disease after young stroke and to compare this to healthy controls. - Methods: This prospective cohort study comprises 337 patients with an ischemic stroke or TIA, aged 18-50 years without a history of TIA or stroke. In addition 90 age and sex matched controls were included. At follow-up lacunes, microbleeds and white matter hyperintensitie (WMH) volume were assessed using MRI. To investigate the relation between riks factors and small vessel disease, logistic and linear regression were used. - Results: After mean follow-up of 9.9 (SD 8.1) years, 337 patients were included (227 with an ischemic stroke and 110 with a TIA). Mean age for patients was 49.8 (SD 10.3) years and 45.4% were men, for controls mean age was 49.4 (SD 11.9) and 45.6% were men. Compared with controls, patients more often had at least one lacune (24.0% versus 4.5%, p<0.0001). In addition, they had a higher WMH-volume (median 1.5 ml (IQR 0.5-3.7) versus 0.4 ml (IQR 0.0-1.0), p<0.001). Compared with controls, patients had the same volume of WMH on average 10-20 years earlier. In the patient group, age at stroke (beta=0.03 (95%CI 0.02-0.04) hypertension (beta=0.22, 95%CI 0.04-0.39) and smoking (beta=0.18, 95%CI 0.01-0.34) at baseline were associated with WMH-volume. - Conclusions: Patients with a young stroke have a higher burden of small vessel disease than controls adjusted for confounders. Cerebral aging seems accelerated by 10-20 years in these patients, which may suggest an increased vulnerability to vascular risk factors.}, + Methods: This prospective cohort study comprises 337 patients with an ischemic stroke or TIA, aged 18-50 years without a history of TIA or stroke. In addition 90 age and sex matched controls were included. 
At follow-up lacunes, microbleeds and white matter hyperintensities (WMH) volume were assessed using MRI. To investigate the relation between risk factors and small vessel disease, logistic and linear regression were used.
  Results: After mean follow-up of 9.9 (SD 8.1) years, 337 patients were included (227 with an ischemic stroke and 110 with a TIA). Mean age for patients was 49.8 (SD 10.3) years and 45.4% were men, for controls mean age was 49.4 (SD 11.9) and 45.6% were men. Compared with controls, patients more often had at least one lacune (24.0% versus 4.5%, p<0.0001). In addition, they had a higher WMH-volume (median 1.5 ml (IQR 0.5-3.7) versus 0.4 ml (IQR 0.0-1.0), p<0.001). Compared with controls, patients had the same volume of WMH on average 10-20 years earlier. In the patient group, age at stroke (beta=0.03 (95%CI 0.02-0.04) hypertension (beta=0.22, 95%CI 0.04-0.39) and smoking (beta=0.18, 95%CI 0.01-0.34) at baseline were associated with WMH-volume.
  Conclusions: Patients with a young stroke have a higher burden of small vessel disease than controls adjusted for confounders. Cerebral aging seems accelerated by 10-20 years in these patients, which may suggest an increased vulnerability to vascular risk factors.},
  file = {Arnt16.pdf:pdf\\Arnt16.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  pmid = {27521431},
  month = {8},
  gsid = {17987708002828940533},
-  gscites = {18},
+  gscites = {27},
+  ss_id = {c73a1da11addbc1436980c4a327124bfacc491d3},
+  all_ss_ids = {['c73a1da11addbc1436980c4a327124bfacc491d3']},
}

@article{Arta09,
@@ -1594,6 +1763,8 @@ @article{Arta09
  gsid = {7831132571538357123},
  gscites = {38},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/79921},
+  ss_id = {6f8ce3c9888cefd812fc1de939e5f7a73786152e},
+  all_ss_ids = {['6f8ce3c9888cefd812fc1de939e5f7a73786152e']},
}

@inproceedings{Arta09a,
@@ -1612,6 +1783,8 @@ @inproceedings{Arta09a
  month = {2},
  gsid = {4299735518106080724},
  gscites = {3},
+  ss_id = {d2056f4696e6badb76a795147c1f7fbb07ecf076},
+  all_ss_ids = {['d2056f4696e6badb76a795147c1f7fbb07ecf076']},
}

@inproceedings{Arzh06,
@@ -1625,6 +1798,8 @@ @inproceedings{Arzh06
  optnote = {DIAG, RADIOLOGY},
  gsid = {12424497119551485003},
  gscites = {4},
+  ss_id = {e03a6b506700d339efbf75c7547aa6507ec792ae},
+  all_ss_ids = {['e03a6b506700d339efbf75c7547aa6507ec792ae']},
}

@inproceedings{Arzh06a,
@@ -1642,6 +1817,8 @@ @inproceedings{Arzh06a
  month = {3},
  gsid = {9920783540454814909},
  gscites = {12},
+  ss_id = {a96e8a989d891bf4d8b015397a1c6ebf1cf39012},
+  all_ss_ids = {['a96e8a989d891bf4d8b015397a1c6ebf1cf39012']},
}

@article{Arzh07,
@@ -1689,7 +1866,9 @@ @inproceedings{Arzh07c
  file = {Arzh07c.pdf:pdf\\Arzh07c.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  gsid = {14279526554979730903},
-  gscites = {5},
+  gscites = {6},
+  ss_id = {3aed0d4fafa12efe822da4f3614caa46da2cbcf1},
+  all_ss_ids = {['3aed0d4fafa12efe822da4f3614caa46da2cbcf1']},
}

@article{Arzh09,
@@ -1706,7 +1885,9 @@ @article{Arzh09
  optnote = {DIAG, RADIOLOGY, TB},
  number = {9},
  month = {9},
-  gscites = {29},
+  gscites = {35},
+  ss_id = {445c06e086a985f3b08982ec6cc126bd92e40d11},
+  all_ss_ids = {['445c06e086a985f3b08982ec6cc126bd92e40d11']},
}

@inproceedings{Arzh09b,
@@ -1722,7 +1903,9 @@ @inproceedings{Arzh09b
  optnote = {DIAG, RADIOLOGY, TB},
  number = {5762},
  gsid = {14232725066672225972},
-  gscites = {24},
+  gscites = {25},
+  ss_id = {e007b22f9cd225c0831cafe69b2074c5ca2a6da1},
+  all_ss_ids = {['e007b22f9cd225c0831cafe69b2074c5ca2a6da1']},
}

@phdthesis{Arzh09c,
@@ -1756,6 +1939,8 @@ @article{Arzh10
  gsid = {10305933231621539339},
gscites = {24}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/88430}, + ss_id = {e9a1022bef95937ea3a55df1ea1d5fd3aec2ebf5}, + all_ss_ids = {['e9a1022bef95937ea3a55df1ea1d5fd3aec2ebf5']}, } @conference{Aswo19, @@ -1763,12 +1948,12 @@ @conference{Aswo19 booktitle = {European Congress of Pathology}, title = {Potential of an AI-based digital biomarker to predict neoadjuvant chemotherapy response from preoperative biopsies of Luminal-B breast cancer}, abstract = {Background & objectives: Invasive breast cancer (IBC) is increasingly treated with neoadjuvant chemotherapy. Yet, only 15-20% of Luminal-B patients achieve pathological complete response (pCR). We developed an AI-based biomarker to predict pCR of Luminal-B IBC from preoperative biopsies stained with H&E. - - Methods: First, we trained a deep learning model on a multi-centric dataset of n=277 manually annotated breast cancer H&E-stained histopathology images to segment tumour, lymphocytes and other tissue. Second, we applied the segmentation model to an independent set of n=297 Luminal-B pre-treatment biopsies. For each case, we computed our biomarker: the proportion of tumour within 80mm distance from lymphocyte regions. - - Results: From the Luminal-B cohort, 32/297 cases (11%) were labelled as "pCR" when no remaining cancer cells were reported for the post-operative surgical resection. The biomarker showed significant (p<<0.01) correlation with pCR with a point biserial correlation coefficient of 0.27. Setting a cut-off value based on the optimal operating point of the ROC curve (AUC=0.69), we reached a sensitivity of 0.53 and a specificity of 0.74. - - Conclusion: The developed deep-learning based biomarker quantifies the proportion of inflammatory tumour regions. It shows promising results for predicting pCR for Luminal-B breast cancer from pre-treatment biopsies stained with H&E.}, + + Methods: First, we trained a deep learning model on a multi-centric dataset of n=277 manually annotated breast cancer H&E-stained histopathology images to segment tumour, lymphocytes and other tissue. Second, we applied the segmentation model to an independent set of n=297 Luminal-B pre-treatment biopsies. For each case, we computed our biomarker: the proportion of tumour within 80mm distance from lymphocyte regions. + + Results: From the Luminal-B cohort, 32/297 cases (11%) were labelled as "pCR" when no remaining cancer cells were reported for the post-operative surgical resection. The biomarker showed significant (p<<0.01) correlation with pCR with a point biserial correlation coefficient of 0.27. Setting a cut-off value based on the optimal operating point of the ROC curve (AUC=0.69), we reached a sensitivity of 0.53 and a specificity of 0.74. + + Conclusion: The developed deep-learning based biomarker quantifies the proportion of inflammatory tumour regions. It shows promising results for predicting pCR for Luminal-B breast cancer from pre-treatment biopsies stained with H&E.}, optnote = {DIAG, RADIOLOGY}, year = {2019}, } @@ -1782,8 +1967,81 @@ @inproceedings{Aswo21 pages = {1 -- 7}, year = {2021}, doi = {10.1117/12.2581943}, + ss_id = {6b71ee48bb52c6aab084e7325669e0116dda292b}, + all_ss_ids = {['6b71ee48bb52c6aab084e7325669e0116dda292b']}, + gscites = {8}, +} + +@article{Aswo23, + author = {Aswolinskiy, Witali and Munari, Enrico and Horlings, Hugo M. and Mulder, Lennart and Bogina, Giuseppe and Sanders, Joyce and Liu, Yat-Hee and van den Belt-Dusebout, Alexandra W. 
and Tessier, Leslie and Balkenhol, Maschenka and Stegeman, Michelle and Hoven, Jeffrey and Wesseling, Jelle and van der Laak, Jeroen and Lips, Esther H. and Ciompi, Francesco},
  title = {PROACTING: predicting pathological complete response to neoadjuvant chemotherapy in breast cancer from routine diagnostic histopathology biopsies with deep learning},
  doi = {10.1186/s13058-023-01726-0},
  url = {http://dx.doi.org/10.1186/s13058-023-01726-0},
  volume = {25},
  abstract = {Background
  Invasive breast cancer patients are increasingly being treated with neoadjuvant chemotherapy; however, only a fraction of the patients respond to it completely. To prevent overtreatment, there is an urgent need for biomarkers to predict treatment response before administering the therapy.

  Methods
  In this retrospective study, we developed hypothesis-driven interpretable biomarkers based on deep learning, to predict the pathological complete response (pCR, i.e., the absence of tumor cells in the surgical resection specimens) to neoadjuvant chemotherapy solely using digital pathology H&E images of pre-treatment breast biopsies. Our approach consists of two steps: First, we use deep learning to characterize aspects of the tumor micro-environment by detecting mitoses and segmenting tissue into several morphology compartments including tumor, lymphocytes and stroma. Second, we derive computational biomarkers from the segmentation and detection output to encode slide-level relationships of components of the tumor microenvironment, such as tumor and mitoses, stroma, and tumor infiltrating lymphocytes (TILs).

  Results
  We developed and evaluated our method on slides from n=721 patients from three European medical centers with triple-negative and Luminal B breast cancers and performed external independent validation on n=126 patients from a public dataset. We report the predictive value of the investigated biomarkers for predicting pCR with areas under the receiver operating characteristic curve between 0.66 and 0.88 across the tested cohorts.

  Conclusion
  The proposed computational biomarkers predict pCR, but will require more evaluation and finetuning for clinical application. Our results further corroborate the potential role of deep learning to automate TILs quantification, and their predictive value in breast cancer neoadjuvant treatment planning, along with automated mitoses quantification. We made our method publicly available to extract segmentation-based biomarkers for research purposes.},
  automatic = {yes},
  citation-count = {0},
  file = {Aswo23.pdf:pdf\\Aswo23.pdf:PDF},
  journal = {Breast Cancer Research},
  optnote = {DIAG, RADIOLOGY},
  year = {2023},
}

@article{Aubr21,
  author = {Aubreville, Marc and Bertram, Christof and Veta, Mitko and Klopfleisch, Robert and Stathonikos, Nikolas and Breininger, Katharina and ter Hoeve, Natalie and Ciompi, Francesco and Maier, Andreas},
  title = {Quantifying the Scanner-Induced Domain Gap in Mitosis Detection},
  doi = {10.48550/ARXIV.2103.16515},
  year = {2021},
  abstract = {Automated detection of mitotic figures in histopathology images has seen vast improvements, thanks to modern deep learning-based pipelines. Application of these methods, however, is in practice limited by strong variability of images between labs. This results in a domain shift of the images, which causes a performance drop of the models.
Hypothesizing that the scanner device plays a decisive role in this effect, we evaluated the susceptibility of a standard mitosis detection approach to the domain shift introduced by using a different whole slide scanner. Our work is based on the MICCAI-MIDOG challenge 2021 data set, which includes 200 tumor cases of human breast cancer and four scanners. Our work indicates that the domain shift induced not by biochemical variability but purely by the choice of acquisition device is underestimated so far. Models trained on images of the same scanner yielded an average F1 score of 0.683, while models trained on a single other scanner only yielded an average F1 score of 0.325. Training on another multi-domain mitosis dataset led to mean F1 scores of 0.52. We found this not to be reflected by domain-shifts measured as proxy A distance-derived metric.}, + url = {https://arxiv.org/abs/2103.16515}, + file = {Aubr21.pdf:pdf\\Aubr21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2103.16515}, + automatic = {yes}, +} + +@article{Aubr22, + author = {Aubreville, Marc and Stathonikos, Nikolas and Bertram, Christof A. and Klopfleisch, Robert and Ter Hoeve, Natalie and Ciompi, Francesco and Wilm, Frauke and Marzahl, Christian and Donovan, Taryn A. and Maier, Andreas and Breen, Jack and Ravikumar, Nishant and Chung, Youjin and Park, Jinah and Nateghi, Ramin and Pourakpour, Fattaneh and Fick, Rutger H. J. and Ben Hadj, Saima and Jahanifar, Mostafa and Shephard, Adam and Dexl, Jakob and Wittenberg, Thomas and Kondo, Satoshi and Lafarge, Maxime W. and Koelzer, Viktor H. and Liang, Jingtang and Wang, Yubo and Long, Xi and Liu, Jingxin and Razavi, Salar and Khademi, April and Yang, Sen and Wang, Xiyue and Erber, Ramona and Klang, Andrea and Lipnik, Karoline and Bolfa, Pompei and Dark, Michael J. and Wasinger, Gabriel and Veta, Mitko and Breininger, Katharina}, + title = {Mitosis domain generalization in histopathology images - The MIDOG challenge.}, + doi = {10.1016/j.media.2022.102699}, + pages = {102699}, + volume = {84}, + abstract = {The density of mitotic figures (MF) within tumor tissue is known to be highly correlated with tumor proliferation and thus is an important marker in tumor grading. Recognition of MF by pathologists is subject to a strong inter-rater bias, limiting its prognostic value. State-of-the-art deep learning methods can support experts but have been observed to strongly deteriorate when applied in a different clinical environment. The variability caused by using different whole slide scanners has been identified as one decisive component in the underlying domain shift. The goal of the MICCAI MIDOG 2021 challenge was the creation of scanner-agnostic MF detection algorithms. The challenge used a training set of 200 cases, split across four scanning systems. As test set, an additional 100 cases split across four scanning systems, including two previously unseen scanners, were provided. In this paper, we evaluate and compare the approaches that were submitted to the challenge and identify methodological factors contributing to better performance. 
The winning algorithm yielded an F1 score of 0.748 (CI95: 0.704-0.781), exceeding the performance of six experts on the same task.},
  file = {Aubr22.pdf:pdf\\Aubr22.pdf:PDF},
  journal = {Medical Image Analysis},
  optnote = {DIAG, RADIOLOGY},
  pmid = {36463832},
  year = {2022},
  ss_id = {18e784586cb41d5dfc28e84f4f1c390086c6516d},
  all_ss_ids = {['18e784586cb41d5dfc28e84f4f1c390086c6516d']},
  gscites = {48},
}

@book{Aubr23,
  author = {Aubreville, Marc and Stathonikos, Nikolas and Bertram, Christof A. and Klopfleisch, Robert and Hoeve, Natalie ter and Ciompi, Francesco and Wilm, Frauke and Marzahl, Christian and Donovan, Taryn A. and Maier, Andreas and Veta, Mitko and Breininger, Katharina},
  title = {Abstract: the MIDOG Challenge 2021},
  doi = {10.1007/978-3-658-41657-7_26},
  year = {2023},
  abstract = {Abstract unavailable},
  url = {http://dx.doi.org/10.1007/978-3-658-41657-7_26},
  file = {Aubr23.pdf:pdf\\Aubr23.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  journal = {Bildverarbeitung f{\"u}r die Medizin, Workshop},
  citation-count = {0},
  automatic = {yes},
  pages = {115-115},
}

@inproceedings{Avoi13,
  author = {van der Avoird, Andre and Lin, Ning and van Ginneken, Bram and Manniesing, Rashindra},
@@ -1796,55 +2054,61 @@ @inproceedings{Avoi13
  file = {Avoi13.pdf:pdf\\Avoi13.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  month = {3},
+  ss_id = {cbbe7f568d45425dc6410a27c56468f9fdfefab1},
+  all_ss_ids = {['cbbe7f568d45425dc6410a27c56468f9fdfefab1']},
+  gscites = {1},
}

-@Article{Ayat20,
-  author = {Ayatollahi, Fazael and Shokouhi, Shahriar B. and Teuwen, Jonas},
-  title = {Differentiating Benign and Malignant Mass and non-Mass Lesions in Breast {DCE-MRI} using Normalized Frequency-based Features},
-  doi = {10.1007/s11548-019-02103-z},
-  issue = {2},
-  pages = {297-307},
-  volume = {15},
  abstract = {Purpose: In this study we propose a new computer-aided diagnosis (CADx) to distinguish between malign and benign mass and non-mass lesions in breast DCE-MRI. For this purpose, we introduce new frequency textural features.
-  Methods: In this paper we propose novel normalized frequency-based features. These are obtained by applying the dual-tree complex wavelet transform to MRI slices containing a lesion for specific decomposition levels. The low-pass and band-pass frequency coefficients of the dual-tree complex wavelet transform represent the general shape and texture features respectively of the lesion. The extraction of these features is computationally efficient. We employ a support vector machine (SVM) to classify the lesions, and investigate modified cost functions and under- and oversampling strategies to handle the class imbalance.
-  Results: The proposed method has been tested on a dataset of 80 patients containing 103 lesions. An area under the curve (AUC) of 0.98 for the mass and 0.94 for the non-mass lesions is obtained. Similarly, accuracies of 96.9% and 89.8%, sensitivities of 93.8% and 84.6% and specificities of 98% and 92.3% are obtained for the mass and non-mass lesions respectively.
-  Conclusions: Normalized frequency-based features can characterize benign and malignant lesions efficiently in both mass and non-mass like lesions.
Additionally, the combination of normalized frequency-based features and three dimensional shape descriptors improve the CADx performance.},
-  file = {Ayat19.pdf:pdf\\Ayat19.pdf:PDF},
-  journal = IJCARS,
-  month = {12},
-  optnote = {DIAG, RADIOLOGY},
-  pmid = {31838643},
-  year = {2020},
+  Methods: In this paper we propose novel normalized frequency-based features. These are obtained by applying the dual-tree complex wavelet transform to MRI slices containing a lesion for specific decomposition levels. The low-pass and band-pass frequency coefficients of the dual-tree complex wavelet transform represent the general shape and texture features respectively of the lesion. The extraction of these features is computationally efficient. We employ a support vector machine (SVM) to classify the lesions, and investigate modified cost functions and under- and oversampling strategies to handle the class imbalance.
+  Results: The proposed method has been tested on a dataset of 80 patients containing 103 lesions. An area under the curve (AUC) of 0.98 for the mass and 0.94 for the non-mass lesions is obtained. Similarly, accuracies of 96.9% and 89.8%, sensitivities of 93.8% and 84.6% and specificities of 98% and 92.3% are obtained for the mass and non-mass lesions respectively.
+  Conclusions: Normalized frequency-based features can characterize benign and malignant lesions efficiently in both mass and non-mass like lesions. Additionally, the combination of normalized frequency-based features and three dimensional shape descriptors improves the CADx performance.},
+  file = {Ayat19.pdf:pdf\\Ayat19.pdf:PDF},
+  journal = IJCARS,
+  optnote = {DIAG, RADIOLOGY},
+  pmid = {31838643},
+  year = {2020},
+  month = {12},
+  ss_id = {f435c3f782e98246413f30adcf8f78899650d23f},
+  all_ss_ids = {['f435c3f782e98246413f30adcf8f78899650d23f']},
+  gscites = {5},
}

@mastersthesis{Bagu18,
  author = {Ines Correia Bagulho},
  title = {Reference Tissue Normalization of Prostate MRI with automatic Multi-Organ Deep Learning Pelvis segmentation},
  abstract = {Prostate cancer is the most common cancer among male patients and second leading cause of death
  from cancer in men (excluding non-melanoma skin cancer). Magnetic Resonance Imaging (MRI) is
  currently becoming the modality of choice for clinical staging of localized prostate cancer. However,
  MRI lacks intensity quantification which hinders its diagnostic ability. The overall aim of this dissertation
  is to automate a novel normalization method that can potentially quantify general MR intensities, thus
  improving the diagnostic ability of MRI.
  Two Prostate multi-parametric MRI cohorts, of 2012 and 2016, were used in this retrospective study. To
  improve the diagnostic ability of T2-Weighted MRI, a novel multi-reference tissue normalization method
  was tested and automated. This method consists of computing the average intensity of the reference tissues
  and the corresponding normalized reference values to define a look-up-table through interpolation.
  Since the method requires delineation of multiple reference tissues, an MRI-specific Deep Learning
  model, Aniso-3DUNET, was trained on manual segmentations and tested to automate this segmentation
  step. The output of the Deep Learning model, that consisted of automatic segmentations, was validated
  and used in an automatic normalization approach.
The effect of the manual and automatic normalization - approaches on diagnostic accuracy of T2-weighted intensities was determined with Receiver Operating - Characteristic (ROC) analyses. The Areas Under the Curve (AUC) were compared. - The automatic segmentation of multiple reference-tissues was validated with an average DICE score - higher than 0.8 in the test phase. Thereafter, the method developed demonstrated that the normalized - intensities lead to an improved diagnostic accuracy over raw intensities using the manual approach, with - an AUC going from 0.54 (raw) to 0.68 (normalized), and automatic approach, with an AUC going from - 0.68 to 0.73. - This study demonstrates that multi-reference tissue normalization improves quantification of T2-weighted - images and diagnostic accuracy, possibly leading to a decrease in radiologist's interpretation variability. - It is also possible to conclude that this novel T2-weighted MRI normalization method can be automatized, - becoming clinically applicable.}, + from cancer in men (excluding non-melanoma skin cancer). Magnetic Resonance Imaging (MRI) is + currently becoming the modality of choice for clinical staging of localized prostate cancer. However, + MRI lacks intensity quantification which hinders its diagnostic ability. The overall aim of this dissertation + is to automate a novel normalization method that can potentially quantify general MR intensities, thus + improving the diagnostic ability of MRI. + Two Prostate multi-parametric MRI cohorts, of 2012 and 2016, were used in this retrospective study. To + improve the diagnostic ability of T2-Weighted MRI, a novel multi-reference tissue normalization method + was tested and automated. This method consists of computing the average intensity of the referencetissues + and the corresponding normalized reference values to define a look-up-table through interpolation. + Since the method requires delineation of multiple reference tissues, an MRI-specific Deep Learning + model, Aniso-3DUNET, was trained on manual segmentations and tested to automate this segmentation + step. The output of the Deep Learning model, that consisted of automatic segmentations, was validated + and used in an automatic normalization approach. The effect of the manual and automatic normalization + approaches on diagnostic accuracy of T2-weighted intensities was determined with Receiver Operating + Characteristic (ROC) analyses. The Areas Under the Curve (AUC) were compared. + The automatic segmentation of multiple reference-tissues was validated with an average DICE score + higher than 0.8 in the test phase. Thereafter, the method developed demonstrated that the normalized + intensities lead to an improved diagnostic accuracy over raw intensities using the manual approach, with + an AUC going from 0.54 (raw) to 0.68 (normalized), and automatic approach, with an AUC going from + 0.68 to 0.73. + This study demonstrates that multi-reference tissue normalization improves quantification of T2-weighted + images and diagnostic accuracy, possibly leading to a decrease in radiologist's interpretation variability. 
+ It is also possible to conclude that this novel T2-weighted MRI normalization method can be automatized,
+ becoming clinically applicable.},
  file = {Bagu18.pdf:pdf/Bagu18.pdf:PDF},
  optnote = {DIAG},
  school = {Universidade De Lisboa},
@@ -1867,8 +2131,10 @@ @article{Baid18
  optnote = {DIAG},
  pmid = {29924891},
  gsid = {4937409988134534335},
-  gscites = {21},
+  gscites = {70},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/197167},
+  ss_id = {c549a002b6c2d5849674288cdf82bc2b2c0629d7},
+  all_ss_ids = {['c549a002b6c2d5849674288cdf82bc2b2c0629d7']},
}

@article{Baid18a,
@@ -1885,7 +2151,26 @@ @article{Baid18a
  optnote = {DIAG},
  pmid = {29893996},
  gsid = {10398339840051479936},
-  gscites = {6},
+  gscites = {16},
+  ss_id = {4c56dbeda2cb93aadcc8e5c697ccb6f44283799b},
+  all_ss_ids = {['4c56dbeda2cb93aadcc8e5c697ccb6f44283799b']},
}

@article{Baid23,
  author = {Baidoshvili, Alexi and Khacheishvili, Mariam and van der Laak, Jeroen A. W. M. and van Diest, Paul J.},
  title = {A whole-slide imaging based workflow reduces the reading time of pathologists},
  doi = {10.1111/pin.13309},
  year = {2023},
  abstract = {Even though entirely digitized microscopic tissue sections (whole slide images, WSIs) are increasingly being used in histopathology diagnostics, little data is still available on the effect of this technique on pathologists' reading time. This study aimed to compare the time required to perform the microscopic assessment by pathologists between a conventional workflow (an optical microscope) and digitized WSIs. WSI was used in primary diagnostics at the Laboratory for Pathology Eastern Netherlands for several years (LabPON, Hengelo, The Netherlands). Cases were read either in a traditional workflow, with the pathologist recording the time required for diagnostics and reporting, or entirely digitally. Reading times were extracted from image management system log files, and the digitized workflow was fully integrated into the laboratory information system. The digital workflow saved time in the majority of case categories, with prostate biopsies saving the most (68% time gain). Taking into account case distribution, the digital workflow produced an average gain of 12.3%. Using WSI instead of conventional microscopy significantly reduces pathologists' reading times.
Pathologists must work in a fully integrated environment to fully reap the benefits of a digital workflow.}, + url = {http://dx.doi.org/10.1111/pin.13309}, + file = {Baid23.pdf:pdf\\Baid23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Pathology International}, + automatic = {yes}, + citation-count = {1}, + pages = {127-134}, + volume = {73}, + pmid = {36692113}, } @article{Bakk19, @@ -1903,7 +2188,9 @@ @article{Bakk19 optnote = {DIAG}, pmid = {31774954}, gsid = {13397244904340425857}, - gscites = {43}, + gscites = {294}, + ss_id = {60ba4be07cc7c44df3c4baf8ceece4b460bdb774}, + all_ss_ids = {['60ba4be07cc7c44df3c4baf8ceece4b460bdb774']}, } @article{Bala05, @@ -1937,8 +2224,9 @@ @article{Balk19 optnote = {DIAG}, pmid = {30989469}, gsid = {12757377873820382256}, - gscites = {8}, + gscites = {18}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/206059}, + all_ss_ids = {['91bcebba717670d740ef76097a0885a0bd3c9dde', '94962ef85aca6df5c8289874b56bcaa8722dc596']}, } @article{Balk19b, @@ -1954,7 +2242,9 @@ @article{Balk19b optnote = {DIAG, RADIOLOGY}, pmid = {31222166}, gsid = {6608334460566471833}, - gscites = {7}, + gscites = {65}, + ss_id = {66f70286594b27928fb0eea3b4a0f292261c292f}, + all_ss_ids = {['2125835bf1c4fd0646b5dd50855d647044c07658', '66f70286594b27928fb0eea3b4a0f292261c292f']}, } @article{Balk20, @@ -1971,6 +2261,9 @@ @article{Balk20 optnote = {DIAG}, pmid = {32179443}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/221654}, + ss_id = {2c00230262e1e99862ab27743b725e222a74dcca}, + all_ss_ids = {['2c00230262e1e99862ab27743b725e222a74dcca']}, + gscites = {20}, } @phdthesis{Balk20a, @@ -1978,13 +2271,13 @@ @phdthesis{Balk20a title = {Tissue-based biomarker assessment for predicting prognosis of triple negative breast cancer: the additional value of artificial intelligence}, url = {https://repository.ubn.ru.nl/handle/2066/220344}, abstract = {Despite much research, currently still about 1 in 4 patients with TNBC will develop a recurrence after which the survival outlook is very poor. To date, no prognostic markers are available for TNBC to adequately stratify patients for the risk of developing a recurrence. The emergence of powerful computer algorithms, in particular deep learning models, enable more in depth and more extensive biomarker exploration. In addition, these algorithms are objective and reproducible, in contrast to most human visual biomarker assessment. The first aim of this thesis was to establish a well-defined cohort of TNBC, consisting of tissue sections, clinical and pathology data as well as comprehensive follow up data. Secondly, we aimed to evaluate the prognostic value of the mitotic count, which has widespread clinical use as part of the Nottingham grading system. We studied mitotic count both via conventional manual assessment and automatic assessment, to see if we could find a cut-off value which is better tailored for TNBC. Our third aim was to evaluate the prognostic value of TILs, a promising biomarker not yet used in clinical practice. - - To study the prognostic value of biomarkers in TNBC, the following objectives were defined: - 1. Establish a multicentre TNBC cohort including tissue sections and follow up data (Chapter 2) - 2. Develop a baseline prognostic model for TNBC based on the currently known clinicopathological variables (Chapter 2) - 3. Establish a computer algorithm (Chapter 3) which can automatically find mitoses in WSI of breast cancer, and validate the algorithm (Chapter 4) - 4. 
Explore the prognostic value of the mitotic count for TNBC using manual and automatic assessment (Chapter 5) - 5. Optimize the assessment of tumour infiltrating lymphocytes using deep learning and study its prognostic value in TNBC (Chapter 6)}, + + To study the prognostic value of biomarkers in TNBC, the following objectives were defined: + 1. Establish a multicentre TNBC cohort including tissue sections and follow up data (Chapter 2) + 2. Develop a baseline prognostic model for TNBC based on the currently known clinicopathological variables (Chapter 2) + 3. Establish a computer algorithm (Chapter 3) which can automatically find mitoses in WSI of breast cancer, and validate the algorithm (Chapter 4) + 4. Explore the prognostic value of the mitotic count for TNBC using manual and automatic assessment (Chapter 5) + 5. Optimize the assessment of tumour infiltrating lymphocytes using deep learning and study its prognostic value in TNBC (Chapter 6)}, copromotor = {P. Bult and F. Ciompi}, file = {Balk20a.pdf:pdf\\Balk20.pdf:PDF}, optnote = {DIAG}, @@ -1994,21 +2287,20 @@ @phdthesis{Balk20a journal = {PhD thesis}, } -@Conference{Balk20b, - author = {M. Balkenhol and P. Bult and D. Tellez and W. Vreuls and P. Clahsen and F. Ciompi and J. Van der Laak}, +@conference{Balk20b, + author = {M. Balkenhol and P. Bult and D. Tellez and W. Vreuls and P. Clahsen and F. Ciompi and J. Van der Laak}, booktitle = {European Journal of Cancer}, - title = {Deep learning enables fully automated mitotic density assessment in breast cancer histopathology}, - abstract = {Background: Mitosis counting is an important part of breast cancer grading, yet known to suffer from observer variability. Advances in machine learning enable fully automated analysis of digitized glass slides. The present study evaluated automatic mitosis counting and demonstrated applicability on triple negative breast cancers (TNBC). -Material and Methods: In entire scanned H&E slides of 90 invasive breast tumours, a deep learning algorithm (DLA) fully automatically detected all mitoses and determined the hotspot (area with highest mitotic density). Subsequently, two independent observers assessed mitotic counts on glass slides according to routine practice, and in the computer-defined hotspot. -Next, automated mitotic counting was performed in our TNBC cohort (n = 597). Multivariable Cox regression survival models were expanded with dichotomized mitotic counts. The c-statistic was used to evaluate the additional prognostic value of every possible cut off value. -Results: Automatic counting showed excellent concordance with visual assessment in computer detected hotspots with intraclass correlation coefficients (ICC) of 0.895 (95% CI 0.845–0.930) and 0.888 (95% CI 0.783–0.936) for two observers, respectively. ICC of fully automated counting versus conventional glass slide assessment were 0.828 (95% CI 0.750–0.883 and 0.757 (95% CI 0.638–0.839), respectively. -In the TNBC cohort, automatic mitotic counts ranged from 1 to 269 (mean 57.6) in 2 mm2 hotspots. None of the cut off values improved the models’ baseline c-statistic. -Conclusion: Automatic mitosis counting is a promising complementary aid for mitoses assessment. 
Our method was capable of fully automatically locating the mitotic hotspot in tumours, and was capable of processing a large series of TNBC, showing that mitotic count was not prognostic for TNBC even when attempting alternative cut off points.},
-  optnote = {DIAG, RADIOLOGY},
-  year = {2020},
+  title = {Deep learning enables fully automated mitotic density assessment in breast cancer histopathology},
+  abstract = {Background: Mitosis counting is an important part of breast cancer grading, yet known to suffer from observer variability. Advances in machine learning enable fully automated analysis of digitized glass slides. The present study evaluated automatic mitosis counting and demonstrated applicability on triple negative breast cancers (TNBC).
+              Material and Methods: In entire scanned H&E slides of 90 invasive breast tumours, a deep learning algorithm (DLA) fully automatically detected all mitoses and determined the hotspot (area with highest mitotic density). Subsequently, two independent observers assessed mitotic counts on glass slides according to routine practice, and in the computer-defined hotspot.
+              Next, automated mitotic counting was performed in our TNBC cohort (n = 597). Multivariable Cox regression survival models were expanded with dichotomized mitotic counts. The c-statistic was used to evaluate the additional prognostic value of every possible cut off value.
+              Results: Automatic counting showed excellent concordance with visual assessment in computer detected hotspots with intraclass correlation coefficients (ICC) of 0.895 (95% CI 0.845-0.930) and 0.888 (95% CI 0.783-0.936) for two observers, respectively. ICCs of fully automated counting versus conventional glass slide assessment were 0.828 (95% CI 0.750-0.883) and 0.757 (95% CI 0.638-0.839), respectively.
+              In the TNBC cohort, automatic mitotic counts ranged from 1 to 269 (mean 57.6) in 2 mm2 hotspots. None of the cut off values improved the models' baseline c-statistic.
+              Conclusion: Automatic mitosis counting is a promising complementary aid for mitoses assessment.
Our method was capable of fully automatically locating the mitotic hotspot in tumours, and was capable of processing a large series of TNBC, showing that mitotic count was not prognostic for TNBC even when attempting alternative cut off points.}, + optnote = {DIAG, RADIOLOGY}, + year = {2020}, } - @article{Balk21, author = {Balkenhol, Maschenka Ca and Ciompi, Francesco and Swiderska-Chadaj, Zaneta and van de Loo, Rob and Intezar, Milad and Otte-Holler, Irene and Geijs, Daan and Lotz, Johannes and Weiss, Nick and de Bel, Thomas and Litjens, Geert and Bult, Peter and van der Laak, Jeroen Awm}, title = {Optimized tumour infiltrating lymphocyte assessment for triple negative breast cancer prognostics.}, @@ -2022,6 +2314,9 @@ @article{Balk21 optnote = {DIAG}, pmid = {33640523}, year = {2021}, + ss_id = {25890a20503e79f773f427c999fcb41387f0aab1}, + all_ss_ids = {['25890a20503e79f773f427c999fcb41387f0aab1']}, + gscites = {17}, } @inproceedings{Balo11, @@ -2056,6 +2351,21 @@ @article{Balo13 gscites = {88}, } +@book{Balo17, + author = {Balocco, Simone and Ciompi, Francesco and Rigla, Juan and Carrillo, Xavier and Mauri, Josepa and Radeva, Petia}, + title = {~~Intra-coronary Stent Localization in Intravascular Ultrasound Sequences, A Preliminary Study}, + doi = {10.1007/978-3-319-67534-3_2}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-319-67534-3_2}, + file = {Balo17.pdf:pdf\\Balo17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Lecture Notes in Computer Science}, + automatic = {yes}, + citation-count = {2}, + pages = {12-19}, +} + @article{Balo18, author = {Balocco, Simone and Ciompi, Francesco and Rigla, Juan and Carrillo, Xavier and Mauri, Josepa and Radeva, Petia}, title = {Assessment Of Intra-coronary Stent Location And Extension In Intravascular Ultrasound Sequences}, @@ -2071,7 +2381,23 @@ @article{Balo18 pmid = {30383304}, month = {12}, gsid = {7215414788070039474}, - gscites = {1}, + gscites = {4}, + ss_id = {3a3664f9b7e6c4258d9dc282c9c43a95054c648e}, + all_ss_ids = {['3a3664f9b7e6c4258d9dc282c9c43a95054c648e']}, +} + +@inproceedings{Balt17, + author = {Balta, Christiana and Bouwman, Ramona W. and Sechopoulos, Ioannis and Broeders, Mireille J. M. and Karssemeijer, Nico and van Engen, Ruben E. and Veldkamp, Wouter J. H.}, + title = {~~Signal template generation from acquired mammographic images for the non-prewhitening model observer with eye-filter}, + doi = {10.1117/12.2254160}, + year = {2017}, + abstract = {Model observers (MOs) are being investigated for image quality assessment in full-field digital mammography (FFDM). Signal templates for the non-prewhitening MO with eye filter (NPWE) were formed using acquired FFDM images. A signal template was generated from acquired images by averaging multiple exposures resulting in a low noise signal template. Noise elimination while preserving the signal was investigated and a methodology which results in a noise-free template is proposed. In order to deal with signal location uncertainty, template shifting was implemented. The procedure to generate the template was evaluated on images of an anthropomorphic breast phantom containing microcalcification-related signals. Optimal reduction of the background noise was achieved without changing the signal. Based on a validation study in simulated images, the difference (bias) in MO performance from the ground truth signal was calculated and found to be <1%. 
As template generation is a building block of the entire image quality assessment framework, the proposed method to construct templates from acquired images facilitates the use of the NPWE MO in acquired images.},
+  url = {http://dx.doi.org/10.1117/12.2254160},
+  file = {Balt17.pdf:pdf\\Balt17.pdf:PDF},
+  optnote = {DIAG, RADIOLOGY},
+  journal = {Medical Imaging 2017: Image Perception, Observer Performance, and Technology Assessment},
+  automatic = {yes},
+  citation-count = {4},
+}

@article{Balt18,
@@ -2089,7 +2415,26 @@ @article{Balt18
   optnote = {AXTI, DIAG, RADIOLOGY},
   pmid = {29193129},
   gsid = {7340681759496680687},
-  gscites = {9},
+  gscites = {15},
+  ss_id = {393450e398ef7775bd9a0300974d1e31636b5a26},
+  all_ss_ids = {['393450e398ef7775bd9a0300974d1e31636b5a26']},
+}
+
+@article{Balt19a,
+  author = {Balta, C. and Bouwman, R. W. and Sechopoulos, I. and Broeders, M. J. M. and Karssemeijer, N. and van Engen, R. E. and Veldkamp, W. J. H.},
+  title = {~~Can a channelized Hotelling observer assess image quality in acquired mammographic images of an anthropomorphic breast phantom including image processing?},
+  doi = {10.1002/mp.13342},
+  year = {2019},
+  abstract = {Purpose: To study the feasibility of a channelized Hotelling observer (CHO) to predict human observer performance in detecting calcification-like signals in mammography images of an anthropomorphic breast phantom, as part of a quality control (QC) framework. Methods: A prototype anthropomorphic breast phantom with inserted gold disks of 0.25 mm diameter was imaged with two different digital mammography x-ray systems at four different dose levels. Regions of interest (ROIs) were extracted from the acquired processed and unprocessed images, signal-present and signal-absent. The ROIs were evaluated by a CHO using four different formulations of the difference of Gaussian (DoG) channel sets. Three human observers scored the ROIs in a two-alternative forced-choice experiment. We compared the human and the CHO performance on the simple task to detect calcification-like disks in ROIs with and without postprocessing. The proportion of correct responses of the human reader (PCH) and the CHO (PCCHO) was calculated and the correlation between the two was analyzed using a mixed-effect regression model. To address the signal location uncertainty, the impact of shifting the DoG channel sets in all directions up to two pixels was evaluated. Correlation results including the goodness of fit (r2) of PCH and PCCHO for all different parameters were evaluated. Results: Subanalysis by system yielded strong correlations between PCH and PCCHO, with r2 between 0.926 and 0.958 for the unshifted and between 0.759 and 0.938 for the shifted channel sets, respectively. However, the linear fit suggested a slight system dependence. PCCHO with shifted channel sets increased CHO performance but the correlation with humans was decreased. These correlations were not considerably affected by the DoG channel set used. Conclusions: There is potential for the CHO to be used in QC for the evaluation of detectability of calcification-like signals. The CHO can predict the PC of humans in images of calcification-like signals of two different systems.
However, a global model to be used for all systems requires further investigation.}, + url = {http://dx.doi.org/10.1002/mp.13342}, + file = {Balt19a.pdf:pdf\\Balt19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Physics}, + automatic = {yes}, + citation-count = {4}, + pages = {714-725}, + volume = {46}, + pmid = {30561108}, } @article{Balt19b, @@ -2110,6 +2455,7 @@ @article{Balt19b month = {9}, gsid = {8315844268599249140}, gscites = {1}, + all_ss_ids = {['20fe29e553477d9bd330f052e4d01c0c1c616b22', 'f131ef217543d179269018950bf3b6ba2b30f3b1']}, } @phdthesis{Balt19c, @@ -2126,6 +2472,20 @@ @phdthesis{Balt19c journal = {PhD thesis}, } +@inproceedings{Balt20, + author = {Balta, Christiana and Rodriguez-Ruiz, Alejandro and Mieskes, Christoph and Karssemeijer, Nico and Heywang-K\"{o}brunner, Sylvia H.}, + title = {~~Going from double to single reading for screening exams labeled as likely normal by AI: what is the impact?}, + doi = {10.1117/12.2564179}, + year = {2020}, + abstract = {We investigated whether a deep learning-based artificial intelligence (AI) system can be used to improve breast cancer screening workflow efficiency by making a pre-selection of likely normal screening mammograms where double-reading could be safely replaced with single-reading. We collected 18,015 consecutively acquired screening exams, the independent reading assessments by each radiologist of the double reading process, and the information about whether the case was recalled and if so the recall outcome. The AI system assigned a 1-10 score to each screening exam denoting the likelihood of cancer. We simulated the impact on recall rate, cancer detection rate, and workload if single-reading would have been performed for the mammograms with the lowest AI scores. After evaluating all possible AI score thresholds, it was found that when AI scores 1 to 7 are single read instead of double read, the cancer detection rate would have remained the same (no screen-detected cancers missed -the AI score is low but the single-reader would recall the exam), recall rate would have decreased by 11.8% (from 5.35% to 4.79%), and screen reading workload would have decreased by 32.6%. In conclusion, using an AI system could improve breast cancer screening efficiency by pre-selecting likely normal exams where double-reading might not be needed.}, + url = {http://dx.doi.org/10.1117/12.2564179}, + file = {Balt20.pdf:pdf\\Balt20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {15th International Workshop on Breast Imaging (IWBI2020)}, + automatic = {yes}, + citation-count = {12}, +} + @inproceedings{Band17, author = {P\'{e}ter B\'{a}ndi and Rob {van de Loo} and Milad Intezar and Daan Geijs and Francesco Ciompi and Bram {van Ginneken} and Jeroen {van der Laak} and Geert Litjens}, title = {Comparison of Different Methods for Tissue Segmentation In Histopathological Whole-Slide Images}, @@ -2135,11 +2495,13 @@ @inproceedings{Band17 url = {https://arxiv.org/abs/1703.05990}, abstract = {Tissue segmentation is an important pre-requisite for efficient and accurate diagnostics in digital pathology. However, it is well known that whole-slide scanners can fail in detecting all tissue regions, for example due to the tissue type, or due to weak staining because their tissue detection algorithms are not robust enough. In this paper, we introduce two different convolutional neural network architectures for whole slide image segmentation to accurately identify the tissue sections. 
We also compare the algorithms to a published traditional method. We collected 54 whole slide images with differing stains and tissue types from three laboratories to validate our algorithms. We show that while the two methods do not differ significantly they outperform their traditional counterpart (Jaccard index of 0.937 and 0.929 vs. 0.870, p < 0.01).}, file = {:pdf/Band17.pdf:PDF}, - gscites = {14}, + gscites = {34}, gsid = {564398722349753208}, month = {4}, optnote = {DIAG, PATHOLOGY}, year = {2017}, + ss_id = {418354adcafbc948c266c55564feb5b12cc3dc21}, + all_ss_ids = {['418354adcafbc948c266c55564feb5b12cc3dc21']}, } @article{Band18, @@ -2158,42 +2520,48 @@ @article{Band18 year = {2018}, month = {2}, gsid = {13054230002586416609}, - gscites = {54}, + gscites = {376}, + ss_id = {1b27b9cfe0ce17950b6ea72f9ef8cf5a7459bccd}, + all_ss_ids = {['1b27b9cfe0ce17950b6ea72f9ef8cf5a7459bccd']}, } -@Article{Band19a, - author = {B\'{a}ndi, P. and Balkenhol, Maschenka and van Ginneken, Bram and van der Laak, Jeroen and Litjens, Geert}, - title = {Resolution-agnostic tissue segmentation in whole-slide histopathology images with convolutional neural networks}, - doi = {10.7717/peerj.8242}, - pages = {e8242}, - url = {https://peerj.com/articles/8242/}, - volume = {7}, +@article{Band19a, + author = {B\'{a}ndi, P. and Balkenhol, Maschenka and van Ginneken, Bram and van der Laak, Jeroen and Litjens, Geert}, + title = {Resolution-agnostic tissue segmentation in whole-slide histopathology images with convolutional neural networks}, + doi = {10.7717/peerj.8242}, + pages = {e8242}, + url = {https://peerj.com/articles/8242/}, + volume = {7}, abstract = {Modern pathology diagnostics is being driven toward large scale digitization of microscopic tissue sections. A prerequisite for its safe implementation is the guarantee that all tissue present on a glass slide can also be found back in the digital image. Whole-slide scanners perform a tissue segmentation in a low resolution overview image to prevent inefficient high-resolution scanning of empty background areas. However, currently applied algorithms can fail in detecting all tissue regions. - In this study, we developed convolutional neural networks to distinguish tissue from background. We collected 100 whole-slide images of 10 tissue samples--staining categories from five medical centers for development and testing. Additionally, eight more images of eight unfamiliar categories were collected for testing only. We compared our fully-convolutional neural networks to three traditional methods on a range of resolution levels using Dice score and sensitivity. - We also tested whether a single neural network can perform equivalently to multiple networks, each specialized in a single resolution. Overall, our solutions outperformed the traditional methods on all the tested resolutions. The resolution-agnostic network achieved average Dice scores between 0.97 and 0.98 across the tested resolution levels, only 0.0069 less than the resolution-specific networks. Finally, its excellent generalization performance was demonstrated by achieving averages of 0.98 Dice score and 0.97 sensitivity on the eight unfamiliar images. A future study should test this network prospectively.}, - file = {Band19a.pdf:pdf\\Band19a.pdf:PDF}, - gscites = {2}, - gsid = {12477234465933785268}, - journal = PRJ, - month = {12}, - optnote = {DIAG, RADIOLOGY}, - pmid = {31871843}, - year = {2019}, -} - -@Article{Band23, - author = {B\'{a}ndi, P. 
and Balkenhol, Maschenka and van Dijk, Marcory and Kok, Michel and van Ginneken, Bram and van der Laak, Jeroen and Litjens, Geert}, - title = {Continual learning strategies for cancer-independent detection of lymph node metastases}, - doi = {10.1016/j.media.2023.102755}, - pages = {102755}, - volume = {85}, - abstract = {Recently, large, high-quality public datasets have led to the development of convolutional neural networks that can detect lymph node metastases of breast cancer at the level of expert pathologists. Many cancers, regardless of the site of origin, can metastasize to lymph nodes. However, collecting and annotating high-volume, high-quality datasets for every cancer type is challenging. In this paper we investigate how to leverage existing high-quality datasets most efficiently in multi-task settings for closely related tasks. Specifically, we will explore different training and domain adaptation strategies, including prevention of catastrophic forgetting, for breast, colon and head-and-neck cancer metastasis detection in lymph nodes. + In this study, we developed convolutional neural networks to distinguish tissue from background. We collected 100 whole-slide images of 10 tissue samples--staining categories from five medical centers for development and testing. Additionally, eight more images of eight unfamiliar categories were collected for testing only. We compared our fully-convolutional neural networks to three traditional methods on a range of resolution levels using Dice score and sensitivity. + We also tested whether a single neural network can perform equivalently to multiple networks, each specialized in a single resolution. Overall, our solutions outperformed the traditional methods on all the tested resolutions. The resolution-agnostic network achieved average Dice scores between 0.97 and 0.98 across the tested resolution levels, only 0.0069 less than the resolution-specific networks. Finally, its excellent generalization performance was demonstrated by achieving averages of 0.98 Dice score and 0.97 sensitivity on the eight unfamiliar images. A future study should test this network prospectively.}, + file = {Band19a.pdf:pdf\\Band19a.pdf:PDF}, + journal = PRJ, + optnote = {DIAG, RADIOLOGY}, + pmid = {31871843}, + year = {2019}, + month = {12}, + gsid = {12477234465933785268}, + gscites = {37}, + ss_id = {e1170679c6b5fe0addb40cfb62076979a3b3ebec}, + all_ss_ids = {['e1170679c6b5fe0addb40cfb62076979a3b3ebec']}, +} -Our results show state-of-the-art performance on colon and head-and-neck cancer metastasis detection tasks. We show the effectiveness of adaptation of networks from one cancer type to another to obtain multi-task metastasis detection networks. Furthermore, we show that leveraging existing high-quality datasets can significantly boost performance on new target tasks and that catastrophic forgetting can be effectively mitigated. Last, we compare different mitigation strategies.}, - file = {Band23.pdf:pdf\\Band23.pdf:PDF}, - journal = {Medical Image Analysis}, - optnote = {DIAG, RADIOLOGY, PATHOLOGY}, - year = {2023}, +@article{Band23, + author = {B\'{a}ndi, P. 
and Balkenhol, Maschenka and van Dijk, Marcory and Kok, Michel and van Ginneken, Bram and van der Laak, Jeroen and Litjens, Geert}, + title = {Continual learning strategies for cancer-independent detection of lymph node metastases}, + doi = {10.1016/j.media.2023.102755}, + pages = {102755}, + volume = {85}, + abstract = {Recently, large, high-quality public datasets have led to the development of convolutional neural networks that can detect lymph node metastases of breast cancer at the level of expert pathologists. Many cancers, regardless of the site of origin, can metastasize to lymph nodes. However, collecting and annotating high-volume, high-quality datasets for every cancer type is challenging. In this paper we investigate how to leverage existing high-quality datasets most efficiently in multi-task settings for closely related tasks. Specifically, we will explore different training and domain adaptation strategies, including prevention of catastrophic forgetting, for breast, colon and head-and-neck cancer metastasis detection in lymph nodes. + + Our results show state-of-the-art performance on colon and head-and-neck cancer metastasis detection tasks. We show the effectiveness of adaptation of networks from one cancer type to another to obtain multi-task metastasis detection networks. Furthermore, we show that leveraging existing high-quality datasets can significantly boost performance on new target tasks and that catastrophic forgetting can be effectively mitigated. Last, we compare different mitigation strategies.}, + file = {Band23.pdf:pdf\\Band23.pdf:PDF}, + journal = {Medical Image Analysis}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + year = {2023}, + all_ss_ids = {['4bcd672218ecec70473c84f6f1cc52c64031f3e5']}, + gscites = {4}, } @article{Bank07, @@ -2210,6 +2578,23 @@ @article{Bank07 month = {3}, } +@article{Bank17, + author = {Bankier, Alexander A. and MacMahon, Heber and Goo, Jin Mo and Rubin, Geoffrey D. and Schaefer-Prokop, Cornelia M. and Naidich, David P.}, + title = {~~Recommendations for Measuring Pulmonary Nodules at CT: A Statement from the Fleischner Society}, + doi = {10.1148/radiol.2017162894}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1148/radiol.2017162894}, + file = {Bank17.pdf:pdf\\Bank17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + automatic = {yes}, + citation-count = {203}, + pages = {584-600}, + volume = {285}, + pmid = {28650738}, +} + @article{Bare96, author = {J. O. Barentsz and G. J. Jager and P. B. van Vierzen and J. A. Witjes and S. P. Strijk and H. Peters and N. Karssemeijer and S. H. Ruijs}, title = {Staging urinary bladder cancer after transurethral biopsy: value of fast dynamic contrast-enhanced {MR} imaging}, @@ -2224,6 +2609,8 @@ @article{Bare96 month = {10}, gsid = {10907411171717953315}, gscites = {222}, + ss_id = {328a63a444a23693caa473d92e726efc1503c680}, + all_ss_ids = {['328a63a444a23693caa473d92e726efc1503c680']}, } @article{Bare99, @@ -2243,6 +2630,8 @@ @article{Bare99 month = {9}, gsid = {9172514927993460045}, gscites = {172}, + ss_id = {46b888d39bbf82a9ff78a5bfb581b4ded6df46bc}, + all_ss_ids = {['46b888d39bbf82a9ff78a5bfb581b4ded6df46bc']}, } @article{Bart20, @@ -2258,6 +2647,38 @@ @article{Bart20 file = {Bart20.pdf:pdf\\Bart20.pdf:PDF}, abstract = {Vitamin K-dependent proteins are involved in (patho)physiological calcification of the vasculature and the bones. Type 2 diabetes mellitus (DM2) is associated with increased arterial calcification and increased fractures. 
This study investigates the effect of 6 months vitamin K2 supplementation on systemic arterial calcification and bone mineral density (BMD) in DM2 patients with a history of cardiovascular disease (CVD).}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/235803}, + ss_id = {dd270749c2da64ae1d99baa1c40bc904b28b892f}, + all_ss_ids = {['dd270749c2da64ae1d99baa1c40bc904b28b892f']}, + gscites = {24}, +} + +@article{Beau20, + author = {Beauferris, Youssef and Teuwen, Jonas and Karkalousos, Dimitrios and Moriakov, Nikita and Caan, Mattha and Yiasemis, George and Rodrigues, L\'{i}via and Lopes, Alexandre and Pedrini, H\'{e}lio and Rittner, Let\'{i}cia and Dannecker, Maik and Studenyak, Viktor and Gr\"{o}ger, Fabian and Vyas, Devendra and Faghih-Roohi, Shahrooz and Jethi, Amrit Kumar and Raju, Jaya Chandra and Sivaprakasam, Mohanasankar and Lasby, Mike and Nogovitsyn, Nikita and Loos, Wallace and Frayne, Richard and Souza, Roberto}, + title = {~~Multi-Coil MRI Reconstruction Challenge -- Assessing Brain MRI Reconstruction Models and their Generalizability to Varying Coil Configurations}, + doi = {10.48550/ARXIV.2011.07952}, + year = {2020}, + abstract = {Deep-learning-based brain magnetic resonance imaging (MRI) reconstruction methods have the potential to accelerate the MRI acquisition process. Nevertheless, the scientific community lacks appropriate benchmarks to assess MRI reconstruction quality of high-resolution brain images, and evaluate how these proposed algorithms will behave in the presence of small, but expected data distribution shifts. The Multi-Coil Magnetic Resonance Image (MC-MRI) Reconstruction Challenge provides a benchmark that aims at addressing these issues, using a large dataset of high-resolution, three-dimensional, T1-weighted MRI scans. The challenge has two primary goals: 1) to compare different MRI reconstruction models on this dataset and 2) to assess the generalizability of these models to data acquired with a different number of receiver coils. In this paper, we describe the challenge experimental design, and summarize the results of a set of baseline and state of the art brain MRI reconstruction models. We provide relevant comparative information on the current MRI reconstruction state-of-the-art and highlight the challenges of obtaining generalizable models that are required prior to broader clinical adoption. The MC-MRI benchmark data, evaluation code and current challenge leaderboard are publicly available. 
They provide an objective performance assessment for future developments in the field of brain MRI reconstruction.}, + url = {https://arxiv.org/abs/2011.07952}, + file = {Beau20.pdf:pdf\\Beau20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2011.07952}, + automatic = {yes}, +} + +@article{Beau22, + author = {Beauferris, Youssef and Teuwen, Jonas and Karkalousos, Dimitrios and Moriakov, Nikita and Caan, Matthan and Yiasemis, George and Rodrigues, L\'{i}via and Lopes, Alexandre and Pedrini, Helio and Rittner, Let\'{i}cia and Dannecker, Maik and Studenyak, Viktor and Gr\"{o}ger, Fabian and Vyas, Devendra and Faghih-Roohi, Shahrooz and Kumar Jethi, Amrit and Chandra Raju, Jaya and Sivaprakasam, Mohanasankar and Lasby, Mike and Nogovitsyn, Nikita and Loos, Wallace and Frayne, Richard and Souza, Roberto}, + title = {~~Multi-Coil MRI Reconstruction Challenge--Assessing Brain MRI Reconstruction Models and Their Generalizability to Varying Coil Configurations}, + doi = {10.3389/fnins.2022.919186}, + year = {2022}, + abstract = {Deep-learning-based brain magnetic resonance imaging (MRI) reconstruction methods have the potential to accelerate the MRI acquisition process. Nevertheless, the scientific community lacks appropriate benchmarks to assess the MRI reconstruction quality of high-resolution brain images, and evaluate how these proposed algorithms will behave in the presence of small, but expected data distribution shifts. The multi-coil MRI (MC-MRI) reconstruction challenge provides a benchmark that aims at addressing these issues, using a large dataset of high-resolution, three-dimensional, T1-weighted MRI scans. The challenge has two primary goals: (1) to compare different MRI reconstruction models on this dataset and (2) to assess the generalizability of these models to data acquired with a different number of receiver coils. In this paper, we describe the challenge experimental design and summarize the results of a set of baseline and state-of-the-art brain MRI reconstruction models. We provide relevant comparative information on the current MRI reconstruction state-of-the-art and highlight the challenges of obtaining generalizable models that are required prior to broader clinical adoption. The MC-MRI benchmark data, evaluation code, and current challenge leaderboard are publicly available. They provide an objective performance assessment for future developments in the field of brain MRI reconstruction.}, + url = {http://dx.doi.org/10.3389/fnins.2022.919186}, + file = {Beau22.pdf:pdf\\Beau22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Frontiers in Neuroscience}, + automatic = {yes}, + citation-count = {7}, + volume = {16}, + pmid = {35873808}, } @conference{Beck16, @@ -2266,14 +2687,14 @@ @conference{Beck16 booktitle = RSNA, year = {2016}, abstract = {PURPOSE: We aimed to evaluate the additional value of brain {CT} perfusion ({CTP}) for intracranial vessel occlusion detection in acute ischemic stroke for observers with different levels of experience. - - METHOD AND MATERIALS: We retrospectively included all patients with symptoms of acute ischemic stroke (onset of less than 9 hours) who were scanned with non-enhanced {CT} ({NECT}), {CT} angiography ({CTA}) and {CTP} in the year 2015. Four observers with different levels of experience (neuroradiologist, non-neuroradiologist, two radiology residents) evaluated the imaging data with 2 imaging strategies. Method 1 included {NECT} and {CTA}. 
For method 2, additional {CTP} maps were provided for the evaluation of intracranial vessel occlusion on {CTA}. The observers were blinded to patient identity and clinical outcome. Receiver operating characteristic ({ROC}) was used for the evaluation of accuracy in intracranial vessel occlusion detection. The reference standard of vessel occlusion was set based on the evaluation by the four observers, and the judgment of an independent neuroradiologist serving as a referee in case of discrepancy. - - RESULTS: In total 110 patients were included, preliminary analyses included 94 patients. There was an increase of {AUC} in the overall detection of intracranial vessel occlusion for observer 1, 3 and 4, though only for observer 1 the increase in {AUC} was statistically significant (p=0.041). Increase of intracranial vessel occlusion detection mainly concerned distal vessel occlusions. No significant added value of {CTP} was found for proximal vessel occlusions, with already a high accuracy based on {NECT} and {CTA} for all experience levels with sensitivity ranging between 86-94% and specificity between 92-100%. - - CONCLUSION: Our study demonstrates that the use of {CTP} can aid in the detection of distal intracranial vessel occlusions on {CTA} in case {CTP} is integrated in the reading strategy. It is also demonstrated that {CTP} was not of added value for the detection of proximal intracranial vessel occlusions. Finally, there was no major difference in the diagnostic accuracy of intracranial vessel occlusion detection for the different levels in experience of the observers. - - CLINICAL RELEVANCE/APPLICATION: Our study demonstrated that brain {CT} perfusion can aid in the detection of distal intracranial vessel occlusions, which is clinically relevant for optimizing the imaging strategy in acute ischemic stroke.}, + + METHOD AND MATERIALS: We retrospectively included all patients with symptoms of acute ischemic stroke (onset of less than 9 hours) who were scanned with non-enhanced {CT} ({NECT}), {CT} angiography ({CTA}) and {CTP} in the year 2015. Four observers with different levels of experience (neuroradiologist, non-neuroradiologist, two radiology residents) evaluated the imaging data with 2 imaging strategies. Method 1 included {NECT} and {CTA}. For method 2, additional {CTP} maps were provided for the evaluation of intracranial vessel occlusion on {CTA}. The observers were blinded to patient identity and clinical outcome. Receiver operating characteristic ({ROC}) was used for the evaluation of accuracy in intracranial vessel occlusion detection. The reference standard of vessel occlusion was set based on the evaluation by the four observers, and the judgment of an independent neuroradiologist serving as a referee in case of discrepancy. + + RESULTS: In total 110 patients were included, preliminary analyses included 94 patients. There was an increase of {AUC} in the overall detection of intracranial vessel occlusion for observer 1, 3 and 4, though only for observer 1 the increase in {AUC} was statistically significant (p=0.041). Increase of intracranial vessel occlusion detection mainly concerned distal vessel occlusions. No significant added value of {CTP} was found for proximal vessel occlusions, with already a high accuracy based on {NECT} and {CTA} for all experience levels with sensitivity ranging between 86-94% and specificity between 92-100%. 
+ + CONCLUSION: Our study demonstrates that the use of {CTP} can aid in the detection of distal intracranial vessel occlusions on {CTA} in case {CTP} is integrated in the reading strategy. It is also demonstrated that {CTP} was not of added value for the detection of proximal intracranial vessel occlusions. Finally, there was no major difference in the diagnostic accuracy of intracranial vessel occlusion detection for the different levels in experience of the observers. + + CLINICAL RELEVANCE/APPLICATION: Our study demonstrated that brain {CT} perfusion can aid in the detection of distal intracranial vessel occlusions, which is clinically relevant for optimizing the imaging strategy in acute ischemic stroke.}, optnote = {DIAG, RADIOLOGY}, } @@ -2287,18 +2708,20 @@ @article{Beck19a pages = {124-129}, doi = {10.1016/j.neurad.2018.03.003}, abstract = {Background and purpose: To evaluate whether brain CT perfusion (CTP) aids in the detection of intracranial vessel occlusion on CT angiography (CTA) in acute ischemic stroke. - - Materials and methods: Medical-ethical committee approval of our hospital was obtained and informed consent was waived. Patients suspected of acute ischemic stroke who underwent non-contrast CT(NCCT), CTA and whole-brain CTP in our center in the year 2015 were included. Three observers with different levels of experience evaluated the imaging data of 110 patients for the presence or absence of intracranial arterial vessel occlusion with two strategies. In the first strategy, only NCCT and CTA were available. In the second strategy, CTP maps were provided in addition to NCCT and CTA. Receiver-operating-characteristic (ROC) analysis was used for the evaluation of diagnostic accuracy. - - Results: Overall, a brain perfusion deficit was scored present in 87-89% of the patients with an intracranial vessel occlusion, more frequently observed in the anterior than in the posterior circulation. Performance of intracranial vessel occlusion detection on CTA was significantly improved with the availability of CTP maps as compared to the first strategy (P = 0.023), due to improved detection of distal and posterior circulation vessel occlusions (P-values of 0.032 and 0.003 respectively). No added value of CTP was found for intracranial proximal vessel occlusion detection, with already high accuracy based on NCCT and CTA alone. - - Conclusion: The performance of intracranial vessel occlusion detection on CTA was improved with the availability of brain CT perfusion maps due to the improved detection of distal and posterior circulation vessel occlusions.}, + + Materials and methods: Medical-ethical committee approval of our hospital was obtained and informed consent was waived. Patients suspected of acute ischemic stroke who underwent non-contrast CT(NCCT), CTA and whole-brain CTP in our center in the year 2015 were included. Three observers with different levels of experience evaluated the imaging data of 110 patients for the presence or absence of intracranial arterial vessel occlusion with two strategies. In the first strategy, only NCCT and CTA were available. In the second strategy, CTP maps were provided in addition to NCCT and CTA. Receiver-operating-characteristic (ROC) analysis was used for the evaluation of diagnostic accuracy. + + Results: Overall, a brain perfusion deficit was scored present in 87-89% of the patients with an intracranial vessel occlusion, more frequently observed in the anterior than in the posterior circulation. 
Performance of intracranial vessel occlusion detection on CTA was significantly improved with the availability of CTP maps as compared to the first strategy (P = 0.023), due to improved detection of distal and posterior circulation vessel occlusions (P-values of 0.032 and 0.003 respectively). No added value of CTP was found for intracranial proximal vessel occlusion detection, with already high accuracy based on NCCT and CTA alone.
+  
+  Conclusion: The performance of intracranial vessel occlusion detection on CTA was improved with the availability of brain CT perfusion maps due to the improved detection of distal and posterior circulation vessel occlusions.},
  file = {pdf\\Beck19a.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  pmid = {29625153},
  month = {3},
  gsid = {6771048124161780993},
-  gscites = {15},
+  gscites = {58},
+  ss_id = {5a7fca54156869246045d31ebf54ff10408deccb},
+  all_ss_ids = {['5a7fca54156869246045d31ebf54ff10408deccb']},
}

@book{Beic11,
@@ -2329,7 +2752,7 @@ @book{Beic16
  publisher = {CreateSpace.com},
  url = {http://www.amazon.com/Sixth-International-Workshop-Pulmonary-Analysis/dp/1537038583},
  abstract = {These are the proceedings of the sixth edition of the International Workshop on Pulmonary Image Analysis, held in conjunction with the Medical Image Computing and Computer Assisted Intervention (MICCAI) Conference 2016 in Athens, Greece. The International Workshop on Pulmonary Image Analysis brings together researchers in pulmonary image analysis to discuss new developments in the field. For the sixth edition of the workshop, all submitted papers received thorough reviews by at least three reviewers. In total, eight papers were accepted for presentation at the workshop, of which five were selected for oral presentation and three for poster presentation. The presented papers deal with different aspects of pulmonary image analysis, including computer aided diagnosis, segmentation, and registration.
We would like to thank the organizers of MICCAI 2016 for hosting the sixth edition of the International Workshop on Pulmonary Image Analysis and for handling the logistics of the workshop, and all colleagues involved in the peer-review process.}, file = {Beic16.pdf:pdf/Beic16.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, year = {2016}, @@ -2360,8 +2783,10 @@ @inproceedings{Bejn14 optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {12432893429390034088}, - gscites = {20}, + gscites = {39}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/132653}, + ss_id = {e39a5aa36977c617a9caecb6da5c87e27886ad6f}, + all_ss_ids = {['e39a5aa36977c617a9caecb6da5c87e27886ad6f']}, } @inproceedings{Bejn15, @@ -2378,6 +2803,9 @@ @inproceedings{Bejn15 optnote = {DIAG, RADIOLOGY}, month = {3}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/141326}, + ss_id = {49d5e6ed83859117cfd781612878ea93fa75c348}, + all_ss_ids = {['49d5e6ed83859117cfd781612878ea93fa75c348']}, + gscites = {46}, } @inproceedings{Bejn17, @@ -2394,7 +2822,9 @@ @inproceedings{Bejn17 pmid = {28230532}, month = {4}, gsid = {574730569234442494}, - gscites = {38}, + gscites = {62}, + ss_id = {3096798a92ab0829ceb1d601d002dd47dc49e156}, + all_ss_ids = {['3096798a92ab0829ceb1d601d002dd47dc49e156']}, } @phdthesis{Bejn17a, @@ -2402,11 +2832,11 @@ @phdthesis{Bejn17a title = {Histopathological diagnosis of breast cancer using machine learning}, url = {https://repository.ubn.ru.nl/handle/2066/178907}, abstract = {Application of machine learning to WSI is a promising yet largely unexplored field of research. The primary aim of the research described in this thesis was to develop automated systems for analysis of H&E stained breast histopathological images. This involved automatic detection of ductal carcinoma in-situ (DCIS), invasive, and metastatic breast cancer in whole-slide histopathological images. A secondary aim was to identify new diagnostic biomarkers for the detection of invasive breast cancer. To this end the research was undertaken with the following objectives: - - 1. Development of an algorithm for standardization of H&E stained WSIs; - 2. Detection, classification and segmentation of primary breast cancer; - 3. Evaluation of the state of the art of machine learning algorithms for automatic detection of lymph nodes metastases; - 4. Identifying and leveraging new stromal biomarkers to improve breast cancer diagnostics.}, + + 1. Development of an algorithm for standardization of H&E stained WSIs; + 2. Detection, classification and segmentation of primary breast cancer; + 3. Evaluation of the state of the art of machine learning algorithms for automatic detection of lymph nodes metastases; + 4. Identifying and leveraging new stromal biomarkers to improve breast cancer diagnostics.}, copromotor = {J.A.W.M. van der Laak and G. 
Litjens},
  file = {Bejn17a.pdf:pdf\\Bejn17a.pdf:PDF},
  optnote = {DIAG},
@@ -2431,8 +2861,10 @@ @article{Bejn17b
   optnote = {DIAG, RADIOLOGY},
   pmid = {29285517},
   gsid = {10616853499280907465},
-  gscites = {75},
+  gscites = {144},
   taverne_url = {https://repository.ubn.ru.nl/handle/2066/181885},
+  ss_id = {784906ab63c3743a5d26dca846a49c2edbf4dc6a},
+  all_ss_ids = {['784906ab63c3743a5d26dca846a49c2edbf4dc6a']},
 }
 
 @article{Bejn18a,
@@ -2449,7 +2881,37 @@ @article{Bejn18a
   optnote = {DIAG, RADIOLOGY},
   pmid = {29710158},
   gsid = {2786338949526955716},
-  gscites = {1},
+  gscites = {5},
+  ss_id = {afe7e342756fb74d5cf7e4ad171415470a5baa3b},
+  all_ss_ids = {['afe7e342756fb74d5cf7e4ad171415470a5baa3b']},
+}
+
+@inproceedings{Bel18,
+  author = {de Bel, Thomas and Hermsen, Meyke and van der Laak, Jeroen and Litjens, Geert J. S. and Smeets, Bart and Hilbrands, Luuk},
+  title = {~~Automatic segmentation of histopathological slides of renal tissue using deep learning},
+  doi = {10.1117/12.2293717},
+  year = {2018},
+  abstract = {Diagnoses in kidney disease often depend on quantification and presence of specific structures in the tissue. The progress in the field of whole-slide imaging and deep learning has opened up new possibilities for automatic analysis of histopathological slides. An initial step for renal tissue assessment is the differentiation and segmentation of relevant tissue structures in kidney specimens. We propose a method for segmentation of renal tissue using convolutional neural networks. Nine structures found in (pathological) renal tissue are included in the segmentation task: glomeruli, proximal tubuli, distal tubuli, arterioles, capillaries, sclerotic glomeruli, atrophic tubuli, inflammatory infiltrate and fibrotic tissue. Fifteen whole slide images of normal cortex originating from tumor nephrectomies were collected at the Radboud University Medical Center, Nijmegen, The Netherlands. The nine classes were sparsely annotated by a PhD student, experienced in the field of renal histopathology (MH). Experiments were performed with three different network architectures: a fully convolutional network, a multi-scale fully convolutional network and a U-net. We assessed the added benefit of combining the networks into an ensemble. We performed four-fold cross validation and report the average pixel accuracy per annotation for each class. Results show that convolutional neural networks are able to accurately perform segmentation tasks in renal tissue, with accuracies of 90% for most classes.},
+  url = {http://dx.doi.org/10.1117/12.2293717},
+  file = {Bel18.pdf:pdf\\Bel18.pdf:PDF},
+  optnote = {DIAG, RADIOLOGY},
+  journal = {Medical Imaging 2018: Digital Pathology},
+  automatic = {yes},
+  citation-count = {22},
+}
 
 @inproceedings{Bel19,
@@ -2462,7 +2924,9 @@ @inproceedings{Bel19
   file = {Bel19.pdf:pdf\\Bel19.pdf:PDF},
   optnote = {DIAG},
   gsid = {8777609378172049761},
-  gscites = {9},
+  gscites = {71},
+  ss_id = {ffab09ba8100420dbd066f31c830f7d3e984696b},
+  all_ss_ids = {['ffab09ba8100420dbd066f31c830f7d3e984696b']},
 }
 
 @article{Bel21,
@@ -2478,6 +2942,100 @@ @article{Bel21
   pmid = {33647784},
   year = {2021},
   taverne_url = {https://repository.ubn.ru.nl/handle/2066/233603},
+  ss_id = {455c14205e23d9cb709371ceb605f92e4fbbda2f},
+  all_ss_ids = {['455c14205e23d9cb709371ceb605f92e4fbbda2f']},
+  gscites = {50},
+}
+
+@article{Bel22,
+  author = {de Bel, Thomas and Litjens, Geert and Ogony, Joshua and Stallings-Mann, Melody and Carter, Jodi M. and Hilton, Tracy and Radisky, Derek C. and Vierkant, Robert A. and Broderick, Brendan and Hoskin, Tanya L. and Winham, Stacey J. and Frost, Marlene H. and Visscher, Daniel W. and Allers, Teresa and Degnim, Amy C. and Sherman, Mark E. and van der Laak, Jeroen A. W. M.},
+  title = {~~Automated quantification of levels of breast terminal duct lobular (TDLU) involution using deep learning},
+  doi = {10.1038/s41523-021-00378-7},
+  year = {2022},
+  abstract = {Convolutional neural networks (CNNs) offer the potential to generate comprehensive quantitative analysis of histologic features. Diagnostic reporting of benign breast disease (BBD) biopsies is usually limited to subjective assessment of the most severe lesion in a sample, while ignoring the vast majority of tissue features, including involution of background terminal duct lobular units (TDLUs), the structures from which breast cancers arise. Studies indicate that increased levels of age-related TDLU involution in BBD biopsies predict lower breast cancer risk, and therefore its assessment may have potential value in risk assessment and management. However, assessment of TDLU involution is time-consuming and difficult to standardize and quantitate. Accordingly, we developed a CNN to enable automated quantitative measurement of TDLU involution and tested its performance in 174 specimens selected from the pathology archives at Mayo Clinic, Rochester, MN.
The CNN was trained and tested on a subset of 33 biopsies, delineating important tissue types. Nine quantitative features were extracted from delineated TDLU regions. Our CNN reached an overall dice-score of 0.871 (+-0.049) for tissue classes versus reference standard annotation. Consensus of four reviewers scoring 705 images for TDLU involution demonstrated substantial agreement with the CNN method (unweighted kappa = 0.747 +- 0.01). Quantitative involution measures showed anticipated associations with BBD histology, breast cancer risk, breast density, menopausal status, and breast cancer risk prediction scores (p < 0.05). Our work demonstrates the potential to improve risk prediction for women with BBD biopsies by applying CNN approaches to generate automated quantitative evaluation of TDLU involution.},
+  url = {http://dx.doi.org/10.1038/s41523-021-00378-7},
+  file = {Bel22.pdf:pdf\\Bel22.pdf:PDF},
+  optnote = {DIAG, RADIOLOGY},
+  journal = {npj Breast Cancer},
+  automatic = {yes},
+  citation-count = {5},
+  volume = {8},
+  pmid = {35046392},
+}
+
+@article{Bell18,
+  author = {Belli, Davide and Hu, Shi and Sogancioglu, Ecem and van Ginneken, Bram},
+  title = {~~Context Encoding Chest X-rays},
+  doi = {10.48550/ARXIV.1812.00964},
+  year = {2018},
+  abstract = {Chest X-rays are one of the most commonly used technologies for medical diagnosis. Many deep learning models have been proposed to improve and automate the abnormality detection task on this type of data. In this paper, we propose a different approach based on image inpainting under adversarial training first introduced by Goodfellow et al. We configure the context encoder model for this task and train it over 1.1M 128x128 images from healthy X-rays. The goal of our model is to reconstruct the missing central 64x64 patch. Once the model has learned how to inpaint healthy tissue, we test its performance on images with and without abnormalities. We discuss and motivate our results considering PSNR, MSE and SSIM scores as evaluation metrics. In addition, we conduct a 2AFC observer study showing that in half of the times an expert is unable to distinguish real images from the ones reconstructed using our model. By computing and visualizing the pixel-wise difference between the source and the reconstructed images, we can highlight abnormalities to simplify further detection and classification tasks.},
+  url = {https://arxiv.org/abs/1812.00964},
+  file = {Bell18.pdf:pdf\\Bell18.pdf:PDF},
+  optnote = {DIAG, RADIOLOGY},
+  journal = {arXiv:1812.00964},
+  automatic = {yes},
+}
+
+@article{Bemp22,
+  author = {Van den Bempt, Maxim and Vinayahalingam, Shankeeth and Han, Michael D. and Berge, Stefaan J. and Xi, Tong},
+  title = {The role of muscular traction in the occurrence of skeletal relapse after advancement bilateral sagittal split osteotomy (BSSO): A systematic review.},
+  doi = {10.1111/ocr.12488},
+  issue = {1},
+  pages = {1--13},
+  volume = {25},
+  abstract = {The aim of this systematic review was (i) to determine the role of muscular traction in the occurrence of skeletal relapse after advancement BSSO and (ii) to investigate the effect of advancement BSSO on the perimandibular muscles. This systematic review reports in accordance with the recommendations proposed by the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) statement. Electronic database searches were performed in the databases MEDLINE, Embase and Cochrane Library. Inclusion criteria were as follows: assessment of relapse after advancement BSSO; assessment of morphological and functional change of the muscles after BSSO; and clinical studies on human subjects. Exclusion criteria were as follows: surgery other than advancement BSSO; studies in which muscle activity/traction was not investigated; and case reports with a sample of five cases or fewer, review articles, meta-analyses, letters, congress abstracts or commentaries. Of the initial 1006 unique articles, 11 studies were finally included. In four studies, an intervention involving the musculature was performed with subsequent assessment of skeletal relapse.
The changes in the morphological and functional properties of the muscles after BSSO were studied in seven studies. The findings of this review demonstrate that the perimandibular musculature plays a role in skeletal relapse after advancement BSSO and may serve as a target for preventive strategies to reduce this complication. However, further research is necessary to (i) develop a better understanding of the role of each muscle group, (ii) to develop new therapeutic strategies and (iii) to define criteria that allow identification of patients at risk.},
+ file = {Bemp22.pdf:pdf\\Bemp22.pdf:PDF},
+ journal = {Orthodontics \& Craniofacial Research},
+ optnote = {DIAG, RADIOLOGY},
+ pmid = {33938136},
+ year = {2022},
+}
+
+@article{Berb22,
+ author = {Berb\'{i}s, M Alvaro and McClintock, David S. and Bychkov, Andrey and Cheng, Jerome Y and Delahunt, Brett and Egevad, Lars and Eloy, Catarina and Farris, Alton B and Fraggetta, Filippo and Garc\'{i}a del Moral, Raimundo and Hartman, Douglas J. and Herrmann, Markus D and Hollemans, Eva and Iczkowski, Kenneth A and Karsan, Aly and Kriegsmann, Mark and Lennerz, Jochen K and Pantanowitz, Liron and Salama, Mohamed E. and Sinard, John and Tuthill, Mark and Van der Laak, Jeroen and Williams, Bethany and Casado-S\'{a}nchez, C\'{e}sar and S\'{a}nchez-Turri\'{o}n, V\'{i}ctor and Luna, Antonio and Aneiros-Fern\'{a}ndez, Jos\'{e} and Shen, Jeanne},
+ title = {The future of computational pathology: expectations regarding the anticipated role of artificial intelligence in pathology by 2030},
+ doi = {10.1101/2022.09.02.22279476},
+ year = {2022},
+ abstract = {Background: Artificial intelligence (AI) is rapidly fueling a fundamental transformation in the practice of pathology. However, AI's clinical integration remains challenging, with no AI algorithms to date enjoying routine adoption within typical anatomic pathology (AP) laboratories. This survey gathered current expert perspectives and expectations regarding the role of AI in AP from those with first-hand computational pathology and AI experience. Methods: Perspectives were solicited using the Delphi method from 24 subject matter experts between December 2020 and February 2021 regarding the anticipated role of AI in pathology by the year 2030. The study consisted of three consecutive rounds: 1) an open-ended, free response questionnaire generating a list of survey items; 2) a Likert-scale survey scored by experts and analyzed for consensus; and 3) a repeat survey of items not reaching consensus to obtain further expert consensus. Findings: Consensus opinions were reached on 141 of 180 survey items (78.3%). Experts agreed that AI would be routinely and impactfully used within AP laboratory and pathologist clinical workflows by 2030. High consensus was reached on 100 items across nine categories encompassing the impact of AI on (1) pathology key performance indicators (KPIs) and (2) the pathology workforce and specific tasks performed by (3) pathologists and (4) AP lab technicians, as well as (5) specific AI applications and their likelihood of routine use by 2030, (6) AI's role in integrated diagnostics, (7) pathology tasks likely to be fully automated using AI, and (8) regulatory/legal and (9) ethical aspects of AI integration in pathology. Interpretation: This is the first systematic consensus study detailing the expected short/mid-term impact of AI on pathology practice.
These findings provide timely and relevant information regarding future care delivery in pathology and raise key practical, ethical, and legal challenges that must be addressed prior to AI's successful clinical implementation. Funding: This research received no specific grant from any funding agency in the public, commercial, or not-for-profit sectors.},
+ url = {http://dx.doi.org/10.1101/2022.09.02.22279476},
+ file = {Berb22.pdf:pdf\\Berb22.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Preprint},
+ automatic = {yes},
+ citation-count = {1},
+}
+
+@article{Berb23,
+ author = {Berb\'{i}s, M. Alvaro and McClintock, David S. and Bychkov, Andrey and Van der Laak, Jeroen and Pantanowitz, Liron and Lennerz, Jochen K. and Cheng, Jerome Y. and Delahunt, Brett and Egevad, Lars and Eloy, Catarina and Farris, Alton B. and Fraggetta, Filippo and Garc\'{i}a del Moral, Raimundo and Hartman, Douglas J. and Herrmann, Markus D. and Hollemans, Eva and Iczkowski, Kenneth A. and Karsan, Aly and Kriegsmann, Mark and Salama, Mohamed E. and Sinard, John H. and Tuthill, J. Mark and Williams, Bethany and Casado-S\'{a}nchez, C\'{e}sar and S\'{a}nchez-Turri\'{o}n, V\'{i}ctor and Luna, Antonio and Aneiros-Fern\'{a}ndez, Jos\'{e} and Shen, Jeanne},
+ title = {Computational pathology in 2030: a Delphi study forecasting the role of AI in pathology within the next decade},
+ doi = {10.1016/j.ebiom.2022.104427},
+ year = {2023},
+ abstract = {Abstract unavailable},
+ url = {http://dx.doi.org/10.1016/j.ebiom.2022.104427},
+ file = {Berb23.pdf:pdf\\Berb23.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {eBioMedicine},
+ automatic = {yes},
+ citation-count = {13},
+ pages = {104427},
+ volume = {88},
+ pmid = {36603288},
}

@article{Berg18,
@@ -2495,6 +3053,64 @@ @article{Berg18
 file = {Berg18.pdf:pdf\\Berg18.pdf:PDF},
 optnote = {DIAG},
 pmid = {30355195},
+ ss_id = {966107ec26f4d315bb9b7cbbbb0e60a4534e998c},
+ all_ss_ids = {['966107ec26f4d315bb9b7cbbbb0e60a4534e998c']},
+ gscites = {3},
+}
+
+@article{Berg19,
+ author = {Bergkamp, Mayra I. and Tuladhar, Anil M. and van der Holst, Helena M. and van Leijsen, Esther M.C. and Ghafoorian, Mohsen and van Uden, Ingeborg W.M. and van Dijk, Ewoud J. and Norris, David G. and Platel, Bram and Esselink, Rianne A.J. and Leeuw, Frank-Erik de},
+ title = {Brain atrophy and strategic lesion location increases risk of parkinsonism in cerebral small vessel disease},
+ doi = {10.1016/j.parkreldis.2018.11.010},
+ year = {2019},
+ abstract = {Abstract unavailable},
+ url = {http://dx.doi.org/10.1016/j.parkreldis.2018.11.010},
+ file = {Berg19.pdf:pdf\\Berg19.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Parkinsonism \& Related Disorders},
+ automatic = {yes},
+ citation-count = {2},
+ pages = {94-100},
+ volume = {61},
+ pmid = {30448096},
+}
+
+@article{Berg22,
+ author = {Bergshoeff, Verona E. and Balkenhol, Maschenka C. A. and Haesevoets, Annick and Ruland, Andrea and Chenault, Michelene N. and Nelissen, Rik C. and Peutz, Carine J. and Clarijs, Ruud and Van der Laak, Jeroen A. W. M. and Takes, Robert P. and Van den Brekel, Michiel W. and Van Velthuysen, Marie-Louise F. and Ramaekers, Frans C. S.
and Kremer, Bernd and Speel, Ernst-Jan M.},
+ title = {Evaluation Criteria for Chromosome Instability Detection by FISH to Predict Malignant Progression in Premalignant Glottic Laryngeal Lesions},
+ doi = {10.3390/cancers14133260},
+ year = {2022},
+ abstract = {Background: The definition of objective, clinically applicable evaluation criteria for FISH 1c/7c in laryngeal precursor lesions for the detection of chromosome instability (CI). Copy Number Variations (CNV) for chromosomes 1 and 7 reflect the general ploidy status of premalignant head and neck lesions and can therefore be used as a marker for CI. Methods: We performed dual-target FISH for chromosomes 1 and 7 centromeres on 4 um formalin-fixed, paraffin-embedded tissue sections of 87 laryngeal premalignancies to detect CNVs. Thirty-five normal head and neck squamous cell samples were used as a control. First, the chromosome 7:1 ratio (CR) was evaluated per lesion. The normal range of CRs (>=0.84 <= 1.16) was based on the mean CR +/- 3 x SD found in the normal population. Second, the percentage of aberrant nuclei, harboring > 2 chromosomes of chromosome 1 and/or 7 (PAN), was established (cut-off value for abnormal PAN >= 10%). Results: PAN showed a stronger correlation with malignant progression than CR (resp. OR 5.6, p = 0.001 and OR 3.8, p = 0.009). PAN combined with histopathology resulted in a prognostic model with an area under the ROC curve (AUC) of 0.75 (s.e. 0.061, sensitivity 71%, specificity 70%). Conclusions: evaluation criteria for FISH 1c/7c based on PAN >= 10% provide the best prognostic information on the risk of malignant progression of premalignant laryngeal lesions as compared with criteria based on the CR. FISH 1c/7c detection can be applied in combination with histopathological assessment.},
+ url = {http://dx.doi.org/10.3390/cancers14133260},
+ file = {Berg22.pdf:pdf\\Berg22.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Cancers},
+ automatic = {yes},
+ citation-count = {0},
+ pages = {3260},
+ volume = {14},
+ pmid = {35805032},
+}
+
+@article{Berg23,
+ title = {ChatGPT and Generating a Differential Diagnosis Early in an Emergency Department Presentation},
+ journal = {Annals of Emergency Medicine},
+ doi = {https://doi.org/10.1016/j.annemergmed.2023.08.003},
+ url = {https://www.sciencedirect.com/science/article/pii/S019606442300642X},
+ author = {Hidde ten Berg and Bram van Bakel and Lieke van de Wouw and Kim E. Jie and Anoeska Schipper and Henry Jansen and Rory D. O'Connor and Bram van Ginneken and Steef Kurstjens},
+ citation-count = {0},
+ optnote = {DIAG, RADIOLOGY},
+ pmid = {37690022},
+ year = {2023},
+}
+
+@conference{Berk22,
+ author = {van den Berk, I.A.H. and Jacobs, C. and Kanglie, M.M.N.P. and Mets, O.M. and Snoeren, M. and Montauban van Swijndregt, A.D. and Taal, E.M. and van Engelen, T.S.R. and Prins, J. and Bipat, S. and Bossuyt, P.M.M. and Stoker, J.},
+ title = {Added value of artificial intelligence for the detection and analysis of lung nodules on ultra-low-dose CT in an emergency setting},
+ booktitle = RSNA,
+ year = {2022},
+ abstract = {PURPOSE: To analyze the added value of an artificial intelligence (AI) algorithm for lung nodule detection on ultra-low-dose CTs (ULDCT) acquired at the emergency department (ED). MATERIALS AND METHODS: In the OPTIMACT trial 873 patients with suspected non-traumatic pulmonary disease underwent ULDCT at the ED, 870 patients were available for analysis.
During the trial clinical reading of the ULDCTs was done by the radiologist on call and clinically relevant incidental lung nodules were reported in the radiology report. All ULDCTs were processed using CIRRUS Lung Nodule AI, a deep learning based algorithm for detection of non-calcified lung nodules >= 6 mm. Three chest radiologists independently reviewed the lung nodules identified during the trial and all marks from the AI algorithm. Each AI mark was accepted or rejected for being a lung nodule. Accepted AI marks were classified as solid, part-solid, non-solid and were volumetrically measured using semi-automatic segmentation software. Incidental lung nodules that (i) were scored as a nodule by at least two of the three chest radiologists and (ii) met the Fleischner criteria for clinically relevant lung nodules were used as reference standard. We assessed differences in proportion of true positive and false positive findings detected during prospective evaluation by the radiologist on call versus a standalone AI reading. RESULTS: During the trial 59 clinically relevant incidental lung nodules in 35/870 (4.0%) patients had been reported. 24/59 of these nodules were scored by at least two chest radiologists and met the Fleischner criteria, leaving 35/59 false positives. In 458/870 (53%) ULDCTs one or more AI marks were found, 1862 marks in total. 104/1862 (5.6%) AI marks were scored as clinically relevant nodules by at least two chest radiologists, leaving 1758/1862 (94%) false positive marks. Overall, 4 times more (104 vs 24) lung nodules were detected with the use of AI, at the expense of 50 times more false positive findings. CONCLUSION: The use of AI on ULDCT in ED patients with pulmonary disease results in the detection of more clinically relevant incidental lung nodules but is limited by the high false positive rate. CLINICAL RELEVANCE: In the ED setting focus lies on the acute presentation. Artificial intelligence aids in the detection of clinically relevant incidental lung nodules but is limited by the high false positive rate.},
+ optnote = {DIAG, RADIOLOGY},
}

@article{Berk94,
@@ -2515,15 +3131,6 @@ @article{Berk94
 gscites = {47},
 }

-@conference{Berk22,
- author = {van den Berk, I.A.H. and Jacobs, C. and Kanglie, M.M.N.P. and Mets, O.M. and Snoeren, M. and Montauban van Swijndregt, A.D. and Taal, E.M. and van Engelen, T.S.R. and Prins, J. and Bipat, S. and Bossuyt, P.M.M. and Stoker, J.},
- title = {Added value of artificial intelligence for the detection and analysis of lung nodules on ultra-low-dose CT in an emergency setting},
- booktitle = RSNA,
- year = {2022},
- abstract = {PURPOSE: To analyze the added value of an artificial intelligence (AI) algorithm for lung nodule detection on ultra-low-dose CT’s (ULDCT) acquired at the emergency department (ED). MATERIALS AND METHODS: In the OPTIMACT trial 873 patients with suspected non-traumatic pulmonary disease underwent ULDCT at the ED, 870 patients were available for analysis. During the trial clinical reading of the ULDCT’s was done by the radiologist on call and clinical relevant incidental lung nodules were reported in the radiology report. All ULDCT’s were processed using CIRRUS Lung Nodule AI, a deep learning based algorithm for detection of non-calcified lung nodules ≥ 6 mm. Three chest radiologists independently reviewed the lung nodules identified during the trial and all marks from the AI algorithm. Each AI mark was accepted or rejected for being a lung nodule.
Accepted AI marks were classified as solid, part-solid, non-solid and were volumetrically measured using semi-automatic segmentation software. Incidental lung nodules that (i) were scored as a nodule by at least two of the three chest radiologists and (ii) met the Fleischner criteria for clinically relevant lung nodules were used as reference standard. We assessed differences in proportion of true positive and false positive findings detected during prospective evaluation by the radiologist on call versus a standalone AI reading. RESULTS: During the trial 59 clinical relevant incidental lung nodules in 35/870 (4.0%) patients had been reported. 24/59 of these nodules were scored by at least two chest radiologists and met the Fleischner criteria, leaving 35/59 false positives. In 458/870 (53%) ULDCT’s one or more AI marks were found, 1862 marks in total. 104/1862 (5.6%) AI marks were scored as clinically relevant nodules by at least two chest radiologists, leaving 1758/1862 (94%) false positive marks. Overall, 4 times more (104 vs 24) lung nodules were detected with the use of AI, at the expense of 50 times more false positive findings. CONCLUSION: The use of AI on ULDCT in ED patients with pulmonary disease results in the detection of more clinically relevant incidental lung nodules but is limited by the high false positive rate. CLINICAL RELEVANCE: In the ED setting focus lies on the acute presentation. Artificial intelligence aids in the detection of clinical relevant incidental lung nodules but is limited by the high false positive rate.}, - optnote = {DIAG, RADIOLOGY}, -} - @article{Bian18, author = {Bian, Z and Charbonnier, J-P and Liu, J and Zhao, D and Lynch, D A and van Ginneken, B}, title = {Small airway segmentation in thoracic computed tomography scans: a machine learning approach}, @@ -2539,8 +3146,10 @@ @article{Bian18 optnote = {DIAG, RADIOLOGY}, pmid = {29995646}, gsid = {10454150924096992775}, - gscites = {5}, + gscites = {15}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/194505}, + ss_id = {6e733d61fef6612626c4c5350f143fc61db9a0da}, + all_ss_ids = {['6e733d61fef6612626c4c5350f143fc61db9a0da']}, } @article{Bili19, @@ -2552,10 +3161,27 @@ @article{Bili19 optnote = {DIAG, RADIOLOGY}, month = {1}, gsid = {10996095861110041445}, - gscites = {82}, + gscites = {543}, + all_ss_ids = {['0655dcaa39cf41a3609974840f91300d73b4aed1']}, } -@article{Blek19, +@article{Bili22, + title = {The Liver Tumor Segmentation Benchmark (LiTS)}, + journal = MIA, + pages = {102680}, + year = {2022}, + doi = {https://doi.org/10.1016/j.media.2022.102680}, + url = {https://www.sciencedirect.com/science/article/pii/S1361841522003085}, + author = {Patrick Bilic and Patrick Christ and Hongwei Bran Li and Eugene Vorontsov and Avi Ben-Cohen and Georgios Kaissis and Adi Szeskin and Colin Jacobs and Gabriel Efrain Humpire Mamani and Gabriel Chartrand and Fabian Lohofer and Julian Walter Holch and Wieland Sommer and Felix Hofmann and Alexandre Hostettler and Naama Lev-Cohain and Michal Drozdzal and Michal Marianne Amitai and Refael Vivanti and Jacob Sosna and Ivan Ezhov and Anjany Sekuboyina and Fernando Navarro and Florian Kofler and Johannes C. Paetzold and Suprosanna Shit and Xiaobin Hu and Jana Lipkova and Markus Rempfler and Marie Piraud and Jan Kirschke and Benedikt Wiestler and Zhiheng Zhang and Christian Hulsemeyer and Marcel Beetz and Florian Ettlinger and Michela Antonelli and Woong Bae and Miriam Bellver and Lei Bi and Hao Chen and Grzegorz Chlebus and Erik B. 
Dam and Qi Dou and Chi-Wing Fu and Bogdan Georgescu and Xavier Giro-i-Nieto and Felix Gruen and Xu Han and Pheng-Ann Heng and Jurgen Hesser and Jan Hendrik Moltz and Christian Igel and Fabian Isensee and Paul Jager and Fucang Jia and Krishna Chaitanya Kaluva and Mahendra Khened and Ildoo Kim and Jae-Hun Kim and Sungwoong Kim and Simon Kohl and Tomasz Konopczynski and Avinash Kori and Ganapathy Krishnamurthi and Fan Li and Hongchao Li and Junbo Li and Xiaomeng Li and John Lowengrub and Jun Ma and Klaus Maier-Hein and Kevis-Kokitsi Maninis and Hans Meine and Dorit Merhof and Akshay Pai and Mathias Perslev and Jens Petersen and Jordi Pont-Tuset and Jin Qi and Xiaojuan Qi and Oliver Rippel and Karsten Roth and Ignacio Sarasua and Andrea Schenk and Zengming Shen and Jordi Torres and Christian Wachinger and Chunliang Wang and Leon Weninger and Jianrong Wu and Daguang Xu and Xiaoping Yang and Simon Chun-Ho Yu and Yading Yuan and Miao Yue and Liping Zhang and Jorge Cardoso and Spyridon Bakas and Rickmer Braren and Volker Heinemann and Christopher Pal and An Tang and Samuel Kadoury and Luc Soler and Bram van Ginneken and Hayit Greenspan and Leo Joskowicz and Bjoern Menze},
+ abstract = {In this work, we report the set-up and results of the Liver Tumor Segmentation Benchmark (LiTS), which was organized in conjunction with the IEEE International Symposium on Biomedical Imaging (ISBI) 2017 and the International Conferences on Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2017 and 2018. The image dataset is diverse and contains primary and secondary tumors with varied sizes and appearances with various lesion-to-background levels (hyper-/hypo-dense), created in collaboration with seven hospitals and research institutions. Seventy-five submitted liver and liver tumor segmentation algorithms were trained on a set of 131 computed tomography (CT) volumes and were tested on 70 unseen test images acquired from different patients. We found that not a single algorithm performed best for both liver and liver tumors in the three events. The best liver segmentation algorithm achieved a Dice score of 0.963, whereas, for tumor segmentation, the best algorithms achieved Dice scores of 0.674 (ISBI 2017), 0.702 (MICCAI 2017), and 0.739 (MICCAI 2018). Retrospectively, we performed additional analysis on liver tumor detection and revealed that not all top-performing segmentation algorithms worked well for tumor detection. The best liver tumor detection method achieved a lesion-wise recall of 0.458 (ISBI 2017), 0.515 (MICCAI 2017), and 0.554 (MICCAI 2018), indicating the need for further research. LiTS remains an active benchmark and resource for research, e.g., contributing the liver-related segmentation tasks in http://medicaldecathlon.com/.
In addition, both data and online evaluation are accessible via www.lits-challenge.com.}, + pmid = {36481607}, + volume = {84}, + ss_id = {0655dcaa39cf41a3609974840f91300d73b4aed1}, + all_ss_ids = {['0655dcaa39cf41a3609974840f91300d73b4aed1']}, + gscites = {543}, +} + +@article{Blek19, author = {Jeroen Bleker and Thomas Kwee and Rudi Dierckx and IgleJan de Jong and Henkjan Huisman and Derya Yakar}, title = {Multiparametric {MRI} and auto-fixed volume of interest-based radiomics signature for clinically significant peripheral zone prostate cancer}, journal = ER, @@ -2563,21 +3189,23 @@ @article{Blek19 month = {9}, doi = {https://doi.org/10.1007/s00330-019-06488-y}, abstract = {Objectives - To create a radiomics approach based on multiparametric magnetic resonance imaging (mpMRI) features extracted from an auto-fixed volume of interest (VOI) that quantifies the phenotype of clinically significant (CS) peripheral zone (PZ) prostate cancer (PCa). - - Methods - This study included 206 patients with 262 prospectively called mpMRI prostate imaging reporting and data system 3-5 PZ lesions. Gleason scores > 6 were defined as CS PCa. Features were extracted with an auto-fixed 12-mm spherical VOI placed around a pin point in each lesion. The value of dynamic contrast-enhanced imaging(DCE), multivariate feature selection and extreme gradient boosting (XGB) vs. univariate feature selection and random forest (RF), expert-based feature pre-selection, and the addition of image filters was investigated using the training (171 lesions) and test (91 lesions) datasets. - - Results - The best model with features from T2-weighted (T2-w) + diffusion-weighted imaging (DWI) + DCE had an area under the curve (AUC) of 0.870 (95% CI 0.980-0.754). Removal of DCE features decreased AUC to 0.816 (95% CI 0.920-0.710), although not significantly (p = 0.119). Multivariate and XGB outperformed univariate and RF (p = 0.028). Expert-based feature pre-selection and image filters had no significant contribution. - - Conclusions - The phenotype of CS PZ PCa lesions can be quantified using a radiomics approach based on features extracted from T2-w + DWI using an auto-fixed VOI. Although DCE features improve diagnostic performance, this is not statistically significant. Multivariate feature selection and XGB should be preferred over univariate feature selection and RF. The developed model may be a valuable addition to traditional visual assessment in diagnosing CS PZ PCa.}, + To create a radiomics approach based on multiparametric magnetic resonance imaging (mpMRI) features extracted from an auto-fixed volume of interest (VOI) that quantifies the phenotype of clinically significant (CS) peripheral zone (PZ) prostate cancer (PCa). + + Methods + This study included 206 patients with 262 prospectively called mpMRI prostate imaging reporting and data system 3-5 PZ lesions. Gleason scores > 6 were defined as CS PCa. Features were extracted with an auto-fixed 12-mm spherical VOI placed around a pin point in each lesion. The value of dynamic contrast-enhanced imaging(DCE), multivariate feature selection and extreme gradient boosting (XGB) vs. univariate feature selection and random forest (RF), expert-based feature pre-selection, and the addition of image filters was investigated using the training (171 lesions) and test (91 lesions) datasets. + + Results + The best model with features from T2-weighted (T2-w) + diffusion-weighted imaging (DWI) + DCE had an area under the curve (AUC) of 0.870 (95% CI 0.980-0.754). 
Removal of DCE features decreased AUC to 0.816 (95% CI 0.920-0.710), although not significantly (p = 0.119). Multivariate and XGB outperformed univariate and RF (p = 0.028). Expert-based feature pre-selection and image filters had no significant contribution. + + Conclusions + The phenotype of CS PZ PCa lesions can be quantified using a radiomics approach based on features extracted from T2-w + DWI using an auto-fixed VOI. Although DCE features improve diagnostic performance, this is not statistically significant. Multivariate feature selection and XGB should be preferred over univariate feature selection and RF. The developed model may be a valuable addition to traditional visual assessment in diagnosing CS PZ PCa.}, file = {:pdf/Blek19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {31776744}, gsid = {12882217133592383323}, - gscites = {2}, + gscites = {39}, + ss_id = {06d09d8c1c217b9bfa0f89736258b8f3b4edc555}, + all_ss_ids = {['06d09d8c1c217b9bfa0f89736258b8f3b4edc555']}, } @article{Blek21, @@ -2590,17 +3218,20 @@ @article{Blek21 doi = {https://doi.org/10.1186/s13244-021-01099-y}, optnote = {DIAG, RADIOLOGY}, abstract = {Objectives - To investigate a previously developed radiomics-based biparametric magnetic resonance imaging (bpMRI) approach for discrimination of clinically significant peripheral zone prostate cancer (PZ csPCa) using multi-center, multi-vendor (McMv) and single-center, single-vendor (ScSv) datasets. - - Methods - This study's starting point was a previously developed ScSv algorithm for PZ csPCa whose performance was demonstrated in a single-center dataset. A McMv dataset was collected, and 262 PZ PCa lesions (9 centers, 2 vendors) were selected to identically develop a multi-center algorithm. The single-center algorithm was then applied to the multi-center dataset (single-multi-validation), and the McMv algorithm was applied to both the multi-center dataset (multi-multi-validation) and the previously used single-center dataset (multi-single-validation). The areas under the curve (AUCs) of the validations were compared using bootstrapping. - - Results - Previously the single-single validation achieved an AUC of 0.82 (95% CI 0.71-0.92), a significant performance reduction of 27.2% compared to the single-multi-validation AUC of 0.59 (95% CI 0.51-0.68). The new multi-center model achieved a multi-multi-validation AUC of 0.75 (95% CI 0.64-0.84). Compared to the multi-single-validation AUC of 0.66 (95% CI 0.56-0.75), the performance did not decrease significantly (p value: 0.114). Bootstrapped comparison showed similar single-center performances and a significantly different multi-center performance (p values: 0.03, 0.012). - - Conclusions - A single-center trained radiomics-based bpMRI model does not generalize to multi-center data. Multi-center trained radiomics-based bpMRI models do generalize, have equal single-center performance and perform better on multi-center data.}, + To investigate a previously developed radiomics-based biparametric magnetic resonance imaging (bpMRI) approach for discrimination of clinically significant peripheral zone prostate cancer (PZ csPCa) using multi-center, multi-vendor (McMv) and single-center, single-vendor (ScSv) datasets. + + Methods + This study's starting point was a previously developed ScSv algorithm for PZ csPCa whose performance was demonstrated in a single-center dataset. A McMv dataset was collected, and 262 PZ PCa lesions (9 centers, 2 vendors) were selected to identically develop a multi-center algorithm. 
The single-center algorithm was then applied to the multi-center dataset (single-multi-validation), and the McMv algorithm was applied to both the multi-center dataset (multi-multi-validation) and the previously used single-center dataset (multi-single-validation). The areas under the curve (AUCs) of the validations were compared using bootstrapping.
+
+ Results
+ Previously the single-single validation achieved an AUC of 0.82 (95% CI 0.71-0.92), a significant performance reduction of 27.2% compared to the single-multi-validation AUC of 0.59 (95% CI 0.51-0.68). The new multi-center model achieved a multi-multi-validation AUC of 0.75 (95% CI 0.64-0.84). Compared to the multi-single-validation AUC of 0.66 (95% CI 0.56-0.75), the performance did not decrease significantly (p value: 0.114). Bootstrapped comparison showed similar single-center performances and a significantly different multi-center performance (p values: 0.03, 0.012).
+
+ Conclusions
+ A single-center trained radiomics-based bpMRI model does not generalize to multi-center data. Multi-center trained radiomics-based bpMRI models do generalize, have equal single-center performance and perform better on multi-center data.},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/239809},
+ ss_id = {3d130766be579a65496f87ec07f51123206fe131},
+ all_ss_ids = {['3d130766be579a65496f87ec07f51123206fe131']},
+ gscites = {12},
}

@article{Blek22,
 pages = {1--10},
 year = {2022},
 publisher = {Springer},
+ ss_id = {9d6b9f6c203c73af662853a1320659a62eb9be4b},
+ all_ss_ids = {['9d6b9f6c203c73af662853a1320659a62eb9be4b']},
+ gscites = {10},
+}
+
+@article{Blek23,
+ author = {Bleker, Jeroen and Roest, Christian and Yakar, Derya and Huisman, Henkjan and Kwee, Thomas C.},
+ title = {The Effect of Image Resampling on the Performance of Radiomics-Based Artificial Intelligence in Multicenter Prostate MRI},
+ doi = {10.1002/jmri.28935},
+ year = {2023},
+ abstract = {Background: Single center MRI radiomics models are sensitive to data heterogeneity, limiting the diagnostic capabilities of current prostate cancer (PCa) radiomics models. Purpose: To study the impact of image resampling on the diagnostic performance of radiomics in a multicenter prostate MRI setting. Study Type: Retrospective. Population: Nine hundred thirty patients (nine centers, two vendors) with 737 eligible PCa lesions, randomly split into training (70%, N = 500), validation (10%, N = 89), and a held-out test set (20%, N = 148). Field Strength/Sequence: 1.5T and 3T scanners/T2-weighted imaging (T2W), diffusion-weighted imaging (DWI), and apparent diffusion coefficient maps. Assessment: A total of 48 normalized radiomics datasets were created using various resampling methods, including different target resolutions (T2W: 0.35, 0.5, and 0.8 mm; DWI: 1.37, 2, and 2.5 mm), dimensionalities (2D/3D) and interpolation techniques (nearest neighbor, linear, Bspline and Blackman windowed-sinc). Each of the datasets was used to train a radiomics model to detect clinically relevant PCa (International Society of Urological Pathology grade >= 2). Baseline models were constructed using 2D and 3D datasets without image resampling. The resampling configurations with highest validation performance were evaluated in the test dataset and compared to the baseline models. Statistical Tests: Area under the curve (AUC), DeLong test.
The significance level used was 0.05. Results: The best 2D resampling model (T2W: Bspline and 0.5 mm resolution, DWI: nearest neighbor and 2 mm resolution) significantly outperformed the 2D baseline (AUC: 0.77 vs. 0.64). The best 3D resampling model (T2W: linear and 0.8 mm resolution, DWI: nearest neighbor and 2.5 mm resolution) significantly outperformed the 3D baseline (AUC: 0.79 vs. 0.67). Data Conclusion: Image resampling has a significant effect on the performance of multicenter radiomics artificial intelligence in prostate MRI. The recommended 2D resampling configuration is isotropic resampling with T2W at 0.5 mm (Bspline interpolation) and DWI at 2 mm (nearest neighbor interpolation). For the 3D radiomics, this work recommends isotropic resampling with T2W at 0.8 mm (linear interpolation) and DWI at 2.5 mm (nearest neighbor interpolation). Evidence Level: 3. Technical Efficacy: Stage 2},
+ url = {http://dx.doi.org/10.1002/jmri.28935},
+ file = {Blek23.pdf:pdf\\Blek23.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Journal of Magnetic Resonance Imaging},
+ automatic = {yes},
+ citation-count = {0},
+ pmid = {37572098},
}

@article{Blue10,
 month = {4},
 gsid = {2663774235436162653},
 gscites = {64},
+ ss_id = {c05538614208ef1e78d54fa9325730adcad07b7d},
+ all_ss_ids = {['c05538614208ef1e78d54fa9325730adcad07b7d']},
}

@article{Blue12,
}

@article{Blue15,
 month = {3},
 gsid = {7320665817091407981},
 gscites = {3},
+ ss_id = {5447ca0a02102769866deb7a4cccbeb0dcd9ad46},
+ all_ss_ids = {['5447ca0a02102769866deb7a4cccbeb0dcd9ad46']},
}

@mastersthesis{Boer18,
}

@article{Boer20,
 year = {2020},
 month = {3},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/217643},
+ ss_id = {4992f23e29e711eec8b92e17ce35610da3758ecd},
+ all_ss_ids = {['4992f23e29e711eec8b92e17ce35610da3758ecd']},
+ gscites = {27},
}

@mastersthesis{Boer2020,
 author = {Tristan de Boer},
 title = {A feasibility study for Deep Learning Image Guided Guidewire Tracking for Image-guided Interventions},
 abstract = {A feasibility study for Deep Learning Image Guided Guidewire Tracking for Image-guided Interventions
 A current challenge in real-time magnetic resonance imaging (MRI) guided minimally invasive images is needle tracking and planning.
We propose a pipeline for automatic object detection using a state-of-the-art object detection network. Predictions by the object detection network were used to translate the MRI plane to keep a guidewire tip in a plane. We evaluated the pipeline on displacement error between the prediction and the actual location of the guidewire tip in a setup with an anthropomorphic blood vessel. For this setup, we hypothesized that the network should be able to correctly predict the actual location within a margin of 10 mm, at least within 1000 ms.
+ Results show that the pipeline can accurately track the guidewire tip in real-time (within 458 ms), with a mean displacement error of 7 mm (s = 4). Based on this evidence, we have demonstrated the feasibility of deep learning assisted image-guided interventions, creating possibilities for other deep learning guided interventions. Our proposed method shows potential for cryoablation. During these types of minimally invasive procedures tracking needles can be a challenge.},
 file = {:pdf/Boer20a.pdf:PDF},
 optnote = {DIAG},
 school = {Radboud University},
}

@article{Boga22,
 publisher = {Springer Science and Business Media {LLC}},
 year = {2021},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/251830},
+ ss_id = {a5c0ae9b50b0058f6e16bdc59a95e489b897e430},
+ all_ss_ids = {['a5c0ae9b50b0058f6e16bdc59a95e489b897e430']},
+ gscites = {10},
+}
+
+@article{Boga23,
+ author = {Bogaerts, Joep M A and van Bommel, Majke H D and Hermens, Rosella P M G and Steenbeek, Miranda P and de Hullu, Joanne A and van der Laak, Jeroen A W M and Simons, Michiel and STIC consortium},
+ title = {Consensus based recommendations for the diagnosis of serous tubal intraepithelial carcinoma: an international Delphi study},
+ doi = {10.1111/his.14902},
+ year = {2023},
+ abstract = {Aim: Reliably diagnosing or safely excluding serous tubal intraepithelial carcinoma (STIC), a precursor lesion of tubo-ovarian high-grade serous carcinoma (HGSC), is crucial for individual patient care, for better understanding the oncogenesis of HGSC, and for safely investigating novel strategies to prevent tubo-ovarian carcinoma. To optimize STIC diagnosis and increase its reproducibility, we set up a three-round Delphi study. Methods and results: In round 1, an international expert panel of 34 gynecologic pathologists, from 11 countries, was assembled to provide input regarding STIC diagnosis, which was used to develop a set of statements. In round 2, the panel rated their level of agreement with those statements on a 9-point Likert scale. In round 3, statements without previous consensus were rated again by the panel while anonymously disclosing the responses of the other panel members. Finally, each expert was asked to approve or disapprove the complete set of consensus statements. The panel indicated their level of agreement with 64 statements. A total of 27 statements (42%) reached consensus after three rounds. These statements reflect the entire diagnostic work-up for pathologists, regarding processing and macroscopy (three statements); microscopy (eight statements); immunohistochemistry (nine statements); interpretation and reporting (four statements); and miscellaneous (three statements). The final set of consensus statements was approved by 85%. Conclusion: This study provides an overview of current clinical practice regarding STIC diagnosis amongst expert gynecopathologists.
The experts' consensus statements form the basis for a set of recommendations, which may help towards more consistent STIC diagnosis.},
+ url = {http://dx.doi.org/10.1111/his.14902},
+ file = {Boga23.pdf:pdf\\Boga23.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Histopathology},
+ automatic = {yes},
+ citation-count = {0},
+ pages = {67-79},
+ volume = {83},
+ pmid = {36939551},
+}

@article{Bogu19,
 pmid = {30835214},
 year = {2019},
 gsid = {11669534821534873919},
- gscites = {19},
+ gscites = {137},
+ ss_id = {6fd044b6c49661a5a1db4b49543c9a6ca4b108fb},
+ all_ss_ids = {['6fd044b6c49661a5a1db4b49543c9a6ca4b108fb']},
}

+@article{Bohn22,
+ author = {Bohner, Lauren and Vinayahalingam, Shankeeth and Kleinheinz, Johannes and Hanisch, Marcel},
+ title = {Digital Implant Planning in Patients with Ectodermal Dysplasia: Clinical Report.},
+ doi = {10.3390/ijerph19031489},
+ issue = {3},
+ volume = {19},
+ abstract = {Ectodermal dysplasia may severely affect the development of jaw growth and facial appearance. This case report describes the treatment of two patients suffering from ectodermal dysplasia, both treated with dental implant-fixed restorations by means of computer-guided surgery. Two patients presented to our clinic with congenital malformation of the jaw as a manifestation of ectodermal dysplasia, showing oligodontia and alveolar ridge deficit. Clinical examination revealed multiple unattached teeth and a need for prosthetic therapy. For both cases, dental implants were placed based on a computer-guided planning. A surgical guide was used to determine the positioning of the dental implants according to the prosthetic planning, which allowed for a satisfactory aesthetic and functional outcome. Computer-guided implant placement allowed predictable treatment of complex cases with satisfactory aesthetic and functional results. Adequate surgical and prosthetic planning is considered critical for treatment success.},
+ file = {Bohn22.pdf:pdf\\Bohn22.pdf:PDF},
+ journal = {International Journal of Environmental Research and Public Health},
+ optnote = {DIAG, RADIOLOGY},
+ pmid = {35162510},
+ year = {2022},
+}

@article{Bois12,
 number = {1},
 pmid = {22189243},
 month = {1},
+ ss_id = {69ee456f69165c71d2c731334f93039c7cf21f9b},
+ all_ss_ids = {['69ee456f69165c71d2c731334f93039c7cf21f9b']},
+ gscites = {3},
}

@inproceedings{Bokh18a,
 file = {Bokh18a.pdf:pdf\\Bokh18a.pdf:PDF},
 optnote = {DIAG},
 gsid = {18087021687980029988},
- gscites = {2},
+ gscites = {11},
+ ss_id = {bf16e7e46d23a0e83bd2efe2d13e7ba7b0c47b51},
+ all_ss_ids = {['bf16e7e46d23a0e83bd2efe2d13e7ba7b0c47b51']},
}

@inproceedings{Bokh19,
 pages = {81-94},
 url = {http://proceedings.mlr.press/v102/bokhorst19a.html},
 abstract = {We investigate the problem of building convolutional networks for semantic segmentation in histopathology images when weak supervision in the form of sparse manual annotations is provided in the training set. We propose to address this problem by modifying the loss function in order to balance the contribution of each pixel of the input data. We introduce and compare two approaches of loss balancing when sparse annotations are provided, namely (1) instance based balancing and (2) mini-batch based balancing.
We also consider a scenario of full supervision in the form of dense annotations, and compare the performance of using either sparse or dense annotations with the proposed balancing schemes. Finally, we show that using a bulk of sparse annotations and a - small fraction of dense annotations allows to achieve performance comparable to full supervision.}, + small fraction of dense annotations allows to achieve performance comparable to full supervision.}, file = {Bokh19.pdf:pdf\\Bokh19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {9237806221216165855}, - gscites = {2}, + gscites = {34}, + ss_id = {e58b84e96868eb5f165ab710ef43668c4d8bbf74}, + all_ss_ids = {['e58b84e96868eb5f165ab710ef43668c4d8bbf74']}, } @conference{Bokh19a, @@ -2821,7 +3520,8 @@ @article{Bokh19b pmid = {31844269}, month = {12}, gsid = {15779257869957117093}, - gscites = {2}, + gscites = {28}, + all_ss_ids = {['aff9f3cd8024fb64c7ea36d396a36ebf33d01f5b', 'c3e98e4b01c27affe1b6c161172c5e27e8141618']}, } @conference{Bokh20, @@ -2829,16 +3529,16 @@ @conference{Bokh20 booktitle = ECP, title = {Computer-assisted hot-spot selection for tumor budding assessment in colorectal cancer}, abstract = {Background & objectives - Tumor budding (TB) is an established prognosticator for colorectal cancer. Detection of the hot-spot to score TB is based on visual inspection, hindering reproducibility of this important factor. We present an algorithm that can potentially assist pathologists in this task. - - Methods - We used a previously developed algorithm for the detection of tumor buds in pan-cytokeratin stained whole slide images, calculating the number of buds for each location using a circle with 0.785mm2 surface area. From these numbers, density heatmaps were produced. The algorithm was applied to 270 slides from Bern University hospital, in which hot-spots and tumor buds were visually identified. - - Results - Heat maps were created and we located the hand-selected hotspot and noted the associated TB number. The differences and similarities between computer identified and manually selected hot-spots were visually assessed as well as via histograms. Preliminary results show that the heatmaps are helpful, as locations with the highest TB density (the top 15%) also include the hand-selected hotspots. The full results will be presented during the conference. - - Conclusion - The presented algorithm can assist the pathologist in selecting the hot-spot with the highest tumor bud count with more ease at low magnification and can help to reduce the high interobserver variability among pathologists in scoring tumor budding.}, + Tumor budding (TB) is an established prognosticator for colorectal cancer. Detection of the hot-spot to score TB is based on visual inspection, hindering reproducibility of this important factor. We present an algorithm that can potentially assist pathologists in this task. + + Methods + We used a previously developed algorithm for the detection of tumor buds in pan-cytokeratin stained whole slide images, calculating the number of buds for each location using a circle with 0.785mm2 surface area. From these numbers, density heatmaps were produced. The algorithm was applied to 270 slides from Bern University hospital, in which hot-spots and tumor buds were visually identified. + + Results + Heat maps were created and we located the hand-selected hotspot and noted the associated TB number. 
The differences and similarities between computer identified and manually selected hot-spots were visually assessed as well as via histograms. Preliminary results show that the heatmaps are helpful, as locations with the highest TB density (the top 15%) also include the hand-selected hotspots. The full results will be presented during the conference.

 Conclusion
 The presented algorithm can assist the pathologist in selecting the hot-spot with the highest tumor bud count with more ease at low magnification and can help to reduce the high interobserver variability among pathologists in scoring tumor budding.},
 optnote = {DIAG},
 year = {2020},
}

@conference{Bokh20a,
 booktitle = ECP,
 title = {Deep learning based tumor bud detection in pan-cytokeratin stained colorectal cancer whole-slide images},
 abstract = {Background & objectives
 Tumor budding (TB) is an established prognosticator for colorectal cancer. Deep learning based TB assessment has the potential to improve diagnostic reproducibility and efficiency. We developed an algorithm that can detect individual tumor buds in pan-cytokeratin stained colorectal cancer slides
+ Methods
+ Tumor-bud candidates (n=1765, collected from 58 whole slide images; WSI) were labeled by seven experts as either TB, poorly differentiated cluster, or neither. The 58 slides were randomly split into a training (49) and test-set (9). A deep learning (DL) model was trained using the buds identified by the experts in the training set.
+ Results
+ The algorithm was tested on the nine remaining WSI and 270 WSI from pan-cytokeratin stained slides from Bern University hospital, in which hot spots and TB were manually scored. An F1 score of 0.82 was found for correspondence at the bud level between experts and DL. A correlation of 0.745 was found between the manually counted buds within the hotspots and the automated method in the 270 WSIs.
+ Conclusion
+ Assessment of tumor budding as a prognostic factor for colorectal cancer can be automated using deep learning. At the level of individual tumor buds, correspondence between DL and experts is high and comparable to the inter-rater variability. However, compared to the manual procedure, the algorithm yields higher counts for cases with relatively high bud densities (>15). Follow-up studies will focus on the assessment of TB in H&E stained slides.},
 optnote = {DIAG},
 year = {2020},
}

+@article{Bokh21,
+ author = {Bokhorst, John-Melle and Nagtegaal, Iris D. and Fraggetta, Filippo and Vatrano, Simona and Mesker, Wilma and Vieth, Michael and van der Laak, Jeroen and Ciompi, Francesco},
+ title = {Automated risk classification of colon biopsies based on semantic segmentation of histopathology images},
+ doi = {10.48550/ARXIV.2109.07892},
+ year = {2021},
+ abstract = {Artificial Intelligence (AI) can potentially support histopathologists in the diagnosis of a broad spectrum of cancer types. In colorectal cancer (CRC), AI can alleviate the laborious task of characterization and reporting on resected biopsies, including polyps, the numbers of which are increasing as a result of CRC population screening programs, ongoing in many countries all around the globe.
Here, we present an approach to address two major challenges in automated assessment of CRC histopathology whole-slide images. First, we present an AI-based method to segment multiple tissue compartments in the H\&E-stained whole-slide image, which provides a different, more perceptible picture of tissue morphology and composition. We test and compare a panel of state-of-the-art loss functions available for segmentation models, and provide indications about their use in histopathology image segmentation, based on the analysis of a) a multi-centric cohort of CRC cases from five medical centers in the Netherlands and Germany, and b) two publicly available datasets on segmentation in CRC. Second, we use the best performing AI model as the basis for a computer-aided diagnosis system (CAD) that classifies colon biopsies into four main categories that are relevant pathologically. We report the performance of this system on an independent cohort of more than 1,000 patients. The results show the potential of such an AI-based system to assist pathologists in diagnosis of CRC in the context of population screening. We have made the segmentation model available for research use on https://grand-challenge.org/algorithms/colon-tissue-segmentation/.},
+ url = {https://arxiv.org/abs/2109.07892},
+ file = {Bokh21.pdf:pdf\\Bokh21.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {arXiv:2109.07892},
+ automatic = {yes},
+}
+
+@article{Bokh23,
+ author = {J. Bokhorst and I. Nagtegaal and F. Fraggetta and S. Vatrano and W. Mesker and Michael Vieth and J. A. van der Laak and F. Ciompi},
+ title = {Deep learning for multi-class semantic segmentation enables colorectal cancer detection and classification in digital pathology images},
+ abstract = {In colorectal cancer (CRC), artificial intelligence (AI) can alleviate the laborious task of characterization and reporting on resected biopsies, including polyps, the numbers of which are increasing as a result of CRC population screening programs ongoing in many countries all around the globe. Here, we present an approach to address two major challenges in the automated assessment of CRC histopathology whole-slide images. We present an AI-based method to segment multiple (n=14) tissue compartments in the H\&E-stained whole-slide image, which provides a different, more perceptible picture of tissue morphology and composition. We test and compare a panel of state-of-the-art loss functions available for segmentation models, and provide indications about their use in histopathology image segmentation, based on the analysis of (a) a multi-centric cohort of CRC cases from five medical centers in the Netherlands and Germany, and (b) two publicly available datasets on segmentation in CRC. We used the best performing AI model as the basis for a computer-aided diagnosis system that classifies colon biopsies into four main categories that are relevant pathologically. We report the performance of this system on an independent cohort of more than 1000 patients. The results show that with a good segmentation network as a base, a tool can be developed which can support pathologists in the risk stratification of colorectal cancer patients, among other possible uses.
We have made the segmentation model available for research use on https://grand-challenge.org/algorithms/colon-tissue-segmentation/.},
+ file = {Bokh23.pdf:pdf\\Bokh23.pdf:PDF},
+ optnote = {DIAG, PATHOLOGY},
+ pmid = {37225743},
+ year = {2023},
+ journal = NATSCIREP,
+ volume = {13},
+ pages = {8398},
+ doi = {10.1038/s41598-023-35491-z},
+ ss_id = {f7c9d9a217387d3a73a39b232a3bc14e6706b533},
+ all_ss_ids = {['f7c9d9a217387d3a73a39b232a3bc14e6706b533']},
+ gscites = {1},
+}
+
+@article{Bokh23a,
+ author = {J. Bokhorst and I. Nagtegaal and I. Zlobec and H. Dawson and K. Sheahan and F. Simmer and R. Kirsch and Michael Vieth and A. Lugli and J. A. van der Laak and F. Ciompi},
+ title = {Semi-Supervised Learning to Automate Tumor Bud Detection in Cytokeratin-Stained Whole-Slide Images of Colorectal Cancer},
+ url = {https://www.semanticscholar.org/paper/be741d6f455a941f206de7fecb44f81678f385bf},
+ abstract = {Simple Summary: Tumor budding is a promising and cost-effective histological biomarker with strong prognostic value in colorectal cancer. It is defined by the presence of single tumor cells or small clusters of cells within the tumor or at the tumor-invasion front. Deep learning based tumor bud assessment can potentially improve diagnostic reproducibility and efficiency. This study aimed to develop a deep learning algorithm to detect tumor buds in cytokeratin-stained images automatically. We used a semi-supervised learning technique to overcome the limitations of a small dataset. Validation of our model showed a sensitivity of 91% and a fairly strong correlation between a human annotator and our deep learning method. We demonstrate that the automated tumor bud count achieves a prognostic value similar to visual estimation. We also investigate new metrics for quantifying buds, such as density and dispersion, and report on their predictive value. Abstract: Tumor budding is a histopathological biomarker associated with metastases and adverse survival outcomes in colorectal carcinoma (CRC) patients. It is characterized by the presence of single tumor cells or small clusters of cells within the tumor or at the tumor-invasion front. In order to obtain a tumor budding score for a patient, the region with the highest tumor bud density must first be visually identified by a pathologist, after which buds will be counted in the chosen hotspot field. The automation of this process will expectedly increase efficiency and reproducibility. Here, we present a deep learning convolutional neural network model that automates the above procedure. For model training, we used a semi-supervised learning method, to maximize the detection performance despite the limited amount of labeled training data. The model was tested on an independent dataset in which human- and machine-selected hotspots were mapped in relation to each other and manual and machine detected tumor bud numbers in the manually selected fields were compared. We report the results of the proposed method in comparison with visual assessment by pathologists.
We show that the automated tumor bud count achieves a prognostic value comparable with visual estimation, while based on an objective and reproducible quantification. We also explore novel metrics to quantify buds such as density and dispersion and report their prognostic value. We have made the model available for research use on the grand-challenge platform.},
+ file = {Bokh23a.pdf:pdf\\Bokh23a.pdf:PDF},
+ optnote = {DIAG, PATHOLOGY},
+ journal = {Cancers},
+ volume = {15},
+ issue = {7},
+ pages = {2079},
+ pmid = {37046742},
+ year = {2023},
+ doi = {10.3390/cancers15072079},
+ ss_id = {be741d6f455a941f206de7fecb44f81678f385bf},
+ all_ss_ids = {['be741d6f455a941f206de7fecb44f81678f385bf']},
+ gscites = {2},
+}
+
+@article{Bokh23b,
+ author = {Bokhorst, John-Melle and Ciompi, Francesco and \"{O}zt\"{u}rk, Sonay Kus and Oguz Erdogan, Ayse Selcen and Vieth, Michael and Dawson, Heather and Kirsch, Richard and Simmer, Femke and Sheahan, Kieran and Lugli, Alessandro and Zlobec, Inti and van der Laak, Jeroen and Nagtegaal, Iris D.},
+ title = {Fully Automated Tumor Bud Assessment in Hematoxylin and Eosin-Stained Whole Slide Images of Colorectal Cancer},
+ doi = {10.1016/j.modpat.2023.100233},
+ year = {2023},
+ abstract = {Abstract unavailable},
+ url = {http://dx.doi.org/10.1016/j.modpat.2023.100233},
+ file = {Bokh23b.pdf:pdf\\Bokh23b.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Modern Pathology},
+ citation-count = {0},
+ automatic = {yes},
+ pages = {100233},
+ volume = {36},
+ ss_id = {0e7ef01ded474485f8f715342f18907f451526c5},
+ all_ss_ids = {['0e7ef01ded474485f8f715342f18907f451526c5']},
+ gscites = {0},
+}
+
+@inproceedings{Bomm22,
+ author = {van Bommel, Majke and Bogaerts, Joep and Hermens, Rosella and Steenbeek, Miranda and de Hullu, Joanne and van der Laak, Jeroen and Simons, Michiel},
+ title = {2022-RA-646-ESGO Consensus based recommendations for the diagnosis of serous tubal intraepithelial carcinoma, an international delphi study},
+ doi = {10.1136/ijgc-2022-esgo.790},
+ year = {2022},
+ abstract = {Introduction/Background Reliable diagnosis of precursor lesions to high grade serous cancer (HGSC) is crucial, for individual patient care, for better understanding its oncogenesis and for research regarding novel strategies to prevent ovarian cancer. These precursor lesions, serous tubal intraepithelial carcinoma (STIC), are difficult to diagnose: the lesion is small, rare, and clear diagnostic criteria are lacking. We aim to optimize STIC diagnosis by providing recommendations for STIC diagnosis, based on international consensus from gynecopathologists.
Methodology A three-round Delphi study was conducted to systematically explore current clinical practice and to reach consensus regarding STIC diagnosis. First, an expert panel consisting of international gynecopathologists was formed. This panel was asked to provide information regarding all relevant aspects of STIC diagnostics, which was used to form a set of statements. Second, the panel rated their agreement on those statements. Third, statements without consensus, according to predefined rules, were rated again by the panel members in the light of the anonymous responses to round 2 of the other panel members. Finally, each expert was asked to either approve or disapprove the set of consensus statements. Results A panel of 34 gynecopathologists from 11 countries rated their agreement on 64 statements. A total of 27 statements (42%) reached consensus. This set reflects the entire diagnostic workup for pathologists, regarding processing and macroscopy, microscopy, immunohistochemistry, interpretation and reporting. The final set of consensus statements was approved by 76% of the experts. Conclusion A set of 27 statements regarding STIC diagnosis reached consensus by an international expert panel of gynecopathologists. Those consensus statements contribute to a basis for international standards for STIC diagnosis, which are urgently needed for better understanding of HGSC, for better counselling of patients, and for safely investigating novel preventive strategies for women at high risk of ovarian cancer.}, + url = {http://dx.doi.org/10.1136/ijgc-2022-esgo.790}, + file = {Bomm22.pdf:pdf\\Bomm22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Pathology}, + automatic = {yes}, + citation-count = {0}, } @article{Boo09, @@ -2879,6 +3661,8 @@ @article{Boo09 gsid = {5227608991913046865}, gscites = {38}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/81498}, + ss_id = {40cc7c929546e4422263fa844a8cd6f47d266ead}, + all_ss_ids = {['40cc7c929546e4422263fa844a8cd6f47d266ead']}, } @article{Boo11, @@ -2895,6 +3679,9 @@ @article{Boo11 number = {12}, pmid = {21963532}, month = {12}, + ss_id = {3363021df95dabbccfb6e95762195b67c9d8a259}, + all_ss_ids = {['3363021df95dabbccfb6e95762195b67c9d8a259']}, + gscites = {25}, } @article{Boo11a, @@ -2911,6 +3698,9 @@ @article{Boo11a number = {5}, pmid = {22021501}, month = {11}, + ss_id = {271cc27f0f6b1b4a87e43eed7c85df9c729b3279}, + all_ss_ids = {['271cc27f0f6b1b4a87e43eed7c85df9c729b3279']}, + gscites = {0}, } @article{Boo11b, @@ -2927,6 +3717,9 @@ @article{Boo11b number = {9}, pmid = {21570679}, month = {9}, + ss_id = {23bcee96f8bb86f925bb2d3d0d3b0fb3d870fcee}, + all_ss_ids = {['23bcee96f8bb86f925bb2d3d0d3b0fb3d870fcee']}, + gscites = {18}, } @phdthesis{Boo12, @@ -2957,6 +3750,9 @@ @article{Boo12a number = {8}, pmid = {22447377}, month = {3}, + ss_id = {d67d1dbabc705e5b2d7cb675e8737ff249c1a88f}, + all_ss_ids = {['d67d1dbabc705e5b2d7cb675e8737ff249c1a88f']}, + gscites = {32}, } @inproceedings{Boom12, @@ -2972,6 +3768,9 @@ @inproceedings{Boom12 file = {:./pdf/Boom12.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {2}, + ss_id = {4d53bd36934e6f17b2e3eb58a8764d03340402e6}, + all_ss_ids = {['4d53bd36934e6f17b2e3eb58a8764d03340402e6']}, + gscites = {2}, } @conference{Boom12a, @@ -3008,6 +3807,21 @@ @article{Boom14 gsid = {1905732569550541986}, gscites = {5}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/136581}, + ss_id = {608a5409afa9a5f535d5b3f4776b75ae92ec9e26}, + all_ss_ids = {['608a5409afa9a5f535d5b3f4776b75ae92ec9e26']}, +} + 
+@article{Bort20, + author = {Bortsova, Gerda and Gonz\'{a}lez-Gonzalo, Cristina and Wetstein, Suzanne C. and Dubost, Florian and Katramados, Ioannis and Hogeweg, Laurens and Liefers, Bart and van Ginneken, Bram and Pluim, Josien P. W. and Veta, Mitko and S\'{a}nchez, Clara I. and de Bruijne, Marleen}, + title = {~~Adversarial Attack Vulnerability of Medical Image Analysis Systems: Unexplored Factors}, + doi = {10.48550/ARXIV.2006.06356}, + year = {2020}, + abstract = {Adversarial attacks are considered a potentially serious security threat for machine learning systems. Medical image analysis (MedIA) systems have recently been argued to be vulnerable to adversarial attacks due to strong financial incentives and the associated technological infrastructure. In this paper, we study previously unexplored factors affecting adversarial attack vulnerability of deep learning MedIA systems in three medical domains: ophthalmology, radiology, and pathology. We focus on adversarial black-box settings, in which the attacker does not have full access to the target model and usually uses another model, commonly referred to as surrogate model, to craft adversarial examples. We consider this to be the most realistic scenario for MedIA systems. Firstly, we study the effect of weight initialization (ImageNet vs. random) on the transferability of adversarial attacks from the surrogate model to the target model. Secondly, we study the influence of differences in development data between target and surrogate models. We further study the interaction of weight initialization and data differences with differences in model architecture. All experiments were done with a perturbation degree tuned to ensure maximal transferability at minimal visual perceptibility of the attacks. Our experiments show that pre-training may dramatically increase the transferability of adversarial examples, even when the target and surrogate's architectures are different: the larger the performance gain using pre-training, the larger the transferability. Differences in the development data between target and surrogate models considerably decrease the performance of the attack; this decrease is further amplified by difference in the model architecture. We believe these factors should be considered when developing security-critical MedIA systems planned to be deployed in clinical practice.}, + url = {https://arxiv.org/abs/2006.06356}, + file = {Bort20.pdf:pdf\\Bort20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2006.06356}, + automatic = {yes}, } @article{Bort21, @@ -3020,11 +3834,26 @@ @article{Bort21 url = {https://arxiv.org/abs/2006.06356}, author = {Bortsova, Gerda and Gonz\'{a}lez-Gonzalo, Cristina and Wetstein, Suzanne C. and Dubost, Florian and Katramados, Ioannis and Hogeweg, Laurens and Liefers, Bart and van Ginneken, Bram and Pluim, Josien P.W. and Veta, Mitko and S\'{a}nchez, Clara I. and de Bruijne, Marleen}, abstract = {Adversarial attacks are considered a potentially serious security threat for machine learning systems. Medical image analysis (MedIA) systems have recently been argued to be vulnerable to adversarial attacks due to strong financial incentives and the associated technological infrastructure. In this paper, we study previously unexplored factors affecting adversarial attack vulnerability of deep learning MedIA systems in three medical domains: ophthalmology, radiology, and pathology. 
We focus on adversarial black-box settings, in which the attacker does not have full access to the target model and usually uses another model, commonly referred to as surrogate model, to craft adversarial examples that are then transferred to the target model. We consider this to be the most realistic scenario for MedIA systems. Firstly, we study the effect of weight initialization (pre-training on ImageNet or random initialization) on the transferability of adversarial attacks from the surrogate model to the target model, i.e., how effective attacks crafted using the surrogate model are on the target model. Secondly, we study the influence of differences in development (training and validation) data between target and surrogate models. We further study the interaction of weight initialization and data differences with differences in model architecture. All experiments were done with a perturbation degree tuned to ensure maximal transferability at minimal visual perceptibility of the attacks. Our experiments show that pre-training may dramatically increase the transferability of adversarial examples, even when the target and surrogate's architectures are different: the larger the performance gain using pre-training, the larger the transferability. Differences in the development data between target and surrogate models considerably decrease the performance of the attack; this decrease is further amplified by difference in the model architecture. We believe these factors should be considered when developing security-critical MedIA systems planned to be deployed in clinical practice. We recommend avoiding using only standard components, such as pre-trained architectures and publicly available datasets, as well as disclosure of design specifications, in addition to using adversarial defense methods. When evaluating the vulnerability of MedIA systems to adversarial attacks, various attack scenarios and target-surrogate differences should be simulated to achieve realistic robustness estimates. The code and all trained models used in our experiments are publicly available. - - (The first three authors contributed equally to this work.)}, + + (The first three authors contributed equally to this work.)}, publisher = {Elsevier}, optnote = {DIAG}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/238599}, + all_ss_ids = {['0f779d6ca748e8a73a78f2e1dadaba3161e80950']}, + gscites = {0}, +} + +@article{Bosm21, + author = {Bosma, Joeran S. and Saha, Anindo and Hosseinzadeh, Matin and Slootweg, Ilse and de Rooij, Maarten and Huisman, Henkjan}, + title = {~~Annotation-efficient cancer detection with report-guided lesion annotation for deep learning-based prostate cancer detection in bpMRI}, + doi = {10.48550/ARXIV.2112.05151}, + year = {2021}, + abstract = {Deep learning-based diagnostic performance increases with more annotated data, but large-scale manual annotations are expensive and labour-intensive. Experts evaluate diagnostic images during clinical routine, and write their findings in reports. Leveraging unlabelled exams paired with clinical reports could overcome the manual labelling bottleneck. We hypothesise that detection models can be trained semi-supervised with automatic annotations generated using model predictions, guided by sparse information from clinical reports. To demonstrate efficacy, we train clinically significant prostate cancer (csPCa) segmentation models, where automatic annotations are guided by the number of clinically significant findings in the radiology reports. 
We included 7,756 prostate MRI examinations, of which 3,050 were manually annotated. We evaluated prostate cancer detection performance on 300 exams from an external centre with histopathology-confirmed ground truth. Semi-supervised training improved patient-based diagnostic area under the receiver operating characteristic curve from $87.2 \pm 0.8\%$ to $89.4 \pm 1.0\%$ ($P<10^{-4}$) and improved lesion-based sensitivity at one false positive per case from $76.4 \pm 3.8\%$ to $83.6 \pm 2.3\%$ ($P<10^{-4}$). Semi-supervised training was 14$\times$ more annotation-efficient for case-based performance and 6$\times$ more annotation-efficient for lesion-based performance. This improved performance demonstrates the feasibility of our training procedure. Source code is publicly available at github.com/DIAGNijmegen/Report-Guided-Annotation. Best csPCa detection algorithm is available at grand-challenge.org/algorithms/bpmri-cspca-detection-report-guided-annotations/.}, + url = {https://arxiv.org/abs/2112.05151}, + file = {Bosm21.pdf:pdf\\Bosm21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2112.05151}, + automatic = {yes}, } @conference{Bosma21a, @@ -3047,7 +3876,32 @@ @mastersthesis{Bosma21b journal = {Master thesis}, } +@article{Bosma23, + title = {Semi-supervised Learning with Report-guided Pseudo Labels for Deep Learning-based Prostate Cancer Detection Using Biparametric MRI}, + author = {Bosma, Joeran S. and Saha, Anindo and Hosseinzadeh, Matin and Slootweg, Ivan and de Rooij, Maarten and Huisman, Henkjan}, + journal = {Radiology: Artificial Intelligence}, + optnote = {DIAG, RADIOLOGY}, + doi = {10.1148/ryai.230031}, + url = {https://pubs.rsna.org/doi/full/10.1148/ryai.230031}, + pages = {e230031}, + year = {2023}, + publisher = {Radiological Society of North America}, + ss_id = {99f201aa5e15e2b52ca9090bae355dfca22577be}, + all_ss_ids = {['99f201aa5e15e2b52ca9090bae355dfca22577be']}, + gscites = {4}, +} +@inproceedings{Bosma23a, + author = {Joeran S. Bosma and Dr{\'e} Peeters and Nat{\'a}lia Alves and Anindo Saha and Zaigham Saghir and Colin Jacobs and Henkjan Huisman}, + booktitle = {Medical Imaging with Deep Learning}, + title = {Reproducibility of Training Deep Learning Models for Medical Image Analysis}, + url = {https://openreview.net/forum?id=MR01DcGST9}, + abstract = {Performance of deep learning algorithms varies due to their development data and training method, but also due to several stochastic processes during training. Due to these random factors, a single training run may not accurately reflect the performance of a given training method. Statistical comparisons in literature between different deep learning training methods typically ignore this performance variation between training runs and incorrectly claim significance of changes in training method. We hypothesize that the impact of such performance variation is substantial, such that it may invalidate biomedical competition leaderboards and some scientific papers. To test this, we investigate the reproducibility of training deep learning algorithms for medical image analysis. We repeated training runs from prior scientific studies: three diagnostic tasks (pancreatic cancer detection in CT, clinically significant prostate cancer detection in MRI, and lung nodule malignancy risk estimation in low-dose CT) and two organ segmentation tasks (pancreas segmentation in CT and prostate segmentation in MRI). 
A previously published top-performing algorithm for each task was trained multiple times to determine the variance in model performance. For all three diagnostic algorithms, performance variation from retraining was significant compared to data variance. Statistically comparing independently trained algorithms from the same training method using the same dataset should follow the null hypothesis, but we observed claimed significance with a p-value below 0.05 in + of comparisons with conventional testing (paired bootstrapping). We conclude that variance in model performance due to retraining is substantial and should be accounted for.}, + file = {Bosma23a.pdf:pdf\\Bosma23a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, +} @article{Boss01, author = {E. A. Boss and L. F. Massuger and L. A. Pop and L. C. Verhoef and H. J. Huisman and H. Boonstra and J. O. Barentsz}, @@ -3065,6 +3919,8 @@ @article{Boss01 gsid = {1003186607531091259}, gscites = {49}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/122189}, + ss_id = {9836dc01306350e2f0d22817e0c67b07e7858fa8}, + all_ss_ids = {['9836dc01306350e2f0d22817e0c67b07e7858fa8']}, } @mastersthesis{Botr22, @@ -3093,7 +3949,9 @@ @article{Boue18 pmid = {28943279}, month = {1}, gsid = {17459489425823189277}, - gscites = {20}, + gscites = {44}, + ss_id = {51081160a16f05064f518aa0a5af1a5b075f43e3}, + all_ss_ids = {['51081160a16f05064f518aa0a5af1a5b075f43e3']}, } @conference{Boul22a, @@ -3105,6 +3963,19 @@ @conference{Boul22a year = {2022}, } +@article{Boul23, + author = {Boulogne, Luuk H. and Lorenz, Julian and Kienzle, Daniel and Schon, Robin and Ludwig, Katja and Lienhart, Rainer and Jegou, Simon and Li, Guang and Chen, Cong and Wang, Qi and Shi, Derik and Maniparambil, Mayug and Muller, Dominik and Mertes, Silvan and Schroter, Niklas and Hellmann, Fabio and Elia, Miriam and Dirks, Ine and Bossa, Matias Nicolas and Berenguer, Abel Diaz and Mukherjee, Tanmoy and Vandemeulebroucke, Jef and Sahli, Hichem and Deligiannis, Nikos and Gonidakis, Panagiotis and Huynh, Ngoc Dung and Razzak, Imran and Bouadjenek, Reda and Verdicchio, Mario and Borrelli, Pasquale and Aiello, Marco and Meakin, James A. and Lemm, Alexander and Russ, Christoph and Ionasec, Razvan and Paragios, Nikos and van Ginneken, Bram and Dubois, Marie-Pierre Revel}, + title = {~~The STOIC2021 COVID-19 AI challenge: applying reusable training methodologies to private data}, + doi = {10.48550/ARXIV.2306.10484}, + year = {2023}, + abstract = {Challenges drive the state-of-the-art of automated medical image analysis. The quantity of public training data that they provide can limit the performance of their solutions. Public access to the training methodology for these solutions remains absent. This study implements the Type Three (T3) challenge format, which allows for training solutions on private data and guarantees reusable training methodologies. With T3, challenge organizers train a codebase provided by the participants on sequestered training data. T3 was implemented in the STOIC2021 challenge, with the goal of predicting from a computed tomography (CT) scan whether subjects had a severe COVID-19 infection, defined as intubation or death within one month. STOIC2021 consisted of a Qualification phase, where participants developed challenge solutions using 2000 publicly available CT scans, and a Final phase, where participants submitted their training methodologies with which solutions were trained on CT scans of 9724 subjects. 
The organizers successfully trained six of the eight Final phase submissions. The submitted codebases for training and running inference were released publicly. The winning solution obtained an area under the receiver operating characteristic curve for discerning between severe and non-severe COVID-19 of 0.815. The Final phase solutions of all finalists improved upon their Qualification phase solutions.}, + url = {https://arxiv.org/abs/2306.10484}, + file = {Boul23.pdf:pdf\\Boul23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2306.10484}, + automatic = {yes}, +} + @inproceedings{Boze12, author = {J. Bozek and M. G. J. Kallenberg and M. Grgic and N. Karssemeijer}, title = {Comparison of Lesion Size Using Area and Volume in Full Field Digital Mammograms}, @@ -3119,6 +3990,8 @@ optnote = {DIAG, RADIOLOGY}, gsid = {3825556773874521565}, gscites = {2}, + ss_id = {52f2a5c63c6baad0b75adbd0667f6d63f0e7b134}, + all_ss_ids = {['52f2a5c63c6baad0b75adbd0667f6d63f0e7b134']}, } @article{Boze14, @@ -3136,7 +4009,9 @@ pmid = {24506623}, month = {1}, gsid = {13159064489659255149}, - gscites = {1}, + gscites = {8}, + ss_id = {d300479cd834d7c2a32ef4db2825903170d74ce5}, + all_ss_ids = {['d300479cd834d7c2a32ef4db2825903170d74ce5']}, } @article{Bozo16, @@ -3152,6 +4027,58 @@ optnote = {DIAG}, pmid = {26251260}, month = {8}, + ss_id = {15c5713c4b4c87c587b2df572c32fd121cf0d081}, + all_ss_ids = {['15c5713c4b4c87c587b2df572c32fd121cf0d081']}, + gscites = {7}, +} + +@article{Bozo17, + author = {Bozovic, Gracijela and Adlercreutz, Catharina and H\"{o}glund, Peter and Bj\"{o}rkman-Burtscher, Isabella and Reinstrup, Peter and Ingemansson, Richard and Schaefer-Prokop, Cornelia and Siemund, Roger and Geijer, Mats}, + title = {~~Imaging of the Lungs in Organ Donors and its Clinical Relevance}, + doi = {10.1097/rti.0000000000000255}, + year = {2017}, + abstract = { + Purpose: + The aim of the study was to retrospectively evaluate the diagnostic imaging that potential lung donors undergo, the reader variability of image interpretation and its relevance for donation, and the potential information gained from imaging studies not primarily intended for lung evaluation but partially including them. + + + Materials and Methods: + Bedside chest radiography and computed tomography (CT), completely or incompletely including the lungs, of 110 brain-dead potential organ donors in a single institution during 2007 to 2014 were reviewed from a donation perspective. Two chest radiologists in consensus analyzed catheters and cardiovascular, parenchymal, and pleural findings. Clinical reports and study review were compared for substantial differences in findings that could have led to a treatment change, triggered additional examinations such as bronchoscopy, or were considered important for donation. + + + Results: + Among 136 bedside chest radiographs, no differences between clinical reports and study reviews were found in 37 (27%), minor differences were found in 28 (21%), and substantial differences were found in 71 (52%) examinations (P<0.0001). In 31 of 42 (74%) complete or incomplete CT examinations, 50 of 74 findings with relevance for lung donation were not primarily reported (P<0.0001). + + + Conclusions: + The majority of donor patients undergo only chest radiography. A targeted imaging review of abnormalities affecting the decision to use donor lungs may be useful in the preoperative stage.
With a targeted list, substantial changes were made from initial clinical interpretations. CT can provide valuable information on donor lung pathology, even if the lungs are only partially imaged. + }, + url = {http://dx.doi.org/10.1097/RTI.0000000000000255}, + file = {Bozo17.pdf:pdf\\Bozo17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Thoracic Imaging}, + automatic = {yes}, + citation-count = {7}, + pages = {107-114}, + volume = {32}, + pmid = {28060192}, +} + +@article{Bozo22, + author = {Bozovic, Gracijela and Schaefer-Prokop, Cornelia M and Bankier, Alexander A}, + title = {~~Pulmonary functional imaging (PFI): A historical review and perspective}, + doi = {10.1177/02841851221076324}, + year = {2022}, + abstract = {Pulmonary Functional Imaging (PFI) refers to visualization and measurement of ventilation, perfusion, gas flow and exchange as well as biomechanics. In this review, we will highlight the historical development of PFI, describing recent advances and listing the various techniques for PFI offered per modality. Challenges PFI is facing and requirements for PFI from a clinical point of view will be pointed out. Hereby the review is meant as an introduction to PFI.}, + url = {http://dx.doi.org/10.1177/02841851221076324}, + file = {Bozo22.pdf:pdf\\Bozo22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Acta Radiologica}, + automatic = {yes}, + citation-count = {0}, + pages = {90-100}, + volume = {64}, + pmid = {35118881}, } @article{Brak00, @@ -3168,6 +4095,8 @@ month = {9}, gsid = {11284350184411772588}, gscites = {151}, + ss_id = {29dab22d41369fe0af3dd6bca85e0c5aa6859ded}, + all_ss_ids = {['29dab22d41369fe0af3dd6bca85e0c5aa6859ded']}, } @phdthesis{Brak00a, year = {2000}, url = {http://repository.ubn.ru.nl/handle/2066/18825}, abstract = {This thesis describes the components of an automated detection method for masses and architectural distortions, signs of infiltrating cancer. Masses and architectural distortions can be very subtle and are frequently missed by radiologists. Because the success of treatment of breast cancer depends largely on the stage of the tumor at the time of detection, early detection is very important. Masses have two main image characteristics that can be used for detection: a radiating pattern of spicules and a mass. Sometimes both characteristics are present, but often only spicules or just a faint mass is visible. To achieve high sensitivity on the whole spectrum of possible appearances of masses and distortions, detection of both characteristics is essential. Chapter 2 describes a sensitive method to detect radiating spicule patterns using statistical analysis of line orientations. However, many masses do not show clear spiculation, and must be detected by their mass. Chapter 3 describes how the spicule detection method can be transformed to a mass detection method. Instead of a map of line orientations, a map of gradient orientations is computed. Statistical analysis of this orientation map was used to detect masses. A large set of mammograms taken from the Nijmegen screening program was used to test a detection method based on spicules, a detection method based on masses, and a detection method that detects both spicules and masses. Best results were obtained when both the spiculation and mass features were used. Of all masses, 85% was detected at a specificity level of 1 false positive per image, 55% at 1 false positive per 10 images.
The diameter of masses in mammograms varies from 5 mm to 5 cm, inspiring many research groups to use multi-scale approaches to detect masses. However, the benefit of applying their method in a multi-scale way is almost never compared to a single-scale version of their method. In Chapter 4, the mass detection method of Chapter 3 and two popular pattern recognition techniques to detect bright areas were applied in a single and multi-scale way to examine the possible gain of multi-scale detection. It appeared that the multi-scale versions of the mass detection method had similar performance as a single-scale approach if this scale was chosen appropriately. Of course, when the scale for the single-scale approach was chosen sub-optimally the performance was lower. This study shows that it is not self-evident that a multi-scale mass detection method gives better results than a single-scale version of the method. A multi-scale method is sensitive for masses over a range of sizes, but is also sensitive for false positives of different sizes. The specificity level that was achieved by the mass detection method described in Chapter 3 is not high enough for successful application in the clinic or in screening. To improve the specificity, a second stage was designed, that classifies each detected region based on regional criteria like contrast, shape, and texture. Based on such features, many normal tissue regions could be discriminated from real masses. To compute these features, a segmentation of the suspicious regions is required. In Chapter 5, a method is described to segment masses using a discrete dynamic contour model. For each region a size estimate was available of the suspect region, and an appropriate initial starting contour was created that was fitted to the edge of the region. The method proved to be fast and robust, and outperformed a region growing approach. In Chapter 6, the contour model was used to segment regions that were found by the mass detection method of Chapter 3. A number of features were implemented that capture image characteristics that radiologists use to determine whether a suspicious region is a mass or dense normal tissue. Classification using these regional features gave a large reduction in false positives at each desired sensitivity level. On two large datasets a relatively high sensitivity was achieved even at high specificity levels. In Chapter 7, - all segmentation methods of Chapter 5 were used to segment and classify the detected regions. The adaptive discrete contour method that was used in Chapter 6 and the preprocessed probabilistic region growing method gave similar results. The experiments of Chapter 8 showed that a substantial number of the tumors that were missed by radiologists in a screening program despite double reading, were detected by the mass detection method of Chapter 3. Successful detection of missed tumors indicates that a CAD system can be a useful tool for radiologists if the prompts are sufficiently specific. Chapter 9 describes two experiments that were done using a commercially available prompting device. A large experiment showed that the specificity of radiologists does not decrease when they are prompted. This is an important result because some fear that the large number of false positive prompts of a CAD system might increase the recall rate. Results of a second experiment indicated that radiologists have much more difficulty with interpreting suspicious signs than is generally believed.
It seems that many screening errors that are thought to be due to oversight, are due to misinterpretation. Both experiments showed large differences in the performance levels of radiologists. Detection of masses is reaching a level of performance where successful use in screening or clinical practice is possible. Approximately 75% of all masses are detected in at least one view at a specificity level of 0.1 false positives per image. Improvement of the mass and spicule features is still possible, and more sophisticated features can be used to remove false positives. Because the data sets that are used for training are becoming larger, better classifiers can be produced. A considerable improvement can be expected when suspicious regions in one view are correlated to suspicious regions in the other view. Many strong false positives are only present in one of the views, real lesions are most often visible in both. Together with asymmetry features and a method to detect temporal changes in mammograms, another considerable reduction in false positives seems possible}, + all segmentation methods of Chapter 5 were used to segment and classify the detected regions. The adaptive discrete contour method that was used in Chapter 6 and the preprocessed probabilistic region growing method gave similar results. The experiments of Chapter 8 showed that a substantial number of the tumors that were missed by radiologists in a screening program despite double reading, were detected by the mass detection method of Chapter 3. Successful detection of missed tumors indicates that a CAD system can be a useful tool for radiologists if the prompts are sufficiently specific. Chapter 9 describes two experiments that were done using a commercially available prompting device. A large experiment showed that the specificity of radiologists does not decrease when they are prompted. This is an important result because some fear that the large number of false positive prompts of a CAD system might increase the recall rate. Results of a second experiment indicated that radiologists have much more difficulty with interpreting suspicious signs than is generally believed. It seems that many screening errors that are thought to be due to oversight, are due to misinterpretation. Both experiments showed large differences in the performance levels of radiologists. Detection of masses is reaching a level of performance where successful use in screening or clinical practice is possible. Approximately 75% of all masses are detected in at least one view at a specificity level of 0.1 false positives per image. Improvement of the mass and spicule features is still possible, and more sophisticated features can be used to remove false positives. Because the data sets that are used for training are becoming larger, better classifiers can be produced. A considerable improvement can be expected when suspicious regions in one view are correlated to suspicious regions in the other view. Many strong false positives are only present in one of the views, real lesions are most often visible in both. Together with asymmetry features and a method to detect temporal changes in mammograms, another considerable reduction in false positives seems possible},
Karssemeijer}, file = {Brak00a.pdf:pdf\\Brak00a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, @@ -3203,6 +4132,8 @@ @article{Brak01 month = {2}, gsid = {15867724720093407015}, gscites = {109}, + ss_id = {e7aa02db15ac1c46249be1231b3fa693792844a6}, + all_ss_ids = {['e7aa02db15ac1c46249be1231b3fa693792844a6']}, } @article{Brak98, @@ -3219,6 +4150,8 @@ @article{Brak98 month = {5}, gsid = {3316279620693133833}, gscites = {106}, + ss_id = {a0a0fcedd92107cc3f115f323ec0650012654994}, + all_ss_ids = {['a0a0fcedd92107cc3f115f323ec0650012654994']}, } @article{Brak99, @@ -3236,6 +4169,8 @@ @article{Brak99 month = {7}, gsid = {9014789287224804562}, gscites = {161}, + ss_id = {b301e9c53ffbfa0df00feed1b50c6438d16c0c08}, + all_ss_ids = {['b301e9c53ffbfa0df00feed1b50c6438d16c0c08']}, } @inproceedings{Brak99b, @@ -3253,6 +4188,8 @@ @inproceedings{Brak99b month = {5}, gsid = {5415364043687958742}, gscites = {9}, + ss_id = {e08c326631cc5359840d77beb1de4f6927a30390}, + all_ss_ids = {['e08c326631cc5359840d77beb1de4f6927a30390']}, } @article{Bran11, @@ -3270,7 +4207,9 @@ @article{Bran11 pmid = {21609879}, month = {10}, gsid = {18263174567335478043}, - gscites = {27}, + gscites = {28}, + ss_id = {aaa9f7fc2e216f4f0ce40ef000d1ac9ed3a991e9}, + all_ss_ids = {['aaa9f7fc2e216f4f0ce40ef000d1ac9ed3a991e9']}, } @inproceedings{Brec08, @@ -3320,7 +4259,6 @@ @article{Brec12 gscites = {21}, } - @article{Breu14, author = {Breuninger, Marianne and van Ginneken, Bram and Philipsen, Rick H H M. and Mhimbira, Francis and Hella, Jerry J. and Lwilla, Fred and van den Hombergh, Jan and Ross, Amanda and Jugheli, Levan and Wagner, Dirk and Reither, Klaus}, title = {Diagnostic accuracy of computer-aided detection of pulmonary tuberculosis in chest radiographs: a validation study from sub-saharan {A}frica}, @@ -3336,7 +4274,9 @@ @article{Breu14 pmid = {25192172}, month = {9}, gsid = {7079580458158799529}, - gscites = {60}, + gscites = {77}, + ss_id = {7e2cc3c4c648d38b74c4fe802672198067ddebf7}, + all_ss_ids = {['7e2cc3c4c648d38b74c4fe802672198067ddebf7']}, } @article{Bria13, @@ -3354,7 +4294,9 @@ @article{Bria13 pmid = {24292553}, month = {2}, gsid = {4547748588204966796}, - gscites = {64}, + gscites = {77}, + ss_id = {3679f6f835dac7059cf470c5251b6e7f3e748359}, + all_ss_ids = {['3679f6f835dac7059cf470c5251b6e7f3e748359']}, } @inproceedings{Bria16a, @@ -3371,7 +4313,9 @@ @inproceedings{Bria16a file = {:pdf/Bria16a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {10449433380463654551}, - gscites = {4}, + gscites = {5}, + ss_id = {2af2d2173999744b98665922265bb69a8bccf6ef}, + all_ss_ids = {['2af2d2173999744b98665922265bb69a8bccf6ef']}, } @inproceedings{Bria16b, @@ -3389,6 +4333,23 @@ @inproceedings{Bria16b optnote = {DIAG, RADIOLOGY}, gsid = {6212175193964795667}, gscites = {16}, + ss_id = {3e8abcfa3851f8cbb06dacc2a7ea96305d0714e9}, + all_ss_ids = {['3e8abcfa3851f8cbb06dacc2a7ea96305d0714e9']}, +} + +@book{Bria17, + author = {Bria, Alessandro and Marrocco, Claudio and Galdran, Adrian and Campilho, Aur\'{e}lio and Marchesi, Agnese and Mordang, Jan-Jurre and Karssemeijer, Nico and Molinara, Mario and Tortorella, Francesco}, + title = {~~Spatial Enhancement by Dehazing for Detection of Microcalcifications with Convolutional Nets}, + doi = {10.1007/978-3-319-68548-9_27}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-319-68548-9_27}, + file = {Bria17.pdf:pdf\\Bria17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Image Analysis and Processing - ICIAP 2017}, + automatic = {yes}, + citation-count = {9}, 
+ pages = {288-298}, } @article{Bria18, @@ -3406,7 +4367,23 @@ optnote = {DIAG}, pmid = {29994062}, gsid = {7371071681059227535}, - gscites = {3}, + gscites = {10}, + ss_id = {3f632517cafc2e8159b1c4c0c6f5375248dfe8db}, + all_ss_ids = {['3f632517cafc2e8159b1c4c0c6f5375248dfe8db']}, +} + +@inproceedings{Bria18a, + author = {Bria, Alessandro and Savelli, Benedetta and Marrocco, Claudio and Mordang, Jan-Jurre and Molinara, Mario and Karssemeijer, Nico and Tortorella, Francesco}, + title = {~~Improving the automated detection of calcifications by combining deep cascades and deep convolutional nets}, + doi = {10.1117/12.2318016}, + year = {2018}, + abstract = {Recently, both Deep Cascade classifiers and Convolutional Neural Networks (CNNs) have achieved state-of-the-art microcalcification (MC) detection performance in digital mammography. Deep Cascades consist of long sequences of weak classifiers designed to effectively learn from heavily unbalanced data as in the case of MCs (~ 1 MC every 10,000 non-MC samples). CNNs are powerful models that achieve impressive results for image classification thanks to the ability to automatically extract general-purpose features from the data, but require balanced classes. In this work, we introduce a two-stage classification scheme that combines the benefits of both systems. Firstly, Deep Cascades are trained by requiring a very high sensitivity (99.5%) throughout the sequence of classifiers. As a result, while the number of MC samples remains practically unchanged, the number of non-MC samples is greatly reduced. The remaining data, approximately balanced, are used to train an additional stage of classification with a CNN. We evaluated the proposed approach on a database of 1, 066 digital mammograms. MC detection results of the combined classification were statistically significantly higher than Deep Cascade and CNN alone, yielding an average improvement in mean sensitivity of 3.19% and 2.45%, respectively. Remarkably, the proposed system also yielded a faster per-mammogram processing time (2.0s) compared to Deep Cascade (2.5s) and CNN (5.7s).}, + url = {http://dx.doi.org/10.1117/12.2318016}, + file = {Bria18a.pdf:pdf\\Bria18a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {14th International Workshop on Breast Imaging (IWBI 2018)}, + automatic = {yes}, + citation-count = {2}, } @conference{Brin13, @@ -3478,6 +4455,21 @@ @inproceedings{Broe06 gscites = {3}, } +@article{Brou23, + author = {Brouwer, N.P.M. and Khan, A. and Bokhorst, J.M. and Ayatollahi, F. and Hay, J. and Ciompi, F. and Simmer, F. and Hugen, N. and de Wilt, J.H.W. and Berger, M.D. and Lugli, A. and Zlobec, I. and Edwards, J. and Nagtegaal, I.D.}, + title = {The complexity of shapes; how the circularity of tumor nodules impacts prognosis in colorectal cancer}, + doi = {10.1016/j.modpat.2023.100376}, + pages = {100376}, + url = {http://dx.doi.org/10.1016/j.modpat.2023.100376}, + abstract = {Abstract unavailable}, + automatic = {yes}, + citation-count = {0}, + file = {Brou23.pdf:pdf\\Brou23.pdf:PDF}, + journal = {Modern Pathology}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, +} + @book{Brow08a, author = {M. Brown and M. de Bruijne and B. van Ginneken and A. Kiraly and J.-M. Kuhnigk and C. Lorenz and K. Mori and J.
Reinhardt}, title = {The {F}irst {I}nternational {W}orkshop on {P}ulmonary {I}mage {A}nalysis}, @@ -3487,6 +4479,8 @@ @book{Brow08a abstract = {Both the quantity and quality of image data available to study the pulmonary system have increased enormously in the last decade. The goal of this workshop is to bring together researchers in pulmonary image analysis and discuss recent advances in this rapidly developing field. We invited papers on all aspects of image analysis of pulmonary image data, including segmentation, registration, quantification, modeling of the image acquisition process, visualization, statistical modeling, biophysical modeling of the lungs (computational anatomy), and novel applications. In addition, we want to address the effective use of these methodologies for diagnosis and therapy in clinical applications, bringing together theory and practice, by including a hands-on demo session focusing on clinical workstations for pulmonary analysis. We received many high quality submissions covering a broad spectrum of issues in pulmonary image analysis. All papers underwent a thorough review process with 3-4 reviews per paper by members of the program committee and additional reviewers. We finally accepted 12 papers for oral presentation, 16 poster presentations, and 3 papers describing software systems which will be demonstrated during the poster and demo session.}, file = {Brow08a.pdf:pdf\\Brow08a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['57cc1d7dea1252afd3b287fce6551430c87226ff']}, + gscites = {12}, } @book{Brow09, @@ -3498,6 +4492,8 @@ @book{Brow09 abstract = {After the successful first edition of the International Workshop on Pulmonary Image Analysis at MICCAI 2008 in New York City, the entire organizing team volunteered to organize the second edition of this event, aimed at bringing together researchers in pulmonary image analysis to discuss recent advances in this rapidly developing field. The Second International Workshop on Pulmonary Image Analysis will be held on September 20, 2009 in London, UK, again as a workshop of the MICCAI conference. Two researchers later joined the organizing team. We received many high quality submissions for this workshop. All papers underwent a thorough review process with two to four reviews per paper by members of the program committee and additional reviewers. The proceedings of this workshop consist of three parts. There are fifteen regular papers, dealing with various aspects of image analysis of pulmonary image data, including segmentation, registration, and quantification of abnormalities in various modalities, with the focus in most studies on computed tomography, but also with papers on the analysis of MRI and X-ray scans. Next to these regular papers, we invited researchers to join in two comparative studies where algorithms were applied to a common data set, and submit a paper to the workshop about their system. The first of these challenges is EXACT09, on the extraction of the pulmonary airway tree from CT data. The second one, VOLCANO'09, is on the analysis of size changes in pulmonary nodules from consecutive CT scans. The results of these challenges are described in two overview papers that can be found in these proceedings. Moreover, fifteen papers describe systems that participated in the EXACT09 challenge and three papers describe algorithms that were used for the VOLCANO'09 challenge. 
That challenge attracted thirteen participating teams who applied algorithms, often previously published and not described in these proceedings, to the challenge data.}, file = {Brow09.pdf:pdf\\Brow09.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['858f620084a0676d77b2b52e983b2632e92b7def']}, + gscites = {11}, } @book{Brow10, @@ -3526,6 +4522,8 @@ @inproceedings{Brui02 optnote = {DIAG, RADIOLOGY}, month = {5}, gscites = {61}, + ss_id = {1c04dc70e53afab5482a7adef2ddc5f707383deb}, + all_ss_ids = {['1c04dc70e53afab5482a7adef2ddc5f707383deb']}, } @inproceedings{Brui03, @@ -3543,6 +4541,8 @@ pmid = {15344453}, gsid = {2360432872690580120}, gscites = {185}, + ss_id = {0778bee7108adc686ab78dcd98758c76bf14bd36}, + all_ss_ids = {['0778bee7108adc686ab78dcd98758c76bf14bd36']}, } @inproceedings{Brui03a, @@ -3559,6 +4559,7 @@ optnote = {DIAG, RADIOLOGY}, gsid = {10995536154405183851}, gscites = {18}, + all_ss_ids = {['1ee7b2b9e546979b9985722ee3e1353abc199731']}, } @inproceedings{Brui03b, @@ -3575,7 +4576,9 @@ optnote = {DIAG, RADIOLOGY}, month = {5}, gsid = {5651884635784583934}, - gscites = {25}, + gscites = {26}, + ss_id = {00559146cc1276c6c98401e2aa35e591ebccca76}, + all_ss_ids = {['00559146cc1276c6c98401e2aa35e591ebccca76']}, } @article{Brui04, @@ -3593,7 +4596,9 @@ pmid = {15063862}, month = {6}, gsid = {14139207650278234060}, - gscites = {127}, + gscites = {128}, + ss_id = {c6736d66d1088989faf12ba082dae19132baed4d}, + all_ss_ids = {['c6736d66d1088989faf12ba082dae19132baed4d']}, } @inproceedings{Brun07, @@ -3640,6 +4645,9 @@ @inproceedings{Brun10 optnote = {DIAG, RADIOLOGY}, number = {Pt 1}, pmid = {20879229}, + ss_id = {f5a131da8468342f2af75863a762a56ab95ab8a1}, + all_ss_ids = {['f5a131da8468342f2af75863a762a56ab95ab8a1']}, + gscites = {4}, } @article{Brun11, @@ -3655,6 +4663,9 @@ optnote = {DIAG, RADIOLOGY}, pmid = {21800960}, month = {11}, + ss_id = {4304be3b3a04f1effbd488d408c812fcb8c44358}, + all_ss_ids = {['4304be3b3a04f1effbd488d408c812fcb8c44358']}, + gscites = {55}, } @phdthesis{Brun11a, @@ -3663,7 +4674,7 @@ year = {2011}, url = {http://repository.tue.nl/715250}, abstract = {Deep brain stimulation of the subthalamic nucleus (STN) has gained momentum as a therapy for advanced Parkinson's disease. The stimulation effectively alleviates the patients' typical motor symptoms on a long term, but can give rise to cognitive and psychiatric adverse effects as well. Based on primate studies, the STN has been divided into three functionally different parts, which were distinguished by their afferent and efferent connections. The largest part is the motor area, followed by an associative and a limbic area. The serious adverse effects on cognition and behavior occurring after deep brain stimulation are assumed to be caused by electrical current spread to the associative and limbic areas of the STN. Therefore, selective stimulation of the motor part of the STN seems crucial, both to obtain the best possible therapeutic effect on the motor symptoms and to minimize the debilitating effects on cognition and behavior. However, current medical imaging techniques do not yet facilitate the required accurate identification of the STN itself, let alone its different functional areas. The final target for DBS is still often adjusted using intraoperative electrophysiology.
Therefore, in this thesis we aimed to improve imaging for deep brain stimulation using noninvasive MRI protocols, in order to identify the STN and its motor part. We studied the advantages and drawbacks of already available noninvasive methods to target the STN. This review did not lead to a straightforward conclusion; identification of the STN motor part remained an open question. In follow-up on this question, we investigated the possibility to distinguish the different functional STN parts based on their connectivity information. Three types of information were carefully analyzed in this thesis. First, we looked into the clustering of local diffusion information within the STN region. We visually inspected the complex diffusion profiles, derived from postmortem rat brain data with high angular resolution, and augmented this manual segmentation method using k-means and graph cuts clustering. Because the weighing of different orders of diffusion information in the traditionally used L2 norm on the orientation distribution functions (ODFs) remained an open issue, we developed a specialized distance measure, the so-called Sobolev norm. This norm does not only take into account the amplitudes of the diffusion profiles, but also their extrema. We showed it to perform better than the L2 norm on synthetic phantom data and real brain (thalamus) data. The research done on this topic facilitates better classification by clustering of gray matter structures in the (deep) brain. Secondly, we were the first to analyze the STN's full structural connectivity, based on probabilistic fiber tracking in diffusion MRI data of healthy volunteers. The results correspond well to topical literature on STN projections. Furthermore, we assessed the structural connectivity per voxel of the STN seed region and discovered a gradient in connectivity to the premotor cortex within the STN. While going from the medial to the lateral part of the STN, the connectivity increases, confirming the expected lateral location of the STN motor part. Finally, the connectivity analysis produced evidence for the existence of a 'hyperdirect' pathway between the motor cortex and the STN in humans, which is very useful for future research into stimulation targets. The results of these experiments indicate that it is possible to find the motor part of the STN as specific target for deep brain stimulation using structural connectivity information acquired in a noninvasive way. Third and last, we studied functional connectivity using resting state functional MRI data of healthy volunteers. The resulting significant clusters provided us with the first complete description of the STN's resting state functional connectivity, which corresponds with the expectations based on available literature. Moreover, we performed a reverse regression procedure with the average time - series signals in motor and limbic areas as principal regressors. The results were analyzed for each STN voxel separately and also showed mediolateral gradients in functional connectivity within the STN. The lateral STN part exhibited more motor connectivity, while the medial part seemed to be more functionally connected to limbic brain areas, as described in neuronal tracer studies. These results show that functional connectivity analysis also is a viable noninvasive method to find the motor part of the STN.
The work on noninvasive MRI methods for identification of the STN and its functional parts, as presented in this thesis, thus contributes to future specific stimulation of the motor part of the STN for deep brain stimulation in patients with Parkinson's disease. This may help to maximize the motor effects and minimize severe cognitive and psychiatric side effects.}, + series signals in motor and limbic areas as principal regressors. The results were analyzed for each STN voxel separately and also showed mediolateral gradients in functional connectivity within the STN. The lateral STN part exhibited more motor connectivity, while the medial part seemed to be more functionally connected to limbic brain areas, as described in neuronal tracer studies. These results show that functional connectivity analysis also is a viable noninvasive method to find the motor part of the STN. The work on noninvasive MRI methods for identification of the STN and its functional parts, as presented in this thesis, thus contributes to future specific stimulation of the motor part of the STN for deep brain stimulation in patients with Parkinson's disease. This may help to maximize the motor effects and minimize severe cognitive and psychiatric side effects.}, copromotor = {B. Platel}, file = {Brun11a.pdf:pdf/Brun11a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, @@ -3687,7 +4698,9 @@ @article{Brun12 pmid = {22768059}, month = {6}, gsid = {7953209599105730739}, - gscites = {97}, + gscites = {135}, + ss_id = {3d346a616566ceafe3a09acdd4831df76c408b33}, + all_ss_ids = {['3d346a616566ceafe3a09acdd4831df76c408b33']}, } @conference{Buka17, @@ -3713,7 +4726,9 @@ @inproceedings{Bult18 optnote = {DIAG}, month = {3}, gsid = {14179800442563998624}, - gscites = {12}, + gscites = {16}, + ss_id = {add18010e1af63998bae7573f4cd5d2843eeb5bb}, + all_ss_ids = {['add18010e1af63998bae7573f4cd5d2843eeb5bb']}, } @inproceedings{Bult18a, @@ -3726,7 +4741,9 @@ file = {Bult18a.pdf:pdf\\Bult18a.pdf:PDF}, optnote = {DIAG}, gsid = {18442552638759628545}, - gscites = {8}, + gscites = {24}, + ss_id = {eed96a595b9c988affacac34c57128bd115020f0}, + all_ss_ids = {['eed96a595b9c988affacac34c57128bd115020f0']}, } @article{Bult19, @@ -3745,7 +4762,9 @@ pmid = {30696866}, month = {1}, gsid = {10102326358237382134}, - gscites = {39}, + gscites = {120}, + ss_id = {d5b8fba71671814445de80d36a2d563f43eb9d07}, + all_ss_ids = {['d5b8fba71671814445de80d36a2d563f43eb9d07']}, } @conference{Bult19a, @@ -3756,7 +4775,8 @@ abstract = {Grading prostate cancer is a time-consuming process and suffers from high inter- and intra-observer variability. Advances in computer-aided diagnosis have shown promise in improving histopathological diagnosis. We trained a deep learning system using data retrieved from the patients' records to grade digitized prostate biopsies. Our system is the first that can automatically classify background, benign epithelium, Gleason 3, 4, and 5 on a gland-by-gland level in prostate biopsies. 532 glass slides containing 2162 prostate biopsies, evaluated by an experienced urogenital pathologist, were collected and scanned. 596 biopsies were kept separate for evaluation, the remaining 1576 were used to train the deep learning algorithm (see table for Gleason grade distribution). A single label denoting the Gleason score (e.g. 3+4=7) was available for each biopsy, without information on tumor location or volume.
To generate detailed annotations for training we used two previously trained deep learning networks to first segment the epithelium and, subsequently, to detect cancer. The Gleason grade from the patient record was assigned to the cancerous epithelium. These generated weakly annotated regions of tumor were then used to train a Gleason grading system. To evaluate, the system was applied to the biopsies in the test set. We used the total predicted surface area of each growth pattern to determine the Gleason score of the biopsy. Predicted tumor areas smaller than 15% of total epithelial tissue were considered unreliable (e.g. incomplete glands at the edges of the biopsy) and ignored for slide level classification. For predicted grades only areas larger than 5% of all epithelial tissue were considered, which is also common in clinical practice. Predicting whether a biopsy contains tumor resulted in an accuracy of 86% (linear weighted kappa (k) of 0.73, area under the ROC curve of 0.96). We compared the predicted primary Gleason grade to the one from the pathologists' report. Our system achieved an accuracy of 75% (k 0.64). On predicting the Grade Group (using primary and secondary pattern), our system achieved an accuracy of 67% (k 0.57). Misclassifications of more than one grade are rare. Our deep learning system automatically identifies Gleason patterns and benign tissue on a gland-by-gland basis. This can be used to determine the biopsy-level Grade Group and Gleason score, and show which parts of the tissue contribute to this prediction. Improvements need to be made to decrease misclassifications, for example in areas with inflammation.}, optnote = {DIAG}, gsid = {14775949012876589282}, - gscites = {9}, + gscites = {122}, + all_ss_ids = {8ce6b544554a79e077e5fc52f55ba8234ce606d4}, } @article{Bult20, @@ -3771,26 +4791,28 @@ @article{Bult20 url = {https://arxiv.org/abs/1907.07980}, algorithm = {https://grand-challenge.org/algorithms/gleason-grading-of-prostate-biopsies/}, abstract = {BACKGROUND: - The Gleason score is the strongest correlating predictor of recurrence for prostate cancer, but has substantial inter-observer variability, limiting its usefulness for individual patients. Specialised urological pathologists have greater concordance; however, such expertise is not widely available. Prostate cancer diagnostics could thus benefit from robust, reproducible Gleason grading. We aimed to investigate the potential of deep learning to perform automated Gleason grading of prostate biopsies. - - METHODS: - In this retrospective study, we developed a deep-learning system to grade prostate biopsies following the Gleason grading standard. The system was developed using randomly selected biopsies, sampled by the biopsy Gleason score, from patients at the Radboud University Medical Center (pathology report dated between Jan 1, 2012, and Dec 31, 2017). A semi-automatic labelling technique was used to circumvent the need for manual annotations by pathologists, using pathologists' reports as the reference standard during training. The system was developed to delineate individual glands, assign Gleason growth patterns, and determine the biopsy-level grade. For validation of the method, a consensus reference standard was set by three expert urological pathologists on an independent test set of 550 biopsies. Of these 550, 100 were used in an observer experiment, in which the system, 13 pathologists, and two pathologists in training were compared with respect to the reference standard. 
The system was also compared to an external test dataset of 886 cores, which contained 245 cores from a different centre that were independently graded by two pathologists. - - FINDINGS: - We collected 5759 biopsies from 1243 patients. The developed system achieved a high agreement with the reference standard (quadratic Cohen's kappa 0.918, 95% CI 0.891-0.941) and scored highly at clinical decision thresholds: benign versus malignant (area under the curve 0.990, 95% CI 0.982-0.996), grade group of 2 or more (0.978, 0.966-0.988), and grade group of 3 or more (0.974, 0.962-0.984). In an observer experiment, the deep-learning system scored higher (kappa 0.854) than the panel (median kappa 0.819), outperforming 10 of 15 pathologist observers. On the external test dataset, the system obtained a high agreement with the reference standard set independently by two pathologists (quadratic Cohen's kappa 0.723 and 0.707) and within inter-observer variability (kappa 0.71). - - INTERPRETATION: - Our automated deep-learning system achieved a performance similar to pathologists for Gleason grading and could potentially contribute to prostate cancer diagnosis. The system could potentially assist pathologists by screening biopsies, providing second opinions on grade group, and presenting quantitative measurements of volume percentages. - - FUNDING: - Dutch Cancer Society.}, + The Gleason score is the strongest correlating predictor of recurrence for prostate cancer, but has substantial inter-observer variability, limiting its usefulness for individual patients. Specialised urological pathologists have greater concordance; however, such expertise is not widely available. Prostate cancer diagnostics could thus benefit from robust, reproducible Gleason grading. We aimed to investigate the potential of deep learning to perform automated Gleason grading of prostate biopsies. + + METHODS: + In this retrospective study, we developed a deep-learning system to grade prostate biopsies following the Gleason grading standard. The system was developed using randomly selected biopsies, sampled by the biopsy Gleason score, from patients at the Radboud University Medical Center (pathology report dated between Jan 1, 2012, and Dec 31, 2017). A semi-automatic labelling technique was used to circumvent the need for manual annotations by pathologists, using pathologists' reports as the reference standard during training. The system was developed to delineate individual glands, assign Gleason growth patterns, and determine the biopsy-level grade. For validation of the method, a consensus reference standard was set by three expert urological pathologists on an independent test set of 550 biopsies. Of these 550, 100 were used in an observer experiment, in which the system, 13 pathologists, and two pathologists in training were compared with respect to the reference standard. The system was also compared to an external test dataset of 886 cores, which contained 245 cores from a different centre that were independently graded by two pathologists. + + FINDINGS: + We collected 5759 biopsies from 1243 patients. The developed system achieved a high agreement with the reference standard (quadratic Cohen's kappa 0.918, 95% CI 0.891-0.941) and scored highly at clinical decision thresholds: benign versus malignant (area under the curve 0.990, 95% CI 0.982-0.996), grade group of 2 or more (0.978, 0.966-0.988), and grade group of 3 or more (0.974, 0.962-0.984).
In an observer experiment, the deep-learning system scored higher (kappa 0.854) than the panel (median kappa 0.819), outperforming 10 of 15 pathologist observers. On the external test dataset, the system obtained a high agreement with the reference standard set independently by two pathologists (quadratic Cohen's kappa 0.723 and 0.707) and within inter-observer variability (kappa 0.71).
+
+ INTERPRETATION:
+ Our automated deep-learning system achieved a performance similar to pathologists for Gleason grading and could potentially contribute to prostate cancer diagnosis. The system could potentially assist pathologists by screening biopsies, providing second opinions on grade group, and presenting quantitative measurements of volume percentages.
+
+ FUNDING:
+ Dutch Cancer Society.},
  file = {:pdf/Bult20.pdf:PDF},
  optnote = {DIAG},
  pmid = {31926805},
  month = {2},
  gsid = {11015417795282172712},
- gscites = {29},
+ gscites = {435},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/217309},
+ ss_id = {39a52867ea3d60ec4f28182e0e618b762d3d7909},
+ all_ss_ids = {['c310babca20446de2ee7d8857abe239e0fe261a0', '24b8cee45431f633d2fa6e3c05670f62b1e41e7e', '39a52867ea3d60ec4f28182e0e618b762d3d7909']},
}

@article{Bult20a,
@@ -3805,6 +4827,9 @@ @article{Bult20a
  url = {https://doi.org/10.1038/s41379-020-0640-y},
  file = {:pdf/Bult20a.pdf:PDF},
  optnote = {DIAG},
+ ss_id = {06bcda243a29be0892422c60b663f80cff21978a},
+ all_ss_ids = {['06bcda243a29be0892422c60b663f80cff21978a']},
+ gscites = {88},
}

@article{Bult22,
@@ -3819,6 +4844,9 @@ @article{Bult22
  optnote = {DIAG, PATHOLOGY},
  pmid = {35027755},
  year = {2022},
+ ss_id = {551a32d6bb196127b75d256e8547b81ef67a7ad3},
+ all_ss_ids = {['551a32d6bb196127b75d256e8547b81ef67a7ad3']},
+ gscites = {144},
}

@phdthesis{Bult22a,
@@ -3827,11 +4855,11 @@ @phdthesis{Bult22a
  year = {2022},
  url = {https://repository.ubn.ru.nl/handle/2066/241550},
  abstract = {The histological grading of prostate biopsies is a crucial element in the diagnostic pathway of prostate cancer. The known high inter- and intraobserver variability shows potential and a need for assisting pathologists in this task. Furthermore, a global shortage of pathologists stresses the demand for reproducible, more efficient, and easily accessible diagnostic solutions. This thesis's primary aim was to investigate and design an AI-based system to detect and grade prostate cancer in biopsies. A second aim was to evaluate the potential clinical merits of AI-assisted grading when such systems are embedded in the pathologist's workflow. To this end, the following objectives were undertaken as part of this thesis:
-
- 1. The development of an automated system that can distinguish epithelial tissue from other tissue types within H&E stained prostate specimens (Chapter 2);
- 2. The development and validation of an automated system for grading prostate biopsies using the Gleason grading system (Chapter 3);
- 3. A multi-center independent evaluation of state-of-the-art algorithms for automated Gleason grading sourced through a large-scale medical AI competition (Chapter 4);
- 4. The investigation of the potential merits of AI-assisted grading of prostate cancer through an observer study (Chapter 5).},
+
+ 1. The development of an automated system that can distinguish epithelial tissue from other tissue types within H&E stained prostate specimens (Chapter 2);
+ 2. The development and validation of an automated system for grading prostate biopsies using the Gleason grading system (Chapter 3);
+ 3.
A multi-center independent evaluation of state-of-the-art algorithms for automated Gleason grading sourced through a large-scale medical AI competition (Chapter 4);
+ 4. The investigation of the potential merits of AI-assisted grading of prostate cancer through an observer study (Chapter 5).},
  copromotor = {G. Litjens and C. Hulbergen-van de Kaa},
  file = {Bult22a.pdf:pdf\\Bult22a.pdf:PDF},
  optnote = {DIAG},
@@ -3867,6 +4895,28 @@ @inproceedings{Caba18a
  file = {Caba18a.pdf:pdf\\Caba18a.pdf:PDF},
  optnote = {AXTI, DIAG, RADIOLOGY},
  month = {3},
+ ss_id = {26b1aec641be34aa1a7a57362b04bd187ee75aa2},
+ all_ss_ids = {['26b1aec641be34aa1a7a57362b04bd187ee75aa2']},
+ gscites = {5},
+}
+
+@article{Caba21,
+ author = {Caballo, Marco and Hernandez, Andrew M. and Lyu, Su Hyun and Teuwen, Jonas and Mann, Ritse M. and van Ginneken, Bram and Boone, John M. and Sechopoulos, Ioannis},
+ title = {~~Computer-aided diagnosis of masses in breast computed tomography imaging: deep learning model with combined handcrafted and convolutional radiomic features},
+ doi = {10.1117/1.jmi.8.2.024501},
+ year = {2021},
+ abstract = {Purpose: A computer-aided diagnosis (CADx) system for breast masses is proposed, which incorporates both handcrafted and convolutional radiomic features embedded into a single deep learning model.
+ Approach: The model combines handcrafted and convolutional radiomic signatures into a multi-view architecture, which retrieves three-dimensional (3D) image information by simultaneously processing multiple two-dimensional mass patches extracted along different planes through the 3D mass volume. Each patch is processed by a stream composed of two concatenated parallel branches: a multi-layer perceptron fed with automatically extracted handcrafted radiomic features, and a convolutional neural network, for which discriminant features are learned from the input patches. All streams are then concatenated together into a final architecture, where all network weights are shared and the learning occurs simultaneously for each stream and branch. The CADx system was developed and tested for diagnosis of breast masses (N = 284) using image datasets acquired with independent dedicated breast computed tomography systems from two different institutions. The diagnostic classification performance of the CADx system was compared against other machine and deep learning architectures adopting handcrafted and convolutional approaches, and three board-certified breast radiologists.
+ Results: On a test set of 82 masses (45 benign, 37 malignant), the proposed CADx system performed better than all other model architectures evaluated, with an increase in the area under the receiver operating characteristics curve (AUC) of 0.05 +- 0.02, and achieving a final AUC of 0.947, outperforming the three radiologists (AUC = 0.814 - 0.902).
+ Conclusions: The system demonstrated its potential usefulness in breast cancer diagnosis by improving mass malignancy assessment.},
+ url = {http://dx.doi.org/10.1117/1.JMI.8.2.024501},
+ file = {Caba21.pdf:pdf\\Caba21.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Journal of Medical Imaging},
+ automatic = {yes},
+ citation-count = {4},
+ volume = {8},
+ pmid = {33796604},
}

@inproceedings{Call19,
@@ -3876,23 +4926,25 @@ @inproceedings{Call19
  year = {2019},
  series = SPIE,
  abstract = {In this work we analyze the effect of label noise in training and test data when performing classification experi-
- ments on chest radiographs (CXRs) with modern deep learning architectures.
We use ChestXRay14, the largest
- publicly available CXR dataset. We simulate situs inversus by horizontal flipping of the CXRs, allowing us to
- precisely control the amount of label noise. We also perform experiments in classifying emphysema using the
- ChestXRay14 provided labels that are known to be noisy. Our situs inversus experiments confirm results from
- the computer vision literature that deep learning architectures are relatively robust but not completely insensi-
- tive to label noise in the training data: without or with very low noise, classification results are near perfect; 16%
- and 32% training label noise only lead to a 1.5% and 4.6% drop in accuracy. We investigate two metrics that
- could be used to identify test samples that have an incorrect label: model confidence and model uncertainty. We
- show, in an observer study with an experienced chest radiologist, that both measures are effective in identifying
- samples in ChestXRay14 that are erroneously labeled for the presence of emphysema.},
+ ments on chest radiographs (CXRs) with modern deep learning architectures. We use ChestXRay14, the largest
+ publicly available CXR dataset. We simulate situs inversus by horizontal flipping of the CXRs, allowing us to
+ precisely control the amount of label noise. We also perform experiments in classifying emphysema using the
+ ChestXRay14 provided labels that are known to be noisy. Our situs inversus experiments confirm results from
+ the computer vision literature that deep learning architectures are relatively robust but not completely insensi-
+ tive to label noise in the training data: without or with very low noise, classification results are near perfect; 16%
+ and 32% training label noise only lead to a 1.5% and 4.6% drop in accuracy. We investigate two metrics that
+ could be used to identify test samples that have an incorrect label: model confidence and model uncertainty.
We
+ show, in an observer study with an experienced chest radiologist, that both measures are effective in identifying
+ samples in ChestXRay14 that are erroneously labeled for the presence of emphysema.},
  file = {Call19.pdf:pdf\\Call19.pdf:PDF},
  optnote = {DIAG},
  number = {1},
  doi = {10.1117/12.2514290},
  month = {3},
  gsid = {1458347346763809084},
- gscites = {4},
+ gscites = {16},
+ ss_id = {58662a2da377ad2a9b250b514128988009a2a0b3},
+ all_ss_ids = {['58662a2da377ad2a9b250b514128988009a2a0b3']},
}

@inproceedings{Call19b,
@@ -3905,7 +4957,9 @@ @inproceedings{Call19b
  file = {:pdf/Call19b.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  gsid = {13448494044754660685},
- gscites = {4},
+ gscites = {15},
+ ss_id = {45c2090f06bae6402a0b0d4c77c5f5ee3fd7a513},
+ all_ss_ids = {['45c2090f06bae6402a0b0d4c77c5f5ee3fd7a513']},
}

@article{Call21,
@@ -3922,6 +4976,9 @@ @article{Call21
  publisher = {Public Library of Science},
  year = {2021},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/238626},
+ ss_id = {58f03af16be931f9a56c8499ed30efbb9b9c3592},
+ all_ss_ids = {['58f03af16be931f9a56c8499ed30efbb9b9c3592']},
+ gscites = {3},
}

@article{Call21a,
@@ -3936,6 +4993,9 @@ @article{Call21a
  pmid = {34171622},
  year = {2021},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/235735},
+ ss_id = {6cc01a49101001e1a490645e941b3d2311dd4265},
+ all_ss_ids = {['6cc01a49101001e1a490645e941b3d2311dd4265']},
+ gscites = {168},
}

@article{Call22,
@@ -3950,6 +5010,9 @@ @article{Call22
  journal = PLOSONE,
  pmid = {35900979},
  year = {2022},
+ ss_id = {e6376c22784f8360aa3d054fbfecb113265c032a},
+ all_ss_ids = {['e6376c22784f8360aa3d054fbfecb113265c032a']},
+ gscites = {1},
}

@article{Call22a,
@@ -3963,6 +5026,54 @@ @article{Call22a
  optnote = {DIAG, RADIOLOGY},
  pmid = {36374875},
  year = {2022},
+ ss_id = {5cf064abf52e73426c32a6c4725f0de26e0c1c5a},
+ all_ss_ids = {['5cf064abf52e73426c32a6c4725f0de26e0c1c5a']},
+ gscites = {4},
+}
+
+@phdthesis{Call23,
+ author = {Erdi \c{C}all\i},
+ title = {Deep learning methods towards clinically applicable Chest X-ray interpretation systems},
+ url = {https://repository.ubn.ru.nl/handle/2066/292983},
+ abstract = {Chest X-Ray is a very commonly acquired, low-cost imaging examination which is sensitive for multiple pathologies while exposing the patient to very little ionizing radiation. The number of medical imaging examinations being acquired increases year-on-year while there is a global shortage of radiologists qualified to interpret the images. This research presents findings on the use of deep learning for interpretation of chest X-Ray images. An in-depth review of the current state-of-the-art is provided as well as investigation of handling of label noise, outlier detection, explainable identification of emphysema and detection of COVID-19 with robustness to missing data.},
+ copromotor = {K. Murphy},
+ file = {:pdf/Calli23.pdf:PDF},
+ journal = {PhD thesis},
+ optnote = {DIAG, RADIOLOGY},
+ promotor = {B. van Ginneken},
+ school = {Radboud University, Nijmegen},
+ year = {2023},
+}
+
+@article{Cana22,
+ author = {Canalini, Luca and Klein, Jan and Waldmannstetter, Diana and Kofler, Florian and Cerri, Stefano and Hering, Alessa and Heldmann, Stefan and Schlaeger, Sarah and Menze, Bjoern H.
and Wiestler, Benedikt and Kirschke, Jan and Hahn, Horst K.},
+ title = {~~Quantitative evaluation of the influence of multiple MRI sequences and of pathological tissues on the registration of longitudinal data acquired during brain tumor treatment},
+ doi = {10.3389/fnimg.2022.977491},
+ year = {2022},
+ abstract = {Registration methods facilitate the comparison of multiparametric magnetic resonance images acquired at different stages of brain tumor treatments. Image-based registration solutions are influenced by the sequences chosen to compute the distance measure, and by the lack of image correspondences due to the resection cavities and pathological tissues. Nonetheless, an evaluation of the impact of these input parameters on the registration of longitudinal data is still missing. This work evaluates the influence of multiple sequences, namely T1-weighted (T1), T2-weighted (T2), contrast enhanced T1-weighted (T1-CE), and T2 Fluid Attenuated Inversion Recovery (FLAIR), and the exclusion of the pathological tissues on the non-rigid registration of pre- and post-operative images. We here investigate two types of registration methods, an iterative approach and a convolutional neural network solution based on a 3D U-Net. We employ two test sets to compute the mean target registration error (mTRE) based on corresponding landmarks. In the first set, markers are positioned exclusively in the surroundings of the pathology. The methods employing T1-CE achieve the lowest mTREs, with an improvement of up to 0.8 mm for the iterative solution. The results are higher than the baseline when using the FLAIR sequence. When excluding the pathology, lower mTREs are observable for most of the methods. In the second test set, corresponding landmarks are located in the entire brain volumes. Both solutions employing T1-CE obtain the lowest mTREs, with a decrease of up to 1.16 mm for the iterative method, whereas the results worsen using the FLAIR. When excluding the pathology, an improvement is observable for the CNN method using T1-CE. Both approaches utilizing the T1-CE sequence obtain the best mTREs, whereas the FLAIR is the least informative to guide the registration process. Moreover, the exclusion of pathology from the distance measure computation improves the registration of the brain tissues surrounding the tumor.
Thus, this work provides the first numerical evaluation of the influence of these parameters on the registration of longitudinal magnetic resonance images, and it can be helpful for developing future algorithms.},
+ url = {http://dx.doi.org/10.3389/fnimg.2022.977491},
+ file = {Cana22.pdf:pdf\\Cana22.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Frontiers in Neuroimaging},
+ automatic = {yes},
+ citation-count = {1},
+ volume = {1},
+ pmid = {37555157},
+}
+
+@book{Cana23,
+ author = {Canalini, Luca and Klein, Jan and Gerken, Annika and Heldmann, Stefan and Hering, Alessa and Hahn, Horst K.},
+ title = {~~Iterative Method to Register Longitudinal MRI Acquisitions in Neurosurgical Context},
+ doi = {10.1007/978-3-031-33842-7_23},
+ year = {2023},
+ abstract = {Abstract unavailable},
+ url = {http://dx.doi.org/10.1007/978-3-031-33842-7_23},
+ file = {Cana23.pdf:pdf\\Cana23.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Brainlesion: Glioma, Multiple Sclerosis, Stroke and Traumatic Brain Injuries},
+ automatic = {yes},
+ citation-count = {0},
+ pages = {262-272},
}

@article{Carr11,
@@ -3998,7 +5109,9 @@ @article{Cast17
  optnote = {DIAG},
  year = {2017},
  gsid = {17547073303920483171},
- gscites = {5},
+ gscites = {10},
+ ss_id = {d20888ebb2f710492747031f387c4efce4f2c2dd},
+ all_ss_ids = {['d20888ebb2f710492747031f387c4efce4f2c2dd']},
}

@article{Cele20,
@@ -4012,6 +5125,9 @@ @article{Cele20
  pmid = {31651690},
  optnote = {DIAG, RADIOLOGY},
  file = {Cele20.pdf:pdf\\Cele20.pdf:PDF},
+ ss_id = {b8c7e6fcff579b9332541d2cef25b6960906096e},
+ all_ss_ids = {['b8c7e6fcff579b9332541d2cef25b6960906096e']},
+ gscites = {8},
}

@inproceedings{Char13,
@@ -4042,7 +5158,9 @@ @article{Char15
  optnote = {DIAG, RADIOLOGY},
  pmid = {26584489},
  gsid = {7303540606188063974},
- gscites = {21},
+ gscites = {51},
+ ss_id = {4db42d98f611269628eb8ddc4171c4a5f412506a},
+ all_ss_ids = {['4db42d98f611269628eb8ddc4171c4a5f412506a']},
}

@conference{Char15a,
@@ -4052,15 +5170,15 @@ @conference{Char15a
  year = {2015},
  url = {http://rsna2015.rsna.org/program/},
  abstract = {PURPOSE
- Changes in the morphology of the airways contribute to lung function impairment in chronic obstructive pulmonary disease (COPD). Measurements of airway morphology might be influenced by the quality of the airway segmentation. In this study we investigate the stability of a commonly used airway measurement (Pi10) from CT scans for varying segmentation depths of the airways.
- METHOD AND MATERIALS
- Inspiratory low-dose thoracic CT scans of 267 subjects, well distributed over GOLD stages, were selected for this study. Airways were automatically extracted by a state-of-the-art segmentation method and manually corrected to ensure a leakage-free segmentation. Airway wall thickness quantification was performed in orthogonal cross-sections every 1mm throughout the entire airway tree using an intensity-integration technique which accounts for partial volume effects. Using regression on all cross-sectional measurements, airway morphology was expressed as the square root of wall area at airways with a perimeter of 10mm (Pi10). To determine the sensitivity of the Pi10 measurement to the length of the segmented airway tree, sensitivity analysis was performed on Pi10 by leaving out wall measurements of the smallest airways and recalculating the Pi10. For each subject, Pi10 regression analysis was repeated excluding airways with a lumen perimeter below 6mm, 8mm or 10mm. The recalculated Pi10 measurements were compared to the baseline Pi10.
- RESULTS
- The segmented airway trees consisted for 55% of airways with lumen diameters below 10mm, 19% below 8mm, and 1% below 6mm. The average baseline Pi10 of all subjects was 2.43 +/- 0.56 (range [1.40, 4.36]), which corresponds to an average airway wall thickness (for an airway with a lumen perimeter of 10mm) of 0.52mm +/- 0.21mm. By excluding airways with a lumen perimeter below 6, 8 or 10mm from the regression analysis, absolute changes in Pi10 were 0.003 +/- 0.004 (0.11%), 0.035 +/- 0.023 (1.46%), and 0.107 +/- 0.087 (4.6%), respectively, corresponding to changes in airway wall thickness (at 10mm lumen perimeter) of 0.001, 0.013, and 0.039mm.
- CONCLUSION
- The commonly used Pi10 measurement to express airway morphology from a CT scan is insensitive to the exclusion of smaller airways in the computation.
- CLINICAL RELEVANCE/APPLICATION
- When expressing airway morphology as Pi10, there is no need to (manually) adjust automatic airway segmentation methods to include smaller airways in order to obtain an accurate Pi10 measurement.},
+ Changes in the morphology of the airways contribute to lung function impairment in chronic obstructive pulmonary disease (COPD). Measurements of airway morphology might be influenced by the quality of the airway segmentation. In this study we investigate the stability of a commonly used airway measurement (Pi10) from CT scans for varying segmentation depths of the airways.
+ METHOD AND MATERIALS
+ Inspiratory low-dose thoracic CT scans of 267 subjects, well distributed over GOLD stages, were selected for this study. Airways were automatically extracted by a state-of-the-art segmentation method and manually corrected to ensure a leakage-free segmentation. Airway wall thickness quantification was performed in orthogonal cross-sections every 1mm throughout the entire airway tree using an intensity-integration technique which accounts for partial volume effects. Using regression on all cross-sectional measurements, airway morphology was expressed as the square root of wall area at airways with a perimeter of 10mm (Pi10). To determine the sensitivity of the Pi10 measurement to the length of the segmented airway tree, sensitivity analysis was performed on Pi10 by leaving out wall measurements of the smallest airways and recalculating the Pi10. For each subject, Pi10 regression analysis was repeated excluding airways with a lumen perimeter below 6mm, 8mm or 10mm. The recalculated Pi10 measurements were compared to the baseline Pi10.
+ RESULTS
+ The segmented airway trees consisted for 55% of airways with lumen diameters below 10mm, 19% below 8mm, and 1% below 6mm. The average baseline Pi10 of all subjects was 2.43 +/- 0.56 (range [1.40, 4.36]), which corresponds to an average airway wall thickness (for an airway with a lumen perimeter of 10mm) of 0.52mm +/- 0.21mm. By excluding airways with a lumen perimeter below 6, 8 or 10mm from the regression analysis, absolute changes in Pi10 were 0.003 +/- 0.004 (0.11%), 0.035 +/- 0.023 (1.46%), and 0.107 +/- 0.087 (4.6%), respectively, corresponding to changes in airway wall thickness (at 10mm lumen perimeter) of 0.001, 0.013, and 0.039mm.
+ CONCLUSION
+ The commonly used Pi10 measurement to express airway morphology from a CT scan is insensitive to the exclusion of smaller airways in the computation.
+ CLINICAL RELEVANCE/APPLICATION
+ When expressing airway morphology as Pi10, there is no need to (manually) adjust automatic airway segmentation methods to include smaller airways in order to obtain an accurate Pi10 measurement.},
  optnote = {DIAG, RADIOLOGY},
}

@conference{Char15b,
@@ -4071,15 +5189,15 @@ @conference{Char15b
  year = {2015},
  url = {http://rsna2015.rsna.org/program/},
  abstract = {PURPOSE
- Automated classification of pulmonary arteries and veins in thoracic CT scans is an unsolved problem which is important for e.g. CAD of pulmonary embolisms and treatment planning. This study presents and validates a new anatomy-based method to automatically classify arteries and veins in non-contrast chest CT scans.
- METHOD AND MATERIALS
- A set of 55 full inspiration non-contrast low dose chest CT scans (16x0.75mm, 120-140kVp, 30mAs) with variable severity of emphysema and interstitial lung diseases was taken from a lung cancer screening trial. In all state-of-the-art vessel segmentation algorithms, arteries and veins are attached at locations where they cross, since these algorithms are not designed to distinguish between bifurcating and crossing vessels. This method starts with automatic vessel segmentation, followed by pruning the vessel segmentation to detect locations that are inconsistent with the topology of a tree structure. By disconnecting the vessels at these locations, the vessel segmentation is separated into subtrees that fulfill a tree structure and are assumed to be of an arterial or venous label. Next, subtrees are grouped using anatomical knowledge that arterial and venous capillaries meet each other at the alveoli, which implies that the corresponding peripheral arteries and veins go towards similar regions. By analyzing the peripheral vessels in each subtree, subtrees of the same artery-vein label are grouped without knowing the actual label. To extract the final artery-vein labels of the grouped subtrees, classification is performed using the fact that veins have an overall larger volume compared to arteries. For quantitative evaluation, two human observers manually labeled a total of 2750 randomly selected arteries and veins from all 55 scans. The accuracy and Cohen's kappa between the observers and between the method and observers were used for evaluation.
- RESULTS
- Inter-observer Cohen's kappa was 0.84 with 93% accuracy. The proposed method achieved a mean accuracy of 88% and a Cohen's kappa of 0.76.
- CONCLUSION
- A new concept for artery-vein separation and classification was presented that uses anatomical information from peripheral arteries and veins. The performance of the presented method closely approximated the inter-observer agreement.
- CLINICAL RELEVANCE/APPLICATION
- Automatic artery-vein classification is essential for investigating pulmonary hypertension, COPD and for improving CAD systems for pulmonary embolisms.},
+ Automated classification of pulmonary arteries and veins in thoracic CT scans is an unsolved problem which is important for e.g. CAD of pulmonary embolisms and treatment planning. This study presents and validates a new anatomy-based method to automatically classify arteries and veins in non-contrast chest CT scans.
+ METHOD AND MATERIALS
+ A set of 55 full inspiration non-contrast low dose chest CT scans (16x0.75mm, 120-140kVp, 30mAs) with variable severity of emphysema and interstitial lung diseases was taken from a lung cancer screening trial.
In all state-of-the-art vessel segmentation algorithms, arteries and veins are attached at locations where they cross, since these algorithms are not designed to distinguish between bifurcating and crossing vessels. This method starts with automatic vessel segmentation, followed by pruning the vessel segmentation to detect locations that are inconsistent with the topology of a tree structure. By disconnecting the vessels at these locations, the vessel segmentation is separated into subtrees that fulfill a tree structure and are assumed to be of an arterial or venous label. Next, subtrees are grouped using anatomical knowledge that arterial and venous capillaries meet each other at the alveoli, which implies that the corresponding peripheral arteries and veins go towards similar regions. By analyzing the peripheral vessels in each subtree, subtrees of the same artery-vein label are grouped without knowing the actual label. To extract the final artery-vein labels of the grouped subtrees, classification is performed using the fact that veins have an overall larger volume compared to arteries. For quantitative evaluation, two human observers manually labeled a total of 2750 randomly selected arteries and veins from all 55 scans. The accuracy and Cohen's kappa between the observers and between the method and observers were used for evaluation.
+ RESULTS
+ Inter-observer Cohen's kappa was 0.84 with 93% accuracy. The proposed method achieved a mean accuracy of 88% and a Cohen's kappa of 0.76.
+ CONCLUSION
+ A new concept for artery-vein separation and classification was presented that uses anatomical information from peripheral arteries and veins. The performance of the presented method closely approximated the inter-observer agreement.
+ CLINICAL RELEVANCE/APPLICATION
+ Automatic artery-vein classification is essential for investigating pulmonary hypertension, COPD and for improving CAD systems for pulmonary embolisms.},
  optnote = {DIAG, RADIOLOGY},
}

@conference{Char16,
@@ -4089,14 +5207,14 @@ @conference{Char16
  booktitle = ATS,
  year = {2016},
  abstract = {{RATIONALE:}
- To evaluate the relative contributions of quantitative {CT} ({QCT}) measures of emphysema, air trapping, and airway wall thickening and narrowing, to airflow obstruction in cigarette smokers with and without chronic obstructive pulmonary disease ({COPD}).
- {METHOD:}
- 2000 cigarette smokers participating in the {COPDG}ene study were evaluated; 818 subjects were excluded because of missing {QCT}. Thirona Lung Quantification software was used to extract {QCT} measures from inspiratory and expiratory {CT} scans for each subject, including emphysema (%{LAA}-950, defined as the percentage of low attenuation areas ({LAA}) below -950{HU} in inspiratory scans), gas trapping (%{LAA}-856, defined as the percentage of {LAA} below -856{HU} in expiration), and an index score for airway wall thickening and/or narrowing (Pi10, defined as the root of the wall area of a hypothetical airway of 10-mm internal perimeter). The evaluated spirometry measures included the ratio of forced expiratory volume in 1 second ({FEV}1) and forced vital capacity ({FVC}), and the predicted percentage of {FEV}1 ({FEV}1%-predicted).
- {QCT} measures were correlated to {FEV}1/{FVC} and {FEV}1%-predicted using Pearson correlation. In addition, multiple linear regression analysis was used to evaluate the predictive value of the {QCT} measures on both {FEV}1/{FVC} and {FEV}1%-predicted.
For these models, the spirometry measures were log10-transformed to ensure a distribution of residuals closer to normal.
- {RESULTS:}
- The 1183 subjects were divided over {GOLD} stages 0 to 4: 478, 100, 279, 143 and 47. 136 subjects were unclassified by {GOLD}. %{LAA}-950, %{LAA}-856, and Pi10 correlated significantly with both {FEV}1/{FVC} (p<0.0001, r= -0.758, r=-0.829, and r=-0.423, respectively) and {FEV}1%-predicted (p<0.0001, r= -0.628, r=-0.728, and r=-0.547, respectively). In the regression model for {FEV}1/{FVC}, the combination of the three {QCT} measures accounted for 74.5% of the variation in {FEV}1/{FVC}, with a relative contribution of 68.7% for %{LAA}-856, 3.1% for Pi10, and 2.6% for %{LAA}-950. In the regression model for {FEV}1%-predicted, the combination of the three {QCT} measures accounted for 65.8% of the variation in {FEV}1%-predicted, with a relative contribution of 52.9% for %{LAA}-856, 11.6% for Pi10, and 1.3% for %{LAA}-950.
- {CONCLUSION:}
- Gas trapping and airway wall thickening and/or narrowing are the major contributors to airflow obstruction in cigarette smokers.},
+ To evaluate the relative contributions of quantitative {CT} ({QCT}) measures of emphysema, air trapping, and airway wall thickening and narrowing, to airflow obstruction in cigarette smokers with and without chronic obstructive pulmonary disease ({COPD}).
+ {METHOD:}
+ 2000 cigarette smokers participating in the {COPDG}ene study were evaluated; 818 subjects were excluded because of missing {QCT}. Thirona Lung Quantification software was used to extract {QCT} measures from inspiratory and expiratory {CT} scans for each subject, including emphysema (%{LAA}-950, defined as the percentage of low attenuation areas ({LAA}) below -950{HU} in inspiratory scans), gas trapping (%{LAA}-856, defined as the percentage of {LAA} below -856{HU} in expiration), and an index score for airway wall thickening and/or narrowing (Pi10, defined as the root of the wall area of a hypothetical airway of 10-mm internal perimeter). The evaluated spirometry measures included the ratio of forced expiratory volume in 1 second ({FEV}1) and forced vital capacity ({FVC}), and the predicted percentage of {FEV}1 ({FEV}1%-predicted).
+ {QCT} measures were correlated to {FEV}1/{FVC} and {FEV}1%-predicted using Pearson correlation. In addition, multiple linear regression analysis was used to evaluate the predictive value of the {QCT} measures on both {FEV}1/{FVC} and {FEV}1%-predicted. For these models, the spirometry measures were log10-transformed to ensure a distribution of residuals closer to normal.
+ {RESULTS:}
+ The 1183 subjects were divided over {GOLD} stages 0 to 4: 478, 100, 279, 143 and 47. 136 subjects were unclassified by {GOLD}. %{LAA}-950, %{LAA}-856, and Pi10 correlated significantly with both {FEV}1/{FVC} (p<0.0001, r= -0.758, r=-0.829, and r=-0.423, respectively) and {FEV}1%-predicted (p<0.0001, r= -0.628, r=-0.728, and r=-0.547, respectively). In the regression model for {FEV}1/{FVC}, the combination of the three {QCT} measures accounted for 74.5% of the variation in {FEV}1/{FVC}, with a relative contribution of 68.7% for %{LAA}-856, 3.1% for Pi10, and 2.6% for %{LAA}-950. In the regression model for {FEV}1%-predicted, the combination of the three {QCT} measures accounted for 65.8% of the variation in {FEV}1%-predicted, with a relative contribution of 52.9% for %{LAA}-856, 11.6% for Pi10, and 1.3% for %{LAA}-950.
+ {CONCLUSION:}
+ Gas trapping and airway wall thickening and/or narrowing are the major contributors to airflow obstruction in cigarette smokers.},
  optnote = {DIAG, RADIOLOGY},
}

@conference{Char16a,
@@ -4106,7 +5224,7 @@ @conference{Char16a
  booktitle = RSNA,
  year = {2016},
  abstract = {PURPOSE: We investigated the relationship between airway dimensions and airflow obstruction and respiratory quality of life in current and former cigarette smokers. METHOD AND MATERIALS:
- Cigarette smokers who enrolled in the COPDGene study were studied. Spirometry assessment included forced expiratory volume in 1 sec (FEV1), forced vital capacity (FVC), % predicted FEV1 (FEV1%-p), % predicted FVC (FVC%-p), and peak expiratory flow (PEF). Respiratory quality of life was assessed by the St George's Respiratory Questionnaire (SGRQ) score and 6 Minute Walking Distance (SMWD). Inspiratory CT was available to extract the airways, the amount of emphysema, and the total lung capacity (TLC). Lumen perimeters and airway wall areas were automatically extracted perpendicular to the airways. Linear regression was performed on these measurements to calculate an index score of airway wall thickness, expressed as the square root of wall area at airways with a perimeter of 10mm (Pi10). Emphysema was defined as the percentage of low-attenuation area below -950 HU (LAA%-950).
Multiple linear regression was used to determine the predictive value of Pi10 and smoking status on airflow obstruction and respiratory quality of life. An interaction was included in the model to investigate if the effect of Pi10 differed by smoking status. All models were adjusted for age, gender, body mass index, pack years, bronchodilator responsiveness, TLC, and LAA%-950. RESULTS: 1544 cigarette smokers (894 former smokers) were included, with a mean age of 60.7 +- 8.9 years and a mean Pi10 of 2.23 +- 0.57mm. Pi10 was significantly associated with all airflow obstruction and respiratory quality of life measures (all p<0.001). The interaction between Pi10 and smoking status was significant for all measures except FVC%-p (p=0.30) and SGRQ score (p=0.064). This indicates that the effect of Pi10 on FEV1%-p, PEF, FEV1/FVC and SMWD was significantly reduced in current smokers compared to former smokers. CONCLUSION: Pi10 independently contributes to airflow obstruction and respiratory quality of life. This effect is stronger in former smokers as compared to current smokers. CLINICAL RELEVANCE/APPLICATION: Pi10 is an independent marker for airflow obstruction and respiratory quality of life and may be more strongly associated with these outcomes in former smokers than current smokers.},
  optnote = {DIAG, RADIOLOGY},
}

@conference{Char16b,
@@ -4116,7 +5234,7 @@ @conference{Char16b
  booktitle = RSNA,
  year = {2016},
  abstract = {PURPOSE: To predict COPD and smoking-related morbidity in cigarette smokers using quantitative CT (QCT) measures. METHOD AND MATERIALS: 1544 subjects were included from the COPDGene study. COPD was defined by a ratio of forced expiratory volume in 1 sec. (FEV1) and forced vital capacity (FVC) < 0.7. Smoking-related morbidity was defined as FEV1/FVC < 0.70 with either a St George's Respiratory Questionnaire score >= 25 or an exacerbation frequency >= 2/year. On inspiratory CT, multiple cross-sectional lumen perimeters and airway wall areas were extracted from the airways. Using linear regression, airway wall thickness was defined as the square root of wall area of an airway with a perimeter of 10mm (Pi10). Total lung capacity (TLC) and emphysema were measured on inspiratory CT, where emphysema was defined as the % of low-attenuation areas (LAA%) < -950HU (LAA%-950). Air-trapping was defined on expiratory CT as LAA% < -856HU (LAA%-856). Six logistic regression models were fitted for both the prediction of COPD and smoking-related morbidity using a random subset of 761 subjects. Model 1 included only age, gender, BMI, pack years, smoking status, and TLC, while models 2 to 6 additionally included: LAA%-950 (model 2), LAA%-856 (model 3), Pi10 (model 4), LAA%-950 + Pi10 (model 5), and LAA%-950 + LAA%-856 + Pi10 (model 6). The models were validated on a separate set (810 subjects) using the area under the receiver operating curve (AUC). RESULTS:
- The validation set consisted of 369 subjects with and 441 without COPD. QCT measures were independent predictors of COPD in all models (p<0.001), with AUC values for models 1 to 6 of 0.77, 0.85, 0.90, 0.87, 0.91, and 0.93, respectively. The validation set consisted of 216 subjects with and 594 without smoking-related morbidity. QCT measures were independent predictors of smoking-related morbidity in all models (p<0.001, except for LAA%-950 in model 5), with AUC values for models 1 to 6 of 0.72, 0.83, 0.87, 0.83, 0.88, and 0.89, respectively. CONCLUSION: LAA%-950, LAA%-856, and Pi10 are independent predictors of COPD and smoking-related morbidity.
The model including only inspiratory QCT predictors has similar predictive value to the model that also includes expiratory air-trapping. CLINICAL RELEVANCE/APPLICATION: Since LAA%-950 and Pi10 can be readily extracted from inspiratory images, these measures may be useful to predict smoking-related morbidity in lung cancer screening.},
+ The validation set consisted of 369 subjects with and 441 without COPD. QCT measures were independent predictors of COPD in all models (p<0.001), with AUC values for models 1 to 6 of 0.77, 0.85, 0.90, 0.87, 0.91, and 0.93, respectively. The validation set consisted of 216 subjects with and 594 without smoking-related morbidity. QCT measures were independent predictors of smoking-related morbidity in all models (p<0.001, except for LAA%-950 in model 5), with AUC values for models 1 to 6 of 0.72, 0.83, 0.87, 0.83, 0.88, and 0.89, respectively. CONCLUSION: LAA%-950, LAA%-856, and Pi10 are independent predictors of COPD and smoking-related morbidity. The model including only inspiratory QCT predictors has similar predictive value to the model that also includes expiratory air-trapping. CLINICAL RELEVANCE/APPLICATION: Since LAA%-950 and Pi10 can be readily extracted from inspiratory images, these measures may be useful to predict smoking-related morbidity in lung cancer screening.},
  optnote = {DIAG, RADIOLOGY},
}

@article{Char16c,
@@ -4130,15 +5248,17 @@ @article{Char16c
  doi = {10.1016/j.media.2016.11.001},
  url = {http://dx.doi.org/10.1016/j.media.2016.11.001},
  abstract = {We propose a novel method to improve airway segmentation in thoracic computed tomography (CT) by detecting and removing leaks. Leak detection is formulated as a classification problem, in which a convolutional network (ConvNet) is trained in a supervised fashion to perform the classification task. In order to increase the segmented airway tree length, we take advantage of the fact that multiple segmentations can be extracted from a given airway segmentation algorithm by varying the parameters that influence the tree length and the amount of leaks. We propose a strategy in which the combination of these segmentations after removing leaks can increase the airway tree length while limiting the amount of leaks. This strategy therefore largely circumvents the need for parameter fine-tuning of a given airway segmentation algorithm.
-
- The ConvNet was trained and evaluated using a subset of inspiratory thoracic CT scans taken from the COPDGene study. Our method was validated on a separate independent set of the EXACT'09 challenge. We show that our method significantly improves the quality of a given leaky airway segmentation, achieving a higher sensitivity at a low false-positive rate compared to all the state-of-the-art methods that entered in EXACT'09, and approaching the performance of the combination of all of them.},
+
+ The ConvNet was trained and evaluated using a subset of inspiratory thoracic CT scans taken from the COPDGene study. Our method was validated on a separate independent set of the EXACT'09 challenge.
We show that our method significantly improves the quality of a given leaky airway segmentation, achieving a higher sensitivity at a low false-positive rate compared to all the state-of-the-art methods that entered in EXACT'09, and approaching the performance of the combination of all of them.},
  file = {Char16c.pdf:pdf\\Char16c.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  pmid = {27842236},
  publisher = {Elsevier {BV}},
  month = {2},
  gsid = {10119306392018768738},
- gscites = {49},
+ gscites = {86},
+ ss_id = {d884880febbc8e6c5f86c1040c009174632569c4},
+ all_ss_ids = {['d884880febbc8e6c5f86c1040c009174632569c4']},
}

@phdthesis{Char17,
@@ -4170,8 +5290,10 @@ @article{Char18
  optnote = {DIAG, RADIOLOGY},
  pmid = {29330380},
  gsid = {8466773781330734644},
- gscites = {9},
+ gscites = {16},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/183711},
+ ss_id = {3ed451d2830a2afcbbd254f0015eeafa796ba1b7},
+ all_ss_ids = {['3ed451d2830a2afcbbd254f0015eeafa796ba1b7']},
}

@article{Char19,
@@ -4188,8 +5310,10 @@ @article{Char19
  pmid = {30665516},
  month = {1},
  gsid = {2044101552450383649},
- gscites = {10},
+ gscites = {48},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/201027},
+ ss_id = {4c1db92618a758e92b0964e7ed65ea7ec48af0f0},
+ all_ss_ids = {['4c1db92618a758e92b0964e7ed65ea7ec48af0f0']},
}

@article{Chav93,
@@ -4210,6 +5334,33 @@ @article{Chav93
  gscites = {16},
}

+@inproceedings{Chel22,
+ author = {Eduard Chelebian and Francesco Ciompi},
+ booktitle = {Learning Meaningful Representations of Life, NeurIPS 2022},
+ title = {Seeded iterative clustering for histology region identification},
+ abstract = {Annotations are necessary to develop computer vision algorithms for histopathology, but dense annotations at a high resolution are often time-consuming to make. Deep learning models for segmentation are a way to alleviate the process, but require large amounts of training data, training times and computing power. To address these issues, we present seeded iterative clustering to produce a coarse segmentation densely and at the whole slide level. The algorithm uses precomputed representations as the clustering space and a limited amount of sparse interactive annotations as seeds to iteratively classify image patches. We obtain a fast and effective way of generating dense annotations for whole slide images and a framework that allows the comparison of neural network latent representations in the context of transfer learning.},
+ file = {Chel22.pdf:pdf\\Chel22.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ year = {2022},
+ ss_id = {892d4f3ee2cc9ff2dcf1c308fae68473dcf787d2},
+ all_ss_ids = {['892d4f3ee2cc9ff2dcf1c308fae68473dcf787d2']},
+ gscites = {0},
+}
+
+@article{Chen22,
+ author = {Chen, Yun-Fang and Vinayahalingam, Shankeeth and Berge, Stefaan and Liao, Yu-Fang and Maal, Thomas and Xi, Tong},
+ title = {Is the pattern of mandibular asymmetry in mild craniofacial microsomia comparable to non-syndromic class II asymmetry?},
+ doi = {10.1007/s00784-022-04429-6},
+ issue = {6},
+ pages = {4603--4613},
+ volume = {26},
+ abstract = {To compare the characteristics of mandibular asymmetry in patients with unilateral craniofacial microsomia (CFM) and class II asymmetry. Pretreatment cone-beam computed tomography of consecutive adults with Pruzansky-Kaban type I and IIA CFM (CFM group) was analyzed by 3D cephalometry. Fourteen mandibular landmarks and two dental landmarks were identified.
The mandibular size and positional asymmetry were calculated by using landmark-based linear and volumetric measurements, in terms of asymmetry ratios (affected/non-affected side) and absolute differences (affected - non-affected side). Results were compared with non-syndromic class II with matched severity of chin deviation (Class II group). Statistical analyses included independent t test, paired t test, chi-square test, and ANOVA. CFM group (n, 21; mean age, 20.4 +- 2.5 years) showed significantly larger size asymmetry in regions of mandibular body, ramus, and condyle compared to Class II group (n, 21; mean age, 27.8 +- 5.9 years) (p < 0.05). The curvature of mandibular body was asymmetric in CFM. Regarding the positional asymmetry of mandibular body, while a comparable transverse shift and a negligible yaw rotation were found among the two groups, the roll rotation in CFM was significantly greater as well as the occlusal (6.06deg vs. 4.17deg) and mandibular (7.84deg vs. 2.80deg) plane cants (p < 0.05). Mild CFM showed significantly more severe size asymmetry and roll rotation in mandible than non-CFM class II asymmetry. To improve the mandibular size and positional asymmetry in CFM, adjunct hard tissue augmentation or reduction in addition to OGS orthodontics with a meticulous roll and yaw planning is compulsory, which is expected to be distinct from treating non-CFM class II asymmetry.}, + file = {Chen22.pdf:pdf\\Chen22.pdf:PDF}, + journal = {Clinical oral investigations}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35218426}, + year = {2022}, +} @article{Chle18, author = {Chlebus, Grzegorz and Schenk, Andrea and Moltz, Jan Hendrik and van Ginneken, Bram and Hahn, Horst Karl and Meine, Hans}, @@ -4226,7 +5377,9 @@ @article{Chle18 optnote = {DIAG, RADIOLOGY}, pmid = {30341319}, gsid = {1182212609291447328}, - gscites = {46}, + gscites = {162}, + ss_id = {27cebb08aca785444166209593fc0ec740469b1b}, + all_ss_ids = {['27cebb08aca785444166209593fc0ec740469b1b']}, } @article{Chle19, @@ -4244,7 +5397,9 @@ @article{Chle19 pmid = {31107915}, month = {5}, gsid = {3883884374712278395}, - gscites = {3}, + gscites = {38}, + ss_id = {5bdb1a2667cbf2ce2f6142f14218df298a9eb090}, + all_ss_ids = {['5bdb1a2667cbf2ce2f6142f14218df298a9eb090']}, } @conference{Chle19a, @@ -4256,26 +5411,42 @@ @conference{Chle19a optnote = {DIAG, RADIOLOGY}, } -@PhdThesis{Chle22, - author = {Grzegorz Chlebus}, - title = {Deep Learning-Based Segmentation in Multimodal Abdominal Imaging}, - url = {https://repository.ubn.ru.nl/handle/2066/251471}, - abstract = {This thesis is devoted to the applications of deep learning segmentation algorithms to multimodal abdominal imaging. It focuses on the segmentation of liver, prostate, and liver tumors in CT and MRI images. - It aims not only to propose and evaluate new segmentation architectures, but also to investigate aspects such as the required time for the correction of automatic segmentation results, the impact on the inter-observer variability, and the optimization of annotation effort. - The following objectives were undertaken as part of this thesis: - - 1. The development of a two-stage cascade system for liver and liver tumor segmentation in CT images (Chapter 2); - 2. The development of an ensemble of three orthogonal 2D CNNs for liver segmentation in late-phase T1W MRI images (Chapter 3); - 3. 
The investigation of various active learning strategies to optimally select a set of CT slices to obtain the best possible liver segmentation method in CT without the need to manually annotate a large amount of training data (Chapter 4); - 4. The development of a novel multi-planar 3D anisotropic CNN architecture for prostate segmentation in multi-planar T2W MRI images (Chapter 5).}, +@phdthesis{Chle22, + author = {Grzegorz Chlebus}, + title = {Deep Learning-Based Segmentation in Multimodal Abdominal Imaging}, + url = {https://repository.ubn.ru.nl/handle/2066/251471}, + abstract = {This thesis is devoted to the applications of deep learning segmentation algorithms to multimodal abdominal imaging. It focuses on the segmentation of liver, prostate, and liver tumors in CT and MRI images. + It aims not only to propose and evaluate new segmentation architectures, but also to investigate aspects such as the required time for the correction of automatic segmentation results, the impact on the inter-observer variability, and the optimization of annotation effort. + The following objectives were undertaken as part of this thesis: + + 1. The development of a two-stage cascade system for liver and liver tumor segmentation in CT images (Chapter 2); + 2. The development of an ensemble of three orthogonal 2D CNNs for liver segmentation in late-phase T1W MRI images (Chapter 3); + 3. The investigation of various active learning strategies to optimally select a set of CT slices to obtain the best possible liver segmentation method in CT without the need to manually annotate a large amount of training data (Chapter 4); + 4. The development of a novel multi-planar 3D anisotropic CNN architecture for prostate segmentation in multi-planar T2W MRI images (Chapter 5).}, copromotor = {H. Meine and A. Schenk}, - file = {Chle22.pdf:pdf/Chle22.pdf:PDF}, - journal = {PhD thesis}, - optnote = {DIAG, RADIOLOGY}, - promotor = {B. van Ginneken and H. K. Hahn}, - school = {Radboud University, Nijmegen}, - year = {2022}, -} + file = {Chle22.pdf:pdf/Chle22.pdf:PDF}, + journal = {PhD thesis}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken and H. K. Hahn}, + school = {Radboud University, Nijmegen}, + year = {2022}, +} + +@article{Chle22a, + author = {Chlebus, Grzegorz and Schenk, Andrea and Hahn, Horst K. and Van Ginneken, Bram and Meine, Hans}, + title = {~~Robust Segmentation Models Using an Uncertainty Slice Sampling-Based Annotation Workflow}, + doi = {10.1109/access.2022.3141021}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/ACCESS.2022.3141021}, + file = {Chle22a.pdf:pdf\\Chle22a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {IEEE Access}, + automatic = {yes}, + citation-count = {7}, + pages = {4728-4738}, + volume = {10}, +} @inproceedings{Chon11, author = {D. Chong and E. M. van Rikxoort and H. J. Kim and J. G. Goldin and M. S. 
Brown}, @@ -4345,7 +5516,9 @@ @article{Chun17 pmid = {28339311}, month = {7}, gsid = {6175266469384712630}, - gscites = {32}, + gscites = {48}, + ss_id = {bc86e211f26bd5c68e9548c1c215fa9605c6daa2}, + all_ss_ids = {['bc86e211f26bd5c68e9548c1c215fa9605c6daa2']}, } @article{Chun17a, @@ -4362,8 +5535,10 @@ @article{Chun17a pmid = {28439653}, month = {4}, gsid = {10492992913475401535}, - gscites = {8}, + gscites = {14}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/181940}, + ss_id = {abdee0f36ad16d6c3064aee8e98c4b2bd6c691a0}, + all_ss_ids = {['abdee0f36ad16d6c3064aee8e98c4b2bd6c691a0']}, } @article{Chun18, @@ -4376,36 +5551,38 @@ @article{Chun18 pages = {e0191874}, doi = {10.1371/journal.pone.0191874}, abstract = {PURPOSE: To evaluate whether, and to which extent, experienced radiologists are - able to visually correctly differentiate transient from persistent subsolid - nodules from a single CT examination alone and to determine CT morphological - features to make this differentiation. - MATERIALS AND METHODS: We selected 86 transient and 135 persistent subsolid - nodules from the National Lung Screening Trial (NLST) database. Four experienced - radiologists visually assessed a predefined list of morphological features and - gave a final judgment on a continuous scale (0-100). To assess observer - performance, area under the receiver operating characteristic (ROC) curve was - calculated. Statistical differences of morphological features between transient - and persistent lesions were calculated using Chi-square. Inter-observer agreement - of morphological features was evaluated by percentage agreement. - RESULTS: Forty-nine lesions were excluded by at least 2 observers, leaving 172 - lesions for analysis. On average observers were able to differentiate transient - from persistent subsolid nodules >= 10 mm with an area under the curve of 0.75 - (95% CI 0.67-0.82). Nodule type, lesion margin, presence of a well-defined - border, and pleural retraction showed significant differences between transient - and persistent lesions in two observers. Average pair-wise percentage agreement - for these features was 81%, 64%, 47% and 89% respectively. Agreement for other - morphological features varied from 53% to 95%. - CONCLUSION: The visual capacity of experienced radiologists to differentiate - persistent and transient subsolid nodules is moderate in subsolid nodules larger - than 10 mm. Performance of the visual assessment of CT morphology alone is not - sufficient to generally abandon a short-term follow-up for subsolid nodules.}, + able to visually correctly differentiate transient from persistent subsolid + nodules from a single CT examination alone and to determine CT morphological + features to make this differentiation. + MATERIALS AND METHODS: We selected 86 transient and 135 persistent subsolid + nodules from the National Lung Screening Trial (NLST) database. Four experienced + radiologists visually assessed a predefined list of morphological features and + gave a final judgment on a continuous scale (0-100). To assess observer + performance, area under the receiver operating characteristic (ROC) curve was + calculated. Statistical differences of morphological features between transient + and persistent lesions were calculated using Chi-square. Inter-observer agreement + of morphological features was evaluated by percentage agreement. + RESULTS: Forty-nine lesions were excluded by at least 2 observers, leaving 172 + lesions for analysis. 
On average observers were able to differentiate transient + from persistent subsolid nodules >= 10 mm with an area under the curve of 0.75 + (95% CI 0.67-0.82). Nodule type, lesion margin, presence of a well-defined + border, and pleural retraction showed significant differences between transient + and persistent lesions in two observers. Average pair-wise percentage agreement + for these features was 81%, 64%, 47% and 89% respectively. Agreement for other + morphological features varied from 53% to 95%. + CONCLUSION: The visual capacity of experienced radiologists to differentiate + persistent and transient subsolid nodules is moderate in subsolid nodules larger + than 10 mm. Performance of the visual assessment of CT morphology alone is not + sufficient to generally abandon a short-term follow-up for subsolid nodules.}, file = {Chun18.pdf:pdf/Chun18.pdf:PDF}, optnote = {DIAG}, pmid = {29438443}, month = {2}, gsid = {6862237226851140169}, - gscites = {4}, + gscites = {7}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/189883}, + ss_id = {ef537c1ce62c69018575d2c90b70024a32256ef5}, + all_ss_ids = {['ef537c1ce62c69018575d2c90b70024a32256ef5']}, } @phdthesis{Chun18a, @@ -4437,7 +5614,9 @@ @article{Chun18b pmid = {29777062}, month = {5}, gsid = {13012009912748302045}, - gscites = {18}, + gscites = {30}, + ss_id = {1cc0b06d80d21082d2610f11daa488cc6817b207}, + all_ss_ids = {['1cc0b06d80d21082d2610f11daa488cc6817b207']}, } @conference{Ciet11, @@ -4464,6 +5643,9 @@ @article{Ciet14 number = {1}, pmid = {23598953}, month = {4}, + ss_id = {f94e09771e679cce8593b18228915cb5b7474141}, + all_ss_ids = {['f94e09771e679cce8593b18228915cb5b7474141']}, + gscites = {37}, } @inproceedings{Ciom09, @@ -4586,6 +5768,8 @@ @inproceedings{Ciom13 optnote = {DIAG, RADIOLOGY}, gsid = {14808204315824444400}, gscites = {6}, + ss_id = {499ba2c155d9968756e92cf2ba8107dfa1e8c618}, + all_ss_ids = {['499ba2c155d9968756e92cf2ba8107dfa1e8c618']}, } @inproceedings{Ciom13a, @@ -4600,7 +5784,9 @@ @inproceedings{Ciom13a file = {Ciom13a.pdf:pdf\\Ciom13a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {14811347107651563370}, - gscites = {4}, + gscites = {6}, + ss_id = {a4e7fa7efd9d50e18411b053402aab5bcbc9ff99}, + all_ss_ids = {['a4e7fa7efd9d50e18411b053402aab5bcbc9ff99']}, } @article{Ciom14, @@ -4618,6 +5804,8 @@ @article{Ciom14 gsid = {2279805513688046695}, gscites = {5}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/139326}, + ss_id = {a5416d72cf39f365e4f12ea31fcba1a314e73567}, + all_ss_ids = {['a5416d72cf39f365e4f12ea31fcba1a314e73567']}, } @conference{Ciom14a, @@ -4645,7 +5833,9 @@ @article{Ciom14c pmid = {25420257}, month = {4}, gsid = {4309508126026672345}, - gscites = {41}, + gscites = {54}, + ss_id = {c46078f5f85c01090f2ee949a0d332de7d6e4463}, + all_ss_ids = {['c46078f5f85c01090f2ee949a0d332de7d6e4463']}, } @inproceedings{Ciom15, @@ -4663,6 +5853,8 @@ @inproceedings{Ciom15 month = {3}, gsid = {1720898603461111674}, gscites = {6}, + ss_id = {671d9c99335001133ebf2125167aa2f2bcee6efa}, + all_ss_ids = {['671d9c99335001133ebf2125167aa2f2bcee6efa']}, } @article{Ciom15a, @@ -4681,8 +5873,10 @@ @article{Ciom15a pmid = {26458112}, month = {12}, gsid = {14568521204042342742}, - gscites = {191}, + gscites = {258}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/154608}, + ss_id = {622791cfb4e509158fbc858e9a970283de0f8045}, + all_ss_ids = {['622791cfb4e509158fbc858e9a970283de0f8045']}, } @article{Ciom16a, @@ -4700,7 +5894,9 @@ @article{Ciom16a optnote = {DIAG, RADIOLOGY}, pmid = {27782708}, gsid = 
{13722029973076101949}, - gscites = {7}, + gscites = {15}, + ss_id = {271e85d19b4cd1997a110525ffb66df7a8fa7fe6}, + all_ss_ids = {['de1fc90d61276094416b430605d47ee5f8cc7b4d', '271e85d19b4cd1997a110525ffb66df7a8fa7fe6']}, } @conference{Ciom16b, @@ -4726,7 +5922,9 @@ @inproceedings{Ciom17 optnote = {DIAG}, month = {4}, gsid = {4102008958123816219}, - gscites = {75}, + gscites = {172}, + ss_id = {87b612323f759cea5cb224331bfacf59a0f335b6}, + all_ss_ids = {['87b612323f759cea5cb224331bfacf59a0f335b6']}, } @article{Ciom17a, @@ -4743,8 +5941,9 @@ @article{Ciom17a pmid = {28880026}, month = {4}, gsid = {8567006734401252147}, - gscites = {171}, + gscites = {307}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/174848}, + all_ss_ids = {['86b9d39fb026746752f95f87f87fd26b8512c913', 'fd0901b1f2121506391a7859de8fb695d159a393']}, } @article{Ciom21, @@ -4761,6 +5960,9 @@ @article{Ciom21 optnote = {DIAG}, publisher = {Institute of Electrical and Electronics Engineers ({IEEE})}, year = {2021}, + ss_id = {f8208405f959304ba68f3d11d7f880c305ac7b39}, + all_ss_ids = {['f8208405f959304ba68f3d11d7f880c305ac7b39']}, + gscites = {0}, } @article{Cohe16, @@ -4779,8 +5981,10 @@ @article{Cohe16 pmid = {27048527}, month = {4}, gsid = {3518501568376626554}, - gscites = {31}, + gscites = {40}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/165779}, + ss_id = {2639e166ecb1dc63dcfe61eefe66b3e2213f17f6}, + all_ss_ids = {['2639e166ecb1dc63dcfe61eefe66b3e2213f17f6']}, } @article{Cohe16a, @@ -4799,8 +6003,10 @@ @article{Cohe16a optnote = {DIAG, RADIOLOGY}, pmid = {27161068}, gsid = {13638511362314366120}, - gscites = {12}, + gscites = {15}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/171720}, + ss_id = {670931e779bb4fe0dbb9a9bf3c00e5a53ce0f26b}, + all_ss_ids = {['670931e779bb4fe0dbb9a9bf3c00e5a53ce0f26b']}, } @article{Cohe17, @@ -4817,8 +6023,61 @@ @article{Cohe17 optnote = {DIAG}, pmid = {28058482}, gsid = {10862932001783753179}, - gscites = {14}, + gscites = {15}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/178219}, + ss_id = {5f74400c97e716a85f20871b4b477dbca9680664}, + all_ss_ids = {['5f74400c97e716a85f20871b4b477dbca9680664']}, +} + +@article{Coli17, + author = {Colijn, Johanna M. and Buitendijk, Gabri\"{e}lle H.S. and Prokofyeva, Elena and Alves, Dalila and Cachulo, Maria L. and Khawaja, Anthony P. and Cougnard-Gregoire, Audrey and Merle, B\'{e}n\'{e}dicte M.J. and Korb, Christina and Erke, Maja G. and Bron, Alain and Anastasopoulos, Eleftherios and Meester-Smoor, Magda A. and Segato, Tatiana and Piermarocchi, Stefano and de Jong, Paulus T.V.M. and Vingerling, Johannes R. and Topouzis, Fotis and Creuzot-Garcher, Catherine and Bertelsen, Geir and Pfeiffer, Norbert and Fletcher, Astrid E. and Foster, Paul J. and Silva, Rufino and Korobelnik, Jean-Fran\c{c}ois and Delcourt, C\'{e}cile and Klaver, Caroline C.W. and Ajana, Soufiane and Arango-Gonzalez, Blanca and Arndt, Verena and Bhatia, Vaibhav and Bhattacharya, Shomi S. and Biarn\'{e}s, Marc and Borrell, Anna and B\"{u}hren, Sebastian and Calado, Sofia M. and Colijn, Johanna M. and Cougnard-Gr\'{e}goire, Audrey and Dammeier, Sascha and de Jong, Eiko K. and De la Cerda, Berta and Delcourt, C\'{e}cile and den Hollander, Anneke I. and Diaz-Corrales, Francisco J. and Diether, Sigrid and Emri, Eszter and Endermann, Tanja and Ferraro, Lucia L. and Garcia, M\'{i}riam and Heesterbeek, Thomas J. and Honisch, Sabina and Hoyng, Carel B. and Kersten, Eveline and Kilger, Ellen and Klaver, Caroline C.W. 
and Langen, Hanno and Lengyel, Imre and Luthert, Phil and Maugeais, Cyrille and Meester-Smoor, Magda and Merle, B\'{e}n\'{e}dicte M.J. and Mon\'{e}s, Jordi and Nogoceke, Everson and Peto, Tunde and Pool, Frances M. and Rodr\'{i}guez, Eduardo and Ueffing, Marius and Ulrich Bartz-Schmidt, Karl U. and van Leeuwen, Elisabeth M. and Verzijden, Timo and Zumbansen, Markus and Acar, Niyazi and Anastosopoulos, Eleftherios and Azuara-Blanco, Augusto and Bergen, Arthur and Bertelsen, Geir and Binquet, Christine and Bird, Alan and Br\'{e}tillon, Lionel and Bron, Alain and Buitendijk, Gabrielle and Cachulo, Maria Luz and Chakravarthy, Usha and Chan, Michelle and Chang, Petrus and Colijn, Johanna and Cougnard-Gr\'{e}goire, Audrey and Creuzot-Garcher, Catherine and Cumberland, Philippa and Cunha-Vaz, Jos\'{e} and Daien, Vincent and Deak, Gabor and Delcourt, C\'{e}cile and Delyfer, Marie-No\"{e}lle and den Hollander, Anneke and Dietzel, Martha and Erke, Maja Gran and Fauser, Sascha and Finger, Robert and Fletcher, Astrid and Foster, Paul and Founti, Panayiota and G\"{o}bel, Arno and Gorgels, Theo and Grauslund, Jakob and Grus, Franz and Hammond, Christopher and Helmer, Catherine and Hense, Hans-Werner and Hermann, Manuel and Hoehn, Ren\'{e} and Hogg, Ruth and Holz, Frank and Hoyng, Carel and Jansonius, Nomdo and Janssen, Sarah and Khawaja, Anthony and Klaver, Caroline and Korobelnik, Jean-Fran\c{c}ois and Lamparter, Julia and Le Goff, M\'{e}lanie and Leal, Sergio and Lechanteur, Yara and Lehtim\"{a}ki, Terho and Lotery, Andrew and Leung, Irene and Mauschitz, Matthias and Merle, B\'{e}n\'{e}dicte and Meyer zu Westrup, Verena and Midena, Edoardo and Miotto, Stefania and Mirshahi, Alireza and Mohan-Sa\"{i}d, Sadek and Mueller, Michael and Muldrew, Alyson and Nunes, Sandrina and Oexle, Konrad and Peto, Tunde and Piermarocchi, Stefano and Prokofyeva, Elena and Rahi, Jugnoo and Raitakari, Olli and Ribeiro, Luisa and Rougier, Marie-B\'{e}n\'{e}dicte and Sahel, Jos\'{e} and Salonikiou, Aggeliki and Sanchez, Clarisa and Schmitz-Valckenberg, Steffen and Schweitzer, C\'{e}dric and Segato, Tatiana and Shehata, Jasmin and Silva, Rufino and Silvestri, Giuliana and Simader, Christian and Souied, Eric and Springelkamp, Henriet and Tapp, Robyn and Topouzis, Fotis and Verhoeven, Virginie and Von Hanno, Therese and Vujosevic, Stela and Williams, Katie and Wolfram, Christian and Yip, Jennifer and Zerbib, Jennyfer and Zwiener, Isabella}, + title = {~~Prevalence of Age-Related Macular Degeneration in Europe}, + doi = {10.1016/j.ophtha.2017.05.035}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ophtha.2017.05.035}, + file = {Coli17.pdf:pdf\\Coli17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Ophthalmology}, + automatic = {yes}, + citation-count = {299}, + pages = {1753-1763}, + volume = {124}, + pmid = {28712657}, +} + +@article{Coli19, + author = {Colijn, Johanna M. and den Hollander, Anneke I. and Demirkan, Ayse and Cougnard-Gr\'{e}goire, Audrey and Verzijden, Timo and Kersten, Eveline and Meester-Smoor, Magda A. and Merle, Benedicte M.J. and Papageorgiou, Grigorios and Ahmad, Shahzad and Mulder, Monique T. and Costa, Miguel Angelo and Benlian, Pascale and Bertelsen, Geir and Bron, Alain M. and Claes, Birte and Creuzot-Garcher, Catherine and Erke, Maja Gran and Fauser, Sascha and Foster, Paul J. and Hammond, Christopher J. and Hense, Hans-Werner and Hoyng, Carel B. and Khawaja, Anthony P. 
and Korobelnik, Jean-Francois and Piermarocchi, Stefano and Segato, Tatiana and Silva, Rufino and Souied, Eric H. and Williams, Katie M. and van Duijn, Cornelia M. and Delcourt, C\'{e}cile and Klaver, Caroline C.W. and Acar, Niyazi and Altay, Lebriz and Anastosopoulos, Eleftherios and Azuara-Blanco, Augusto and Berendschot, Tos and Berendschot, Tos and Bergen, Arthur and Bertelsen, Geir and Binquet, Christine and Bird, Alan and Bobak, Martin and Larsen, Morten B\ogelund and Boon, Camiel and Bourne, Rupert and Br\'{e}tillon, Lionel and Broe, Rebecca and Bron, Alain and Buitendijk, Gabrielle and Cachulo, Maria Luz and Capuano, Vittorio and Carri\`{e}re, Isabelle and Chakravarthy, Usha and Chan, Michelle and Chang, Petrus and Colijn, Johanna and Cougnard-Gr\'{e}goire, Audrey and Cree, Angela and Creuzot-Garcher, Catherine and Cumberland, Phillippa and Cunha-Vaz, Jos\'{e} and Daien, Vincent and De Jong, Eiko and Deak, Gabor and Delcourt, C\'{e}cile and Delyfer, Marie-No\"{e}lle and Hollander, Anneke den and Dietzel, Martha and Erke, Maja Gran and Faria, Pedro and Farinha, Claudia and Fauser, Sascha and Finger, Robert and Fletcher, Astrid and Foster, Paul and Founti, Panayiota and Gorgels, Theo and Grauslund, Jakob and Grus, Franz and Hammond, Christopher and Heesterbeek, Thomas and Hense, Hans-Werner and Hermann, Manuel and Hoehn, Ren\'{e} and Hogg, Ruth and Holz, Frank and Hoyng, Carel and Jansonius, Nomdo and Janssen, Sarah and de Jong, Eiko and Khawaja, Anthony and Klaver, Caroline and Korobelnik, Jean-Fran\c{c}ois and Lamparter, Julia and Le Goff, M\'{e}lanie and Lehtim\"{a}ki, Terho and Leung, Irene and Lotery, Andrew and Mauschitz, Matthias and Meester, Magda and Merle, B\'{e}n\'{e}dicte and Meyer zu Westrup, Verena and Midena, Edoardo and Miotto, Stefania and Mirshahi, Alireza and Mohan-Sa\"{i}d, Sadek and Mueller, Michael and Muldrew, Alyson and Murta, Joaquim and Nickels, Stefan and Nunes, Sandrina and Owen, Christopher and Peto, Tunde and Pfeiffer, Norbert and Piermarocchi, Stefano and Prokofyeva, Elena and Rahi, Jugnoo and Raitakari, Olli and Rauscher, Franziska and Ribeiro, Luisa and Rougier, Marie-B\'{e}n\'{e}dicte and Rudnicka, Alicja and Sahel, Jos\'{e} and Salonikiou, Aggeliki and Sanchez, Clarisa and Schick, Tina and Schmitz-Valckenberg, Steffen and Schuster, Alexander and Schweitzer, C\'{e}dric and Segato, Tatiana and Shehata, Jasmin and Silva, Rufino and Silvestri, Giuliana and Simader, Christian and Souied, Eric and Speckauskas, Martynas and Springelkamp, Henriet and Tapp, Robyn and Topouzis, Fotis and van Leeuwen, Elisa and Verhoeven, Virginie and Verzijden, Timo and Vingerling, Hans and Von Hanno, Therese and Williams, Katie and Wolfram, Christian and Yip, Jennifer and Zerbib, Jennyfer and Ajana, Soufiane and Arango-Gonzalez, Blanca and Arndt, Verena and Bhatia, Vaibhav and Bhattacharya, Shomi S. and Biarn\'{e}s, Marc and Borrell, Anna and B\"{u}hren, Sebastian and Calado, Sofia M. and Colijn, Johanna M. and Cougnard-Gr\'{e}goire, Audrey and Dammeier, Sascha and de Jong, Eiko K. and De la Cerda, Berta and Delcourt, C\'{e}cile and den Hollander, Anneke I. and Diaz-Corrales, Francisco J. and Diether, Sigrid and Emri, Eszter and Endermann, Tanja and Ferraro, Lucia L. and Garcia, M\'{i}riam and Heesterbeek, Thomas J. and Honisch, Sabina and Hoyng, Carel B. and Kersten, Eveline and Kilger, Ellen and Klaver, Caroline C.W. and Langen, Hanno and Lengyel, Imre and Luthert, Phil and Maugeais, Cyrille and Meester-Smoor, Magda and Merle Inserm, B\'{e}n\'{e}dicte M.J. 
and Mon\'{e}s, Jordi and Nogoceke, Everson and Peto, Tunde and Pool, Frances M. and Rodr\'{i}guez, Eduardo and Ueffing, Marius and Ulrich Bartz-Schmidt, Karl U. and van Leeuwen, Elisabeth M. and Verzijden, Timo and Zumbansen, Markus}, + title = {~~Increased High-Density Lipoprotein Levels Associated with Age-Related Macular Degeneration}, + doi = {10.1016/j.ophtha.2018.09.045}, + year = {2019}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ophtha.2018.09.045}, + file = {Coli19.pdf:pdf\\Coli19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Ophthalmology}, + automatic = {yes}, + citation-count = {74}, + pages = {393-406}, + volume = {126}, + pmid = {30315903}, +} + +@article{Coli21, + author = {Colijn, Johanna M. and Liefers, Bart and Joachim, Nichole and Verzijden, Timo and Meester-Smoor, Magda A. and Biarn\'{e}s, Marc and Mon\'{e}s, Jordi and de Jong, Paulus T. V. M. and Vingerling, Johannes R. and Mitchell, Paul and S\'{a}nchez, Clara I. and Wang, Jie J. and Klaver, Caroline C. W. and EyeNED Reading Center and EYE-RISK Consortium}, + title = {~~Enlargement of Geographic Atrophy From First Diagnosis to End of Life}, + doi = {10.1001/jamaophthalmol.2021.1407}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1001/jamaophthalmol.2021.1407}, + file = {Coli21.pdf:pdf\\Coli21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {JAMA Ophthalmology}, + automatic = {yes}, + citation-count = {10}, + pages = {743}, + volume = {139}, + pmid = {34014262}, } @article{Dale04, @@ -4835,8 +6094,10 @@ @article{Dale04 pmid = {15656285}, month = {11}, gsid = {1953127824396335218}, - gscites = {24}, + gscites = {26}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/57635}, + ss_id = {1536728fbcefe01ebf1d6f670b2308fac4565910}, + all_ss_ids = {['1536728fbcefe01ebf1d6f670b2308fac4565910']}, } @article{Dale07, @@ -4853,7 +6114,9 @@ @article{Dale07 pmid = {17460540}, month = {6}, gsid = {9089375917348682570}, - gscites = {122}, + gscites = {125}, + ss_id = {98607603fde29967bbef46cb8e5915c2d0b2d739}, + all_ss_ids = {['98607603fde29967bbef46cb8e5915c2d0b2d739']}, } @conference{Dalm15, @@ -4889,7 +6152,23 @@ @article{Dalm16 pmid = {26745902}, month = {12}, gsid = {1749839565429400652}, - gscites = {24}, + gscites = {31}, + ss_id = {2662ff8ddd7135754bc72ca833eacfc1f307aeb1}, + all_ss_ids = {['14198a817d2c0a800bfb0a0a36baf5097fe22054', '2662ff8ddd7135754bc72ca833eacfc1f307aeb1']}, +} + +@inproceedings{Dalm16a, + author = {Ufuk Dalmi\c{s}, Mehmet and Gubern-M\'{e}rida, Albert and Borelli, Cristina and Vreemann, Suzan and Mann, Ritse M. and Karssemeijer, Nico}, + title = {~~A fully automated system for quantification of background parenchymal enhancement in breast DCE-MRI}, + doi = {10.1117/12.2211640}, + year = {2016}, + abstract = {Background parenchymal enhancement (BPE) observed in breast dynamic contrast enhanced magnetic resonance imaging (DCE-MRI) has been identified as an important biomarker associated with risk for developing breast cancer. In this study, we present a fully automated framework for quantification of BPE. We initially segmented fibroglandular tissue (FGT) of the breasts using an improved version of an existing method. Subsequently, we computed BPEabs (volume of the enhancing tissue), BPErf (BPEabs divided by FGT volume) and BPErb (BPEabs divided by breast volume), using different relative enhancement threshold values between 1% and 100%. 
To evaluate and compare the previous and improved FGT segmentation methods, we used 20 breast DCE-MRI scans and we computed Dice similarity coefficient (DSC) values with respect to manual segmentations. For evaluation of the BPE quantification, we used a dataset of 95 breast DCE-MRI scans. Two radiologists, in individual reading sessions, visually analyzed the dataset and categorized each breast into minimal, mild, moderate and marked BPE. To measure the correlation between automated BPE values to the radiologists' assessments, we converted these values into ordinal categories and we used Spearman's rho as a measure of correlation. According to our results, the new segmentation method obtained an average DSC of 0.81 0.09, which was significantly higher (p<0.001) compared to the previous method (0.76 0.10). The highest correlation values between automated BPE categories and radiologists' assessments were obtained with the BPErf measurement (r=0.55, r=0.49, p<0.001 for both), while the correlation between the scores given by the two radiologists was 0.82 (p<0.001). The presented framework can be used to systematically investigate the correlation between BPE and risk in large screening cohorts.}, + url = {http://dx.doi.org/10.1117/12.2211640}, + file = {Dalm16a.pdf:pdf\\Dalm16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Imaging 2016: Computer-Aided Diagnosis}, + automatic = {yes}, + citation-count = {2}, } @article{Dalm17, @@ -4907,8 +6186,10 @@ @article{Dalm17 optnote = {DIAG, RADIOLOGY}, pmid = {28035663}, gsid = {9993343296257816404}, - gscites = {113}, + gscites = {173}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/173062}, + ss_id = {11f7a41abab06b8792057fa46cd3de08b22ac7d0}, + all_ss_ids = {['11f7a41abab06b8792057fa46cd3de08b22ac7d0']}, } @article{Dalm18, @@ -4926,7 +6207,9 @@ @article{Dalm18 optnote = {DIAG}, pmid = {29340287}, gsid = {14025750273420770243}, - gscites = {20}, + gscites = {54}, + ss_id = {c752bf23b7c12e6f81b6d2b014e29597095a8f94}, + all_ss_ids = {['c752bf23b7c12e6f81b6d2b014e29597095a8f94']}, } @article{Dalm19, @@ -4944,7 +6227,9 @@ @article{Dalm19 pmid = {30652985}, month = {6}, gsid = {7865697668757504203}, - gscites = {12}, + gscites = {69}, + ss_id = {78a25fcd0825f006d8326a932d681e9420a9b77f}, + all_ss_ids = {['78a25fcd0825f006d8326a932d681e9420a9b77f']}, } @phdthesis{Dalm19a, @@ -4975,6 +6260,18 @@ @article{Dama22 year = {2022}, } +@conference{Dama23, + author = {Marina D'Amato and Maschenka Balkenhol and Mart van Rijthoven and Jeroen van der Laak and Francesco Ciompi}, + booktitle = {MIDL}, + title = {On the robustness of regressing tumor percentage as an explainable detector in histopathology whole-slide images}, + abstract = {In recent years, Multiple Instance Learning (MIL) approaches have gained popularity to address the task of weakly-supervised tumor detection in whole-slide images (WSIs). + However, standard MIL relies on classification methods for tumor detection that require negative control, i.e., tumor-free cases, which are challenging to obtain in real-world clinical scenarios, especially when considering surgical resection specimens. + Inspired by recent work, in this paper we tackle tumor detection via a MIL-like weakly-supervised regression approach to predict the percentage of tumor present in WSIs, a clinically available target that allows to overcome the problem of need for manual annotations or presence of tumor-free slides. 
+ We characterize the quality of such a target by investigating its robustness in the presence of noise on regression percentages and provide explainability through attention maps. We test our approach on breast cancer data from primary tumor and lymph node metastases.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, +} + @inproceedings{Dana97, author = {K. J. Dana and B. van Ginneken and S. K. Nayar and J. J. Koenderink}, title = {Reflectance and texture of real-world surfaces}, @@ -4986,6 +6283,9 @@ @inproceedings{Dana97 file = {Dana97.pdf:pdf\\Dana97.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {1}, + ss_id = {8a07de049e417cc61442d8c1ecc576155f6777a8}, + all_ss_ids = {['8a07de049e417cc61442d8c1ecc576155f6777a8']}, + gscites = {85}, } @article{Dana99, @@ -5003,6 +6303,8 @@ @article{Dana99 number = {1}, month = {1}, gscites = {1745}, + ss_id = {1e0bf0fbefbb0404c3c18740123a969091c67ef4}, + all_ss_ids = {['1e0bf0fbefbb0404c3c18740123a969091c67ef4']}, } @article{Davi18, @@ -5013,6 +6315,9 @@ @article{Davi18 abstract = {Chest X-rays are one of the most commonly used technologies for medical diagnosis. Many deep learning models have been proposed to improve and automate the abnormality detection task on this type of data. In this paper, we propose a different approach based on image inpainting under adversarial training first introduced by Goodfellow et al. We configure the context encoder model for this task and train it over 1.1M 128x128 images from healthy X-rays. The goal of our model is to reconstruct the missing central 64x64 patch. Once the model has learned how to inpaint healthy tissue, we test its performance on images with and without abnormalities. We discuss and motivate our results considering PSNR, MSE and SSIM scores as evaluation metrics. In addition, we conduct a 2AFC observer study showing that in half of the times an expert is unable to distinguish real images from the ones reconstructed using our model. 
By computing and visualizing the pixel-wise difference between source and reconstructed images, we can highlight abnormalities to simplify further detection and classification tasks.}, optnote = {DIAG}, month = {12}, + ss_id = {cb8d5a5a21c99bdf5ad5734742c04229c978401d}, + all_ss_ids = {['cb8d5a5a21c99bdf5ad5734742c04229c978401d']}, + gscites = {0}, } @article{DeSo19, @@ -5030,7 +6335,9 @@ @article{DeSo19 pmid = {31468205}, year = {2019}, gsid = {16227468342087800522}, - gscites = {3}, + gscites = {61}, + ss_id = {4d1a108034c95b70c51ffcc73cec398e312b3446}, + all_ss_ids = {['4d1a108034c95b70c51ffcc73cec398e312b3446']}, } @inproceedings{Deba10, @@ -5049,6 +6356,8 @@ @inproceedings{Deba10 month = {3}, gsid = {18035026185759165920}, gscites = {2}, + ss_id = {6d0c6c2a9a8c9d13e071fe9b6d76e57f3c301a33}, + all_ss_ids = {['6d0c6c2a9a8c9d13e071fe9b6d76e57f3c301a33']}, } @article{Deba11, @@ -5066,8 +6375,10 @@ @article{Deba11 pmid = {22047383}, month = {10}, gsid = {1254525851921315650}, - gscites = {12}, + gscites = {18}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/96662}, + ss_id = {5fd64cf068745acfc9df060a4aecb7ff6e68ead4}, + all_ss_ids = {['5fd64cf068745acfc9df060a4aecb7ff6e68ead4']}, } @conference{Deba11a, @@ -5097,6 +6408,8 @@ @article{Deba16 gsid = {4915040609318816659}, gscites = {2}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/171995}, + ss_id = {70079b1af89534ad1caa342fdac269789d31459c}, + all_ss_ids = {['70079b1af89534ad1caa342fdac269789d31459c']}, } @article{Deba16a, @@ -5115,6 +6428,8 @@ @article{Deba16a gsid = {14959316295455027146}, gscites = {7}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/165687}, + ss_id = {7b919a690fea032f8b55339c2a0f4abc56add55b}, + all_ss_ids = {['7b919a690fea032f8b55339c2a0f4abc56add55b']}, } @article{Deba19, @@ -5131,6 +6446,9 @@ @article{Deba19 pmid = {31772836}, month = {11}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/215826}, + ss_id = {663b19914b7fe99a2c3cc1641ba8d47109d63db2}, + all_ss_ids = {['663b19914b7fe99a2c3cc1641ba8d47109d63db2']}, + gscites = {11}, } @article{Dekk20, @@ -5145,6 +6463,9 @@ @article{Dekk20 optnote = {DIAG, RADIOLOGY}, file = {Dekk20.pdf:pdf\\Dekk20.pdf:PDF}, abstract = {Myocardial perfusion imaging (MPI) is an accurate noninvasive test for patients with suspected obstructive coronary artery disease (CAD) and coronary artery calcium (CAC) score is known to be a powerful predictor of cardiovascular events. Collection of CAC scores simultaneously with MPI is unexplored. We aimed to investigate whether automatically derived CAC scores during myocardial perfusion imaging would further improve the diagnostic accuracy of MPI to detect obstructive CAD. We analyzed 150 consecutive patients without a history of coronary revascularization with suspected obstructive CAD who were referred for 82Rb PET/CT and available coronary angiographic data. Myocardial perfusion was evaluated both semi quantitatively as well as quantitatively according to the European guidelines. CAC scores were automatically derived from the low-dose attenuation correction CT scans using previously developed software based on deep learning. Obstructive CAD was defined as stenosis >70% (or >50% in the left main coronary artery) and/or fractional flow reserve (FFR) <=0.80. In total 58% of patients had obstructive CAD of which seventy-four percent were male. Addition of CAC scores to MPI and clinical predictors significantly improved the diagnostic accuracy of MPI to detect obstructive CAD. 
The area under the curve (AUC) increased from 0.87 to 0.91 (p: 0.025). Sensitivity and specificity analysis showed an incremental decrease in false negative tests with our MPI+CAC approach (n=14 to n=4), as a consequence an increase in false positive tests was seen (n=11 to n=28). CAC scores collected simultaneously with MPI improve the detection of obstructive coronary artery disease in patients without a history of coronary revascularization.}, + ss_id = {583fa25f21581a7bf39510373038cb9d69004232}, + all_ss_ids = {['583fa25f21581a7bf39510373038cb9d69004232']}, + gscites = {13}, } @article{Dekk21, @@ -5160,6 +6481,26 @@ @article{Dekk21 abstract = {Plasma osteoprotegerin (OPG) and vascular smooth muscle cell (VSMC) derived extracellular vesicles (EVs) are important regulators in the process of vascular calcification (VC). In population studies, high levels of OPG are associated with events. In animal studies, however, high OPG levels result in reduction of VC. VSMC-derived EVs are assumed to be responsible for OPG transport and VC but this role has not been studied. For this, we investigated the association between OPG in plasma and circulating EVs with coronary artery calcium (CAC) as surrogate for VC in symptomatic patients. We retrospectively assessed 742 patients undergoing myocardial perfusion imaging (MPI). CAC scores were determined on the MPI-CT images using a previously developed automated algorithm. Levels of OPG were quantified in plasma and two EV-subpopulations (LDL and TEX), using an electrochemiluminescence immunoassay. Circulating levels of OPG were independently associated with CAC scores in plasma; OR 1.39 (95\% CI 1.17\textendash 1.65), and both EV populations; EV-LDL; OR 1.51 (95\% CI 1.27\textendash 1.80) and EV-TEX; OR 1.21 (95\% CI 1.02\textendash 1.42). High levels of OPG in plasma were independently associated with CAC scores in this symptomatic patient cohort. High levels of EV-derived OPG showed the same positive association with CAC scores, suggesting that EV-derived OPG mirrors the same pathophysiological process as plasma OPG.}, optnote = {DIAG, RADIOLOGY}, algorithm = {https://grand-challenge.org/algorithms/calcium-scoring/}, + ss_id = {f8fd6c688a6da313e69e95559c1db7cfa1fb04d0}, + all_ss_ids = {['f8fd6c688a6da313e69e95559c1db7cfa1fb04d0']}, + gscites = {9}, +} + +@article{Delc18, + author = {Delcourt, C\'{e}cile and Le Goff, M\'{e}lanie and von Hanno, Therese and Mirshahi, Alireza and Khawaja, Anthony P. and Verhoeven, Virginie J.M. and Hogg, Ruth E. and Anastosopoulos, Eleftherios and Cachulo, Maria Luz and H\"{o}hn, Ren\'{e} and Wolfram, Christian and Bron, Alain and Miotto, Stefania and Carri\`{e}re, Isabelle and Colijn, Johanna M. and Buitendijk, Gabri\"{e}lle H.S. and Evans, Jennifer and Nitsch, Dorothea and Founti, Panayiota and Yip, Jennifer L.Y. and Pfeiffer, Norbert and Creuzot-Garcher, Catherine and Silva, Rufino and Piermarocchi, Stefano and Topouzis, Fotis and Bertelsen, Geir and Foster, Paul J. and Fletcher, Astrid and Klaver, Caroline C.W. 
and Korobelnik, Jean-Fran\c{c}ois and Acar, Niyazi and Anastosopoulos, Eleftherios and Azuara-Blanco, Augusto and Berendschot, Tos and Bergen, Arthur and Bertelsen, Geir and Binquet, Christine and Bird, Alan and Bobak, Martin and Boon, Camiel and Br\'{e}tillon, Lionel and Broe, Rebecca and Bron, Alain and Buitendijk, Gabrielle and Cachulo, Maria Luz and Capuano, Vittorio and Carri\`{e}re, Isabelle and Chakravarthy, Usha and Chan, Michelle and Chang, Petrus and Colijn, Johanna and Cougnard-Gr\'{e}goire, Audrey and Cree, Angela and Creuzot-Garcher, Catherine and Cumberland, Phillippa and Cunha-Vaz, Jos\'{e} and Daien, Vincent and De Jong, Eiko and Deak, Gabor and Delcourt, C\'{e}cile and Delyfer, Marie-No\"{e}lle and den Hollander, Anneke and Dietzel, Martha and Erke, Maja Gran and Faria, Pedro and Farinha, Claudia and Fauser, Sascha and Finger, Robert and Fletcher, Astrid and Foster, Paul and Founti, Panayiota and Gorgels, Theo and Grauslund, Jakob and Grus, Franz and Hammond, Christopher and Hansen, Morten and Helmer, Catherine and Hense, Hans-Werner and Hermann, Manuel and Hoehn, Ren\'{e} and Hogg, Ruth and Holz, Frank and Hoyng, Carel and Jansonius, Nomdo and Janssen, Sarah and Kersten, Eveline and Khawaja, Anthony and Klaver, Caroline and Korobelnik, Jean-Fran\c{c}ois and Lamparter, Julia and Le Goff, M\'{e}lanie and Lechanteur, Yara and Lehtim\"{a}ki, Terho and Leung, Irene and Lotery, Andrew and Mauschitz, Matthias and Meester, Magda and Merle, B\'{e}n\'{e}dicte and Meyer zu Westrup, Verena and Midena, Edoardo and Miotto, Stefania and Mirshahi, Alireza and Mohan-Sa\"{i}d, Sadek and Mueller, Michael and Muldrew, Alyson and Murta, Joaquim and Nickels, Stefan and Nunes, Sandrina and Owen, Christopher and Peto, Tunde and Pfeiffer, Norbert and Piermarocchi, Stefano and Prokofyeva, Elena and Rahi, Jugnoo and Raitakari, Olli and Rauscher, Franziska and Ribeiro, Luisa and Rougier, Marie-B\'{e}n\'{e}dicte and Rudnicka, Alicja and Sahel, Jos\'{e} and Salonikiou, Aggeliki and Sanchez, Clarisa and Schmitz-Valckenberg, Steffen and Schouten, Johannes and Schuster, Alexander and Schweitzer, C\'{e}dric and Segato, Tatiana and Shehata, Jasmin and Silva, Rufino and Silvestri, Giuliana and Simader, Christian and Souied, Eric and Speckauskas, Martynas and Springelkamp, Henriet and Tapp, Robyn and Topouzis, Fotis and van Leeuwen, Elisa and Verhoeven, Virginie and Verzijden, Timo and Von Hanno, Therese and Vujosevic, Stela and Wiedemann, Peter and Williams, Katie and Wolfram, Christian and Yip, Jennifer and Zerbib, Jennyfer}, + title = {~~The Decreasing Prevalence of Nonrefractive Visual Impairment in Older Europeans}, + doi = {10.1016/j.ophtha.2018.02.005}, + year = {2018}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ophtha.2018.02.005}, + file = {Delc18.pdf:pdf\\Delc18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Ophthalmology}, + automatic = {yes}, + citation-count = {16}, + pages = {1149-1159}, + volume = {125}, + pmid = {29548645}, } @article{Demi09, @@ -5201,7 +6542,9 @@ @inproceedings{Derck19 file = {Derck19.pdf:pdf\\Derck19.pdf:PDF}, optnote = {DIAG}, gsid = {12320966003094629420}, - gscites = {2}, + gscites = {6}, + ss_id = {8da1812001d6d2753f2ef55fa5e50e39a648030a}, + all_ss_ids = {['8da1812001d6d2753f2ef55fa5e50e39a648030a']}, } @conference{Dese09, @@ -5261,6 +6604,9 @@ @article{Dett14 number = {4}, pmid = {24713820}, month = {4}, + ss_id = {8b8211800c55a1cf2cb5b9809294d09bc21b8168}, + all_ss_ids = {['8b8211800c55a1cf2cb5b9809294d09bc21b8168']}, + gscites = 
{9}, } @inproceedings{Deva11, @@ -5293,8 +6639,10 @@ @article{Deva17 optnote = {DIAG, RADIOLOGY}, pmid = {28825886}, gsid = {2824529338802696200}, - gscites = {48}, + gscites = {105}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/181824}, + ss_id = {f2ea47c7c53d8728b0ba4f4f0edb945edf877d4c}, + all_ss_ids = {['f2ea47c7c53d8728b0ba4f4f0edb945edf877d4c']}, } @inproceedings{Diez14, @@ -5310,6 +6658,8 @@ @inproceedings{Diez14 optnote = {DIAG, RADIOLOGY}, gsid = {2740440396846849245}, gscites = {10}, + ss_id = {5ca3d1d79becff9d2683985814dee4fc47e882c0}, + all_ss_ids = {['5ca3d1d79becff9d2683985814dee4fc47e882c0']}, } @article{Dijc11, @@ -5326,6 +6676,9 @@ @article{Dijc11 number = {6}, pmid = {21942620}, month = {9}, + ss_id = {de4199466e65c57d27343bcc67a9261037d304cc}, + all_ss_ids = {['de4199466e65c57d27343bcc67a9261037d304cc']}, + gscites = {1}, } @conference{Dijk10b, @@ -5369,8 +6722,10 @@ @article{Dijk15 pmid = {25517131}, month = {3}, gsid = {13303851726201550707}, - gscites = {20}, + gscites = {26}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/153524}, + ss_id = {32cfbed76bd55d2aa3c2fa54652941d01994dc35}, + all_ss_ids = {['32cfbed76bd55d2aa3c2fa54652941d01994dc35']}, } @article{Dilz19, @@ -5381,6 +6736,9 @@ @article{Dilz19 abstract = {We introduce the learned simultaneous iterative reconstruction technique (SIRT) for tomographic reconstruction. The learned SIRT algorithm is a deep learning based reconstruction method combining model knowledge with a learned component. The algorithm is trained by mapping raw measured data to the reconstruction results over several iterations. The Learned SIRT algorithm is applied to a cone beam geometry on a circular orbit, a challenging problem for learned methods due to its 3D geometry and its inherent inability to completely capture the patient anatomy. A comparison of 2D reconstructions is shown, where the learned SIRT approach produces reconstructions with superior peak signal to noise ratio (PSNR) and structural similarity (SSIM), compared to FBP, SIRT and U-net post-processing and similar PSNR and SSIM compared to the learned primal dual algorithm. Similar results are shown for cone beam geometry reconstructions of a 3D Shepp Logan phantom, where we obtain between 9.9 and 28.1 dB improvement over FBP with a substantial improvement in SSIM. 
Finally we show that our algorithm scales to clinically relevant problems, and performs well when applied to measurements of a physical phantom.}, optnote = {DIAG}, month = {8}, + ss_id = {b484f95f94ef39029ba3acd3f481bc729172eef5}, + all_ss_ids = {['b484f95f94ef39029ba3acd3f481bc729172eef5']}, + gscites = {1}, } @conference{Dine10, @@ -5403,7 +6761,59 @@ @inproceedings{Dong10 file = {Dong10.pdf:pdf\\Dong10.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {17657664511689579052}, - gscites = {25}, + gscites = {29}, + ss_id = {644ca5e97840d8532c7bdcf142cb49e398ae4ea0}, + all_ss_ids = {['644ca5e97840d8532c7bdcf142cb49e398ae4ea0']}, +} + +@article{Donn19, + author = {Donnelly, J Peter and Chen, Sharon C and Kauffman, Carol A and Steinbach, William J and Baddley, John W and Verweij, Paul E and Clancy, Cornelius J and Wingard, John R and Lockhart, Shawn R and Groll, Andreas H and Sorrell, Tania C and Bassetti, Matteo and Akan, Hamdi and Alexander, Barbara D and Andes, David and Azoulay, Elie and Bialek, Ralf and Bradsher, Robert W and Bretagne, Stephane and Calandra, Thierry and Caliendo, Angela M and Castagnola, Elio and Cruciani, Mario and Cuenca-Estrella, Manuel and Decker, Catherine F and Desai, Sujal R and Fisher, Brian and Harrison, Thomas and Heussel, Claus Peter and Jensen, Henrik E and Kibbler, Christopher C and Kontoyiannis, Dimitrios P and Kullberg, Bart-Jan and Lagrou, Katrien and Lamoth, Fr\'{e}d\'{e}ric and Lehrnbecher, Thomas and Loeffler, Jurgen and Lortholary, Olivier and Maertens, Johan and Marchetti, Oscar and Marr, Kieren A and Masur, Henry and Meis, Jacques F and Morrisey, C Orla and Nucci, Marcio and Ostrosky-Zeichner, Luis and Pagano, Livio and Patterson, Thomas F and Perfect, John R and Racil, Zdenek and Roilides, Emmanuel and Ruhnke, Marcus and Prokop, Cornelia Schaefer and Shoham, Shmuel and Slavin, Monica A and Stevens, David A and Thompson, George R and Vazquez, Jose A and Viscoli, Claudio and Walsh, Thomas J and Warris, Adilia and Wheat, L Joseph and White, P Lewis and Zaoutis, Theoklis E and Pappas, Peter G}, + title = {~~Revision and Update of the Consensus Definitions of Invasive Fungal Disease From the European Organization for Research and Treatment of Cancer and the Mycoses Study Group Education and Research Consortium}, + doi = {10.1093/cid/ciz1008}, + year = {2019}, + abstract = {Abstract + + Background + Invasive fungal diseases (IFDs) remain important causes of morbidity and mortality. The consensus definitions of the Infectious Diseases Group of the European Organization for Research and Treatment of Cancer and the Mycoses Study Group have been of immense value to researchers who conduct clinical trials of antifungals, assess diagnostic tests, and undertake epidemiologic studies. However, their utility has not extended beyond patients with cancer or recipients of stem cell or solid organ transplants. With newer diagnostic techniques available, it was clear that an update of these definitions was essential. + + + Methods + To achieve this, 10 working groups looked closely at imaging, laboratory diagnosis, and special populations at risk of IFD. A final version of the manuscript was agreed upon after the groups' findings were presented at a scientific symposium and after a 3-month period for public comment. There were several rounds of discussion before a final version of the manuscript was approved. 
+ + + Results + There is no change in the classifications of "proven," "probable," and "possible" IFD, although the definition of "probable" has been expanded and the scope of the category "possible" has been diminished. The category of proven IFD can apply to any patient, regardless of whether the patient is immunocompromised. The probable and possible categories are proposed for immunocompromised patients only, except for endemic mycoses. + + + Conclusions + These updated definitions of IFDs should prove applicable in clinical, diagnostic, and epidemiologic research of a broader range of patients at high-risk. + }, + url = {http://dx.doi.org/10.1093/cid/ciz1008}, + file = {Donn19.pdf:pdf\\Donn19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Clinical Infectious Diseases}, + automatic = {yes}, + citation-count = {1186}, + pages = {1367-1376}, + volume = {71}, + pmid = {31802125}, +} + +@article{Doop23, + author = {Dooper, Stephan and Pinckaers, Hans and Aswolinskiy, Witali and Hebeda, Konnie and Jarkman, Sofia and van der Laak, Jeroen and Litjens, Geert}, + title = {~~Gigapixel end-to-end training using streaming and attention}, + doi = {10.1016/j.media.2023.102881}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.media.2023.102881}, + file = {Doop23.pdf:pdf\\Doop23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Image Analysis}, + automatic = {yes}, + citation-count = {3}, + pages = {102881}, + volume = {88}, + pmid = {37437452}, } @conference{Durm12, @@ -5430,7 +6840,9 @@ @article{Durm14 pmid = {24448598}, month = {1}, gsid = {14089140236204567566}, - gscites = {24}, + gscites = {27}, + ss_id = {c20d695eb6eabe26c449e5e4a75d214c92b636dd}, + all_ss_ids = {['c20d695eb6eabe26c449e5e4a75d214c92b636dd']}, } @article{Durm14a, @@ -5449,6 +6861,8 @@ @article{Durm14a month = {3}, gsid = {1224010184676285656}, gscites = {54}, + ss_id = {dc399a6d32501c1b878002ed27dfd7c54ab82965}, + all_ss_ids = {['dc399a6d32501c1b878002ed27dfd7c54ab82965']}, } @mastersthesis{Eeke20, @@ -5473,6 +6887,9 @@ @inproceedings{Eeke20a file = {:pdf/Eeke20a.pdf:PDF}, optnote = {DIAG}, year = {2020}, + ss_id = {8bc7e464965aabd635e4bc2e76d186d6d2c04e01}, + all_ss_ids = {['8bc7e464965aabd635e4bc2e76d186d6d2c04e01']}, + gscites = {1}, } @article{Eeke21, @@ -5487,16 +6904,19 @@ @article{Eeke21 pmid = {34772487}, year = {2021}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/252113}, + ss_id = {191e15acc68311c29c38a4bbfa0769608fa48fbc}, + all_ss_ids = {['191e15acc68311c29c38a4bbfa0769608fa48fbc']}, + gscites = {6}, } @conference{Eeke22, - author = {Leander van Eekelen and Enrico Munari and Ilaria Girolami and Albino Eccher and Jeroen van der Laak and Katrien Grünberg and Monika Looijen-Salamon and Shoko Vos and Francesco Ciompi}, + author = {Leander van Eekelen and Enrico Munari and Ilaria Girolami and Albino Eccher and Jeroen van der Laak and Katrien Grunberg and Monika Looijen-Salamon and Shoko Vos and Francesco Ciompi}, title = {Inter-rater agreement of pathologists on determining PD-L1 status in non-small cell lung cancer}, booktitle = {ECP}, year = {2022}, abstract = {Artificial intelligence (AI) based quantification of cell-level PD-L1 status enables spatial analysis and allows reliable and reproducible assessment of the tumor proportion score. In this study, we assess the cell-level inter-pathologist agreement as human benchmark for AI development and validation. 
Three pathologists manually annotated the centers of all nuclei within 53 regions of interest in 12 whole- slide images (40X magnification) of NSCLC cases and classified them as PD-L1 negative/positive tumor cells, PD-L1 positive immune cells or other cells. Agreement was quantified using F1 score analysis, with agreement defined as annotations less than 10 um apart and of the same class. An average of 9044 nuclei (1550 negative, 2367 positive tumor cells, 1244 positive immune cells, 3881 other cells) were manually annotated by the three pathologists. The mean F1 score over pairs of pathologists at dataset level was 0.59 (range 0.54-0.65). When split across classes, the mean per-pair F1 scores stay approximately the same, indicating the readers perform similarly regardless of cell type. Besides human variability in manual point annotations with respect to the center of nuclei, lack of context contributed to disagreement: readers who reported they solely examined the ROIs tended to disagree more with readers that reported they also looked outside the ROIs for additional (morphological/density) information. - -In conclusion, agreement on determining the PD-L1 status of individual cells is only moderate, suggesting a role for AI. By quantifying the inter-rater agreement of pathologists, we have created a human benchmark which may serve as an upper bound (and could be combined via majority vote) for the validation of AI at celllevel, something not done previously. Cell-level AI-based assessment of PD-L1 may supersede slide level scoring, adding significant information on the heterogeneity and spatial distribution over the tumor.}, + + In conclusion, agreement on determining the PD-L1 status of individual cells is only moderate, suggesting a role for AI. By quantifying the inter-rater agreement of pathologists, we have created a human benchmark which may serve as an upper bound (and could be combined via majority vote) for the validation of AI at celllevel, something not done previously. Cell-level AI-based assessment of PD-L1 may supersede slide level scoring, adding significant information on the heterogeneity and spatial distribution over the tumor.}, optnote = {DIAG, RADIOLOGY}, } @@ -5506,25 +6926,121 @@ @conference{Eeke22a booktitle = {ECP}, year = {2022}, abstract = {Nuclei detection in histopathology images is an important prerequisite step of downstream research and clinical analyses, such as counting cells and spatial interactions. In this study, we developed an AI-based nuclei detector using the YOLOv5 framework in whole-slide NSCLC cases. Our dataset consisted of 42 PD-L1 stained cases (30 training, 12 test). Four trained (non-expert) readers manually annotated all nuclei (both positive/negative) within regions of interest (ROIs) viewed at 40X magnification. We trained a YOLOv5(s) network on annotations of one reader. Performance was measured using F1 score analysis; hits were defined as being less than 10 um away from annotations. - -We evaluate YOLOv5 on the test set by pairing it against all four readers separately. There, YOLOv5 performs excellently, falling within the interrater variability of the four readers: the mean F1 score over algorithm-reader pairs is 0.84 (range 0.76-0.92) while the mean F1 score over pairs of readers is 0.82 (range 0.76-0.86). When we determine the cell count (number of annotations/predictions) per ROI in the test set, agreement of algorithm-reader pairs and reader pairs is equally well aligned: 0.93 (range 0.90-0.97) versus 0.94 (range 0.92-0.96). 
Visual inspection indicates YOLOv5 performs equally well on PD-L1 positive and negative cells.
+
+ In future work, we could extend this detector to additional tissues and immunohistochemistry stainings. Moreover, this detector could be used as an AI-assisted manual point annotation tool: while human readers perform the (context-driven) task of delineating homogeneous regions (e.g. clusters of PD-L1 positive stained cells), the detector performs the (local, yet laborious) task of identifying individual nuclei within these regions, providing labelled point annotations.},
 optnote = {DIAG, RADIOLOGY},
}

@article{Eeke23,
 author = {van Eekelen, Leander and Litjens, Geert and Hebeda, Konnie},
 year = {2023},
 month = {2},
 journal = PATHOB,
 title = {Artificial intelligence in bone marrow histological diagnostics: potential applications and challenges.},
 doi = {10.1159/000529701},
 abstract = {The expanding digitalization of routine diagnostic histological slides holds a potential to apply artificial intelligence (AI) to pathology, including bone marrow (BM) histology. In this perspective we describe potential tasks in diagnostics that can be supported, investigations that can be guided and questions that can be answered by the future application of AI on whole slide images of BM biopsies. These range from characterization of cell lineages and quantification of cells and stromal structures to disease prediction. First glimpses show an exciting potential to detect subtle phenotypic changes with AI that are due to specific genotypes. The discussion is illustrated by examples of current AI research using BM biopsy slides.
In addition, we briefly discuss current challenges for implementation of AI-supported diagnostics.}, + file = {:pdf/Eeke23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {36791682}, + ss_id = {fc5eb96c03e65d357cb8425a5938f4a6b62be9bd}, + all_ss_ids = {['fc5eb96c03e65d357cb8425a5938f4a6b62be9bd']}, + gscites = {2}, +} + @conference{Eerd18, author = {Anke W van der Eerden and Thomas LA van den Heuvel and Bram H Geurts and Bram Platel and Thijs Vande Vyveree and Luc van den Hauwee and Teuntje MJC Andriessen and Bozena M Goraj and Rashindra Manniesing}, title = {Automatic versus human detection of traumatic cerebral microbleeds on susceptibility weighted imaging}, booktitle = ECR, year = {2018}, abstract = {Purpose: To evaluate the performance of computer-aided traumatic cerebral microbleed detection (CAD) with and without human interference. - Methods and Materials: 33 adult patients admitted to our emergency department with moderate or severe TBI (mean age 33 years, 21 males) underwent a standardized trauma 3T MRI-protocol at 28 weeks. The microbleeds in their SWI-scans were annotated by an expert. A CAD system was developed, based on this training set. Six experts, blind to the CAD-results, annotated a subset of ten patients. In two experiments, we compared the performance of the CAD system to each of these six experts, using the majority voting results of the other five experts as the reference standard for the calculation of performance characteristics (paired t-test). In the first experiment, the performance of fully automatic microbleed detection was assessed. In the second experiment, one expert removed CAD-annotations she considered false positives from the automatically detected microbleeds, and briefly screened the CAD-annotated SWI-scans to complete the dataset with missed definite microbleeds. - Results: Fully manual evaluation took one hour per patient with an average sensitivity of 77% (SD 12.4%). The sensitivity of fully automatic detection of candidate microbleeds was 89% (SD 0.8%). Evaluation of the CAD results by an expert took 13 minutes per patient with a sensitivity of 93% (SD 1.0%) (p < 0.05 versus fully manual evaluation). - Conclusion: This CAD system allows detecting more microbleeds in a reduced reading time. This may facilitate the execution of otherwise too time-consuming large studies on the clinical relevance of microbleeds.}, + Methods and Materials: 33 adult patients admitted to our emergency department with moderate or severe TBI (mean age 33 years, 21 males) underwent a standardized trauma 3T MRI-protocol at 28 weeks. The microbleeds in their SWI-scans were annotated by an expert. A CAD system was developed, based on this training set. Six experts, blind to the CAD-results, annotated a subset of ten patients. In two experiments, we compared the performance of the CAD system to each of these six experts, using the majority voting results of the other five experts as the reference standard for the calculation of performance characteristics (paired t-test). In the first experiment, the performance of fully automatic microbleed detection was assessed. In the second experiment, one expert removed CAD-annotations she considered false positives from the automatically detected microbleeds, and briefly screened the CAD-annotated SWI-scans to complete the dataset with missed definite microbleeds. + Results: Fully manual evaluation took one hour per patient with an average sensitivity of 77% (SD 12.4%). 
The sensitivity of fully automatic detection of candidate microbleeds was 89% (SD 0.8%). Evaluation of the CAD results by an expert took 13 minutes per patient with a sensitivity of 93% (SD 1.0%) (p < 0.05 versus fully manual evaluation). + Conclusion: This CAD system allows detecting more microbleeds in a reduced reading time. This may facilitate the execution of otherwise too time-consuming large studies on the clinical relevance of microbleeds.}, optnote = {DIAG, RADIOLOGY}, } +@article{Eerd21, + author = {van der Eerden, A.W. and van den Heuvel, T.L. and Perlbarg, V. and Vart, P. and Vos, P.E. and Puybasset, L. and Galanaud, D. and Platel, B. and Manniesing, R. and Goraj, B.M.}, + title = {~~Traumatic Cerebral Microbleeds in the Subacute Phase Are Practical and Early Predictors of Abnormality of the Normal-Appearing White Matter in the Chronic Phase}, + doi = {10.3174/ajnr.a7028}, + year = {2021}, + abstract = {BACKGROUND AND PURPOSE: In the chronic phase after traumatic brain injury, DTI findings reflect WM integrity. DTI interpretation in the subacute phase is less straightforward. Microbleed evaluation with SWI is straightforward in both phases. We evaluated whether the microbleed concentration in the subacute phase is associated with the integrity of normal-appearing WM in the chronic phase. MATERIALS AND METHODS: Sixty of 211 consecutive patients 18 years of age or older admitted to our emergency department <=24 hours after moderate to severe traumatic brain injury matched the selection criteria. Standardized 3T SWI, DTI, and T1WI were obtained 3 and 26 weeks after traumatic brain injury in 31 patients and 24 healthy volunteers. At baseline, microbleed concentrations were calculated. At follow-up, mean diffusivity (MD) was calculated in the normal-appearing WM in reference to the healthy volunteers (MDz). Through linear regression, we evaluated the relation between microbleed concentration and MDz in predefined structures. RESULTS: In the cerebral hemispheres, MDz at follow-up was independently associated with the microbleed concentration at baseline (left: B = 38.4 [95% CI 7.5-69.3], P = .017; right: B = 26.3 [95% CI 5.7-47.0], P = .014). No such relation was demonstrated in the central brain. MDz in the corpus callosum was independently associated with the microbleed concentration in the structures connected by WM tracts running through the corpus callosum (B = 20.0 [95% CI 24.8-75.2], P < .000). MDz in the central brain was independently associated with the microbleed concentration in the cerebral hemispheres (B = 25.7 [95% CI 3.9-47.5], P = .023). CONCLUSIONS: SWI-assessed microbleeds in the subacute phase are associated with DTI-based WM integrity in the chronic phase. These associations are found both within regions and between functionally connected regions. 
B + : linear regression coefficient + Bcmb-conc + : linear regression coefficient with microbleed concentration as independent variable + Bcmb-nr + : linear regression coefficient with microbleed number as independent variable + MD + : mean diffusivity + MDz + : Z -score of mean diffusivity, normalized to the healthy control participants + t1 + : 3 (2-5) weeks after TBI + t2 + : 26 (25-28) weeks after TBI + TAI + : traumatic axonal injury + TBI + : traumatic brain injury + FA + : fractional anisotropy + MARS + : Microbleed Anatomical Rating Scale + GCS + : Glasgow Coma Scale}, + url = {http://dx.doi.org/10.3174/ajnr.A7028}, + file = {Eerd21.pdf:pdf\\Eerd21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {American Journal of Neuroradiology}, + automatic = {yes}, + citation-count = {3}, + pages = {861-867}, + volume = {42}, + pmid = {34719725}, +} + @article{Ehte16, author = {Bejnordi, Babak Ehteshami and Litjens, Geert and Timofeeva, Nadya and Otte-Holler, Irene and Homeyer, Andre and Karssemeijer, Nico and van der Laak, Jeroen}, title = {Stain specific standardization of whole-slide histopathological images}, @@ -5541,7 +7057,9 @@ @article{Ehte16 optnote = {DIAG, RADIOLOGY}, pmid = {26353368}, gsid = {11832967785880346265}, - gscites = {128}, + gscites = {243}, + ss_id = {2729d2918978d5ed602aa843fbdd027d83e0036f}, + all_ss_ids = {['2729d2918978d5ed602aa843fbdd027d83e0036f']}, } @article{Ehte16a, @@ -5560,7 +7078,9 @@ @article{Ehte16a optnote = {DIAG}, pmid = {27076354}, gsid = {4689762899907422351}, - gscites = {55}, + gscites = {82}, + ss_id = {7e39820fb261f367ce8bf362de991ebff4400083}, + all_ss_ids = {['7e39820fb261f367ce8bf362de991ebff4400083']}, } @article{Ehte17, @@ -5578,7 +7098,9 @@ @article{Ehte17 optnote = {DIAG, RADIOLOGY}, pmid = {29234806}, gsid = {6260116032142865268}, - gscites = {791}, + gscites = {1000}, + ss_id = {ba913e2c03ece1c75f0af4d16dd11c7ffbc6e3ba}, + all_ss_ids = {['ba913e2c03ece1c75f0af4d16dd11c7ffbc6e3ba']}, } @article{Ehte18, @@ -5596,7 +7118,9 @@ @article{Ehte18 optnote = {DIAG}, pmid = {29899550}, gsid = {5374110760369104933}, - gscites = {52}, + gscites = {147}, + ss_id = {a58015a59562caf325a3f05288147704c055ce8d}, + all_ss_ids = {['a58015a59562caf325a3f05288147704c055ce8d']}, } @article{Eile08, @@ -5649,6 +7173,36 @@ @article{Eise12 number = {3}, pmid = {22391269}, month = {3}, + ss_id = {bb07c9b92d8d59595eb0a5e135c3f54ebae9e43b}, + all_ss_ids = {['bb07c9b92d8d59595eb0a5e135c3f54ebae9e43b']}, + gscites = {67}, +} + +@article{Eise22, + author = {Eisenmann, Matthias and Reinke, Annika and Weru, Vivienn and Tizabi, Minu Dietlinde and Isensee, Fabian and Adler, Tim J. 
and Godau, Patrick and Cheplygina, Veronika and Kozubek, Michal and Ali, Sharib and Gupta, Anubha and Kybic, Jan and Noble, Alison and de Sol\'{o}rzano, Carlos Ortiz and Pachade, Samiksha and Petitjean, Caroline and Sage, Daniel and Wei, Donglai and Wilden, Elizabeth and Alapatt, Deepak and Andrearczyk, Vincent and Baid, Ujjwal and Bakas, Spyridon and Balu, Niranjan and Bano, Sophia and Bawa, Vivek Singh and Bernal, Jorge and Bodenstedt, Sebastian and Casella, Alessandro and Choi, Jinwook and Commowick, Olivier and Daum, Marie and Depeursinge, Adrien and Dorent, Reuben and Egger, Jan and Eichhorn, Hannah and Engelhardt, Sandy and Ganz, Melanie and Girard, Gabriel and Hansen, Lasse and Heinrich, Mattias and Heller, Nicholas and Hering, Alessa and Huaulm\'{e}, Arnaud and Kim, Hyunjeong and Landman, Bennett and Li, Hongwei Bran and Li, Jianning and Ma, Jun and Martel, Anne and Mart\'{i}n-Isla, Carlos and Menze, Bjoern and Nwoye, Chinedu Innocent and Oreiller, Valentin and Padoy, Nicolas and Pati, Sarthak and Payette, Kelly and Sudre, Carole and van Wijnen, Kimberlin and Vardazaryan, Armine and Vercauteren, Tom and Wagner, Martin and Wang, Chuanbo and Yap, Moi Hoon and Yu, Zeyun and Yuan, Chun and Zenk, Maximilian and Zia, Aneeq and Zimmerer, David and Bao, Rina and Choi, Chanyeol and Cohen, Andrew and Dzyubachyk, Oleh and Galdran, Adrian and Gan, Tianyuan and Guo, Tianqi and Gupta, Pradyumna and Haithami, Mahmood and Ho, Edward and Jang, Ikbeom and Li, Zhili and Luo, Zhengbo and Lux, Filip and Makrogiannis, Sokratis and M\"{u}ller, Dominik and Oh, Young-tack and Pang, Subeen and Pape, Constantin and Polat, Gorkem and Reed, Charlotte Rosalie and Ryu, Kanghyun and Scherr, Tim and Thambawita, Vajira and Wang, Haoyu and Wang, Xinliang and Xu, Kele and Yeh, Hung and Yeo, Doyeob and Yuan, Yixuan and Zeng, Yan and Zhao, Xin and Abbing, Julian and Adam, Jannes and Adluru, Nagesh and Agethen, Niklas and Ahmed, Salman and Khalil, Yasmina Al and Aleny\`{a}, Mireia and Alhoniemi, Esa and An, Chengyang and Anwar, Talha and Arega, Tewodros Weldebirhan and Avisdris, Netanell and Aydogan, Dogu Baran and Bai, Yingbin and Calisto, Maria Baldeon and Basaran, Berke Doga and Beetz, Marcel and Bian, Cheng and Bian, Hao and Blansit, Kevin and Bloch, Louise and Bohnsack, Robert and Bosticardo, Sara and Breen, Jack and Brudfors, Mikael and Br\"{u}ngel, Raphael and Cabezas, Mariano and Cacciola, Alberto and Chen, Zhiwei and Chen, Yucong and Chen, Daniel Tianming and Cho, Minjeong and Choi, Min-Kook and Xie, Chuantao Xie Chuantao and Cobzas, Dana and Cohen-Adad, Julien and Acero, Jorge Corral and Das, Sujit Kumar and de Oliveira, Marcela and Deng, Hanqiu and Dong, Guiming and Doorenbos, Lars and Efird, Cory and Escalera, Sergio and Fan, Di and Serj, Mehdi Fatan and Fenneteau, Alexandre and Fidon, Lucas and Filipiak, Patryk and Finzel, Ren\'{e} and Freitas, Nuno R. and Friedrich, Christoph M. 
and Fulton, Mitchell and Gaida, Finn and Galati, Francesco and Galazis, Christoforos and Gan, Chang Hee and Gao, Zheyao and Gao, Shengbo and Gazda, Matej and Gerats, Beerend and Getty, Neil and Gibicar, Adam and Gifford, Ryan and Gohil, Sajan and Grammatikopoulou, Maria and Grzech, Daniel and G\"{u}ley, Orhun and G\"{u}nnemann, Timo and Guo, Chunxu and Guy, Sylvain and Ha, Heonjin and Han, Luyi and Han, Il Song and Hatamizadeh, Ali and He, Tian and Heo, Jimin and Hitziger, Sebastian and Hong, SeulGi and Hong, SeungBum and Huang, Rian and Huang, Ziyan and Huellebrand, Markus and Huschauer, Stephan and Hussain, Mustaffa and Inubushi, Tomoo and Polat, Ece Isik and Jafaritadi, Mojtaba and Jeong, SeongHun and Jian, Bailiang and Jiang, Yuanhong and Jiang, Zhifan and Jin, Yueming and Joshi, Smriti and Kadkhodamohammadi, Abdolrahim and Kamraoui, Reda Abdellah and Kang, Inha and Kang, Junghwa and Karimi, Davood and Khademi, April and Khan, Muhammad Irfan and Khan, Suleiman A. and Khantwal, Rishab and Kim, Kwang-Ju and Kline, Timothy and Kondo, Satoshi and Kontio, Elina and Krenzer, Adrian and Kroviakov, Artem and Kuijf, Hugo and Kumar, Satyadwyoom and La Rosa, Francesco and Lad, Abhi and Lee, Doohee and Lee, Minho and Lena, Chiara and Li, Hao and Li, Ling and Li, Xingyu and Liao, Fuyuan and Liao, KuanLun and Oliveira, Arlindo Limede and Lin, Chaonan and Lin, Shan and Linardos, Akis and Linguraru, Marius George and Liu, Han and Liu, Tao and Liu, Di and Liu, Yanling and Louren\c{c}o-Silva, Jo\~{a}o and Lu, Jingpei and Lu, Jiangshan and Luengo, Imanol and Lund, Christina B. and Luu, Huan Minh and Lv, Yi and Lv, Yi and Macar, Uzay and Maechler, Leon and L., Sina Mansour and Marshall, Kenji and Mazher, Moona and McKinley, Richard and Medela, Alfonso and Meissen, Felix and Meng, Mingyuan and Miller, Dylan and Mirjahanmardi, Seyed Hossein and Mishra, Arnab and Mitha, Samir and Mohy-ud-Din, Hassan and Mok, Tony Chi Wing and Murugesan, Gowtham Krishnan and Karthik, Enamundram Naga and Nalawade, Sahil and Nalepa, Jakub and Naser, Mohamed and Nateghi, Ramin and Naveed, Hammad and Nguyen, Quang-Minh and Quoc, Cuong Nguyen and Nichyporuk, Brennan and Oliveira, Bruno and Owen, David and Pal, Jimut Bahan and Pan, Junwen and Pan, Wentao and Pang, Winnie and Park, Bogyu and Pawar, Vivek and Pawar, Kamlesh and Peven, Michael and Philipp, Lena and Pieciak, Tomasz and Plotka, Szymon and Plutat, Marcel and Pourakpour, Fattaneh and Preloznik, Domen and Punithakumar, Kumaradevan and Qayyum, Abdul and Queir\'{o}s, Sandro and Rahmim, Arman and Razavi, Salar and Ren, Jintao and Rezaei, Mina and Rico, Jonathan Adam and Rieu, ZunHyan and Rink, Markus and Roth, Johannes and Ruiz-Gonzalez, Yusely and Saeed, Numan and Saha, Anindo and Salem, Mostafa and Sanchez-Matilla, Ricardo and Schilling, Kurt and Shao, Wei and Shen, Zhiqiang and Shi, Ruize and Shi, Pengcheng and Sobotka, Daniel and Soulier, Th\'{e}odore and Fadida, Bella Specktor and Stoyanov, Danail and Mun, Timothy Sum Hon and Sun, Xiaowu and Tao, Rong and Thaler, Franz and Th\'{e}berge, Antoine and Thielke, Felix and Torres, Helena and Wahid, Kareem A. 
and Wang, Jiacheng and Wang, YiFei and Wang, Wei and Wang, Xiong and Wen, Jianhui and Wen, Ning and Wodzinski, Marek and Wu, Ye and Xia, Fangfang and Xiang, Tianqi and Xiaofei, Chen and Xu, Lizhan and Xue, Tingting and Yang, Yuxuan and Yang, Lin and Yao, Kai and Yao, Huifeng and Yazdani, Amirsaeed and Yip, Michael and Yoo, Hwanseung and Yousefirizi, Fereshteh and Yu, Shunkai and Yu, Lei and Zamora, Jonathan and Zeineldin, Ramy Ashraf and Zeng, Dewen and Zhang, Jianpeng and Zhang, Bokai and Zhang, Jiapeng and Zhang, Fan and Zhang, Huahong and Zhao, Zhongchen and Zhao, Zixuan and Zhao, Jiachen and Zhao, Can and Zheng, Qingshuo and Zhi, Yuheng and Zhou, Ziqi and Zou, Baosheng and Maier-Hein, Klaus and J\"{a}ger, Paul F. and Kopp-Schneider, Annette and Maier-Hein, Lena}, + title = {~~Biomedical image analysis competitions: The state of current participation practice}, + doi = {10.48550/ARXIV.2212.08568}, + year = {2022}, + abstract = {The number of international benchmarking competitions is steadily increasing in various fields of machine learning (ML) research and practice. So far, however, little is known about the common practice as well as bottlenecks faced by the community in tackling the research questions posed. To shed light on the status quo of algorithm development in the specific field of biomedical imaging analysis, we designed an international survey that was issued to all participants of challenges conducted in conjunction with the IEEE ISBI 2021 and MICCAI 2021 conferences (80 competitions in total). The survey covered participants' expertise and working environments, their chosen strategies, as well as algorithm characteristics. A median of 72% challenge participants took part in the survey. According to our results, knowledge exchange was the primary incentive (70%) for participation, while the reception of prize money played only a minor role (16%). While a median of 80 working hours was spent on method development, a large portion of participants stated that they did not have enough time for method development (32%). 25% perceived the infrastructure to be a bottleneck. Overall, 94% of all solutions were deep learning-based. Of these, 84% were based on standard architectures. 43% of the respondents reported that the data samples (e.g., images) were too large to be processed at once. This was most commonly addressed by patch-based training (69%), downsampling (37%), and solving 3D analysis tasks as a series of 2D tasks. K-fold cross-validation on the training set was performed by only 37% of the participants and only 50% of the participants performed ensembling based on multiple identical models (61%) or heterogeneous models (39%). 48% of the respondents applied postprocessing steps.}, + url = {https://arxiv.org/abs/2212.08568}, + file = {Eise22.pdf:pdf\\Eise22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2212.08568}, + automatic = {yes}, +} + +@inproceedings{Eise23, + author = {Eisenmann, M. and Reinke, A. and Weru, V. and Tizabi, M. D. and Isensee, F. and Adler, T. J. and Ali, S. and Andrearczyk, V. and Aubreville, M. and Baid, U. and Bakas, S. and Balu, N. and Bano, S. and Bernal, J. and Bodenstedt, S. and Casella, A. and Cheplygina, V. and Daum, M. and De Bruijne, M. and Depeursinge, A. and Dorent, R. and Egger, J. and Ellis, D. G. and Engelhardt, S. and Ganz, M. and Ghatwary, N. and Girard, G. and Godau, P. and Gupta, A. and Hansen, L. and Harada, K. and Heinrich, M. and Heller, N. and Hering, A. and Huaulm\'{e}, A. and Jannin, P. and Kavur, A. E. 
and Kodym, O. and Kozubek, M. and Li, J. and Li, H. and Ma, J. and Mart\'{i}n-Isla, C. and Menze, B. and Noble, A. and Oreiller, V. and Padoy, N. and Pati, S. and Payette, K. and R\"{a}dsch, T. and Rafael-Pati\~{n}o, J. and Bawa, V. Singh and Speidel, S. and Sudre, C. H. and Van Wijnen, K. and Wagner, M. and Wei, D. and Yamlahi, A. and Yap, M. H. and Yuan, C. and Zenk, M. and Zia, A. and Zimmerer, D. and Aydogan, D. and Bhattarai, B. and Bloch, L. and Br\"{u}ngel, R. and Cho, J. and Choi, C. and Dou, Q. and Ezhov, I. and Friedrich, C. M. and Fuller, C. and Gaire, R. R. and Galdran, A. and Faura, \'{A}. Garc\'{i}a and Grammatikopoulou, M. and Hong, S. and Jahanifar, M. and Jang, I. and Kadkhodamohammadi, A. and Kang, I. and Kofler, F. and Kondo, S. and Kuijf, H. and Li, M. and Luu, M. and Martin\v{c}i\v{c}, T. and Morais, P. and Naser, M. A. and Oliveira, B. and Owen, D. and Pang, S. and Park, J. and Park, S. and Plotka, S. and Puybareau, E. and Rajpoot, N. and Ryu, K. and Saeed, N. and Shephard, A. and Shi, P. and Stepec, D. and Subedi, R. and Tochon, G. and Torres, H. R. and Urien, H. and Vila\c{c}a, J. L. and Wahid, K. A. and Wang, H. and Wang, J. and Wang, L. and Wang, X. and Wiestler, B. and Wodzinski, M. and Xia, F. and Xie, J. and Xiong, Z. and Yang, S. and Yang, Y. and Zhao, Z. and Maier-Hein, K. and J\"{a}ger, P. F. and Kopp-Schneider, A. and Maier-Hein, L.}, + title = {~~Why is the Winner the Best?}, + doi = {10.1109/cvpr52729.2023.01911}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/CVPR52729.2023.01911}, + file = {Eise23.pdf:pdf\\Eise23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)}, + automatic = {yes}, + citation-count = {0}, } @article{Emau15, @@ -5667,7 +7221,8 @@ @article{Emau15 optnote = {DIAG, RADIOLOGY}, pmid = {26110667}, gsid = {11553070516898932123}, - gscites = {72}, + gscites = {90}, + all_ss_ids = {['fb1084b454e9b6f203c860e6f89ca4389cd1a8e9', 'fc7b70a4d154e3d2dfabe4393809dfb29cc36a7a']}, } @article{Emau19, @@ -5680,6 +7235,9 @@ @article{Emau19 doi = {10.1136/bmjopen-2018-028752}, optnote = {DIAG, RADIOLOGY}, file = {Emau19.pdf:pdf\\Emau19.pdf:PDF}, + ss_id = {88d7b3243c9083be8542ce1fb171241c8e069948}, + all_ss_ids = {['88d7b3243c9083be8542ce1fb171241c8e069948']}, + gscites = {13}, } @inproceedings{Enge01, @@ -5697,6 +7255,8 @@ @inproceedings{Enge01 optnote = {DIAG, RADIOLOGY}, gsid = {9965525877276769229}, gscites = {6}, + ss_id = {2c91e8d6e8f91f58003862001c4d9b65ac2e966e}, + all_ss_ids = {['2c91e8d6e8f91f58003862001c4d9b65ac2e966e']}, } @article{Enge02, @@ -5732,6 +7292,8 @@ @article{Enge03 month = {11}, gsid = {9254307573267034363}, gscites = {101}, + ss_id = {047a1a51dff311bfdbf5eed4ab11805d059bdf84}, + all_ss_ids = {['047a1a51dff311bfdbf5eed4ab11805d059bdf84']}, } @article{Enge03a, @@ -5749,8 +7311,10 @@ @article{Enge03a pmid = {12944607}, month = {10}, gsid = {2675577220654358659,13589576213391752314}, - gscites = {461}, + gscites = {465}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/122329}, + ss_id = {3887e2f1a64b3558388508be00169a39379c473a}, + all_ss_ids = {['3887e2f1a64b3558388508be00169a39379c473a']}, } @inproceedings{Enge03b, @@ -5768,6 +7332,8 @@ @inproceedings{Enge03b month = {5}, gsid = {2357872457316668238}, gscites = {8}, + ss_id = {a77f68203e9f4e3b0bb83aea1fdc085ee50af4d4}, + all_ss_ids = {['a77f68203e9f4e3b0bb83aea1fdc085ee50af4d4']}, } @inproceedings{Enge05, @@ -5785,6 +7351,8 @@ @inproceedings{Enge05 month = 
{4}, gsid = {6185505303724264209}, gscites = {5}, + ss_id = {bc98a58c4ad70fe8e10233541b8b7174239c649f}, + all_ss_ids = {['bc98a58c4ad70fe8e10233541b8b7174239c649f']}, } @inproceedings{Enge05b, @@ -5803,6 +7371,8 @@ @inproceedings{Enge05b month = {4}, gsid = {8156651029382882086}, gscites = {2}, + ss_id = {2ef80dbd4ef99216073e7287dfd6f785d1a15877}, + all_ss_ids = {['2ef80dbd4ef99216073e7287dfd6f785d1a15877']}, } @article{Enge06, @@ -5840,6 +7410,8 @@ @article{Enge06a gsid = {12818555548460097494}, gscites = {250}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/51174}, + ss_id = {878d8de48dd9eafa5ec823e17d918b56c856eed9}, + all_ss_ids = {['878d8de48dd9eafa5ec823e17d918b56c856eed9']}, } @phdthesis{Enge06b, @@ -5872,6 +7444,8 @@ @article{Enge07 month = {2}, gsid = {1290158304389131269}, gscites = {73}, + ss_id = {e5991fe901cb59f561010d99925459a0a99cb9ac}, + all_ss_ids = {['e5991fe901cb59f561010d99925459a0a99cb9ac']}, } @conference{Enge19, @@ -5879,14 +7453,30 @@ @conference{Enge19 booktitle = ARVO, title = {Automatic Segmentation of Drusen and Exudates on Color Fundus Images using Generative Adversarial Networks}, abstract = {Purpose: The presence of drusen and exudates, visible as bright lesions on color fundus images, is one of the early signs of visual threatening diseases such as Age-related Macular Degeneration and Diabetic Retinopathy. Accurate detection and quantification of these lesions during screening can help identify patients that would benefit from treatment. We developed a method based on generative adversarial networks (GANs) to segment bright lesions on color fundus images. - - Methods: We used 4179 color fundus images that were acquired during clinical routine. The images were contrast enhanced to increase the contrast between bright lesions and the background. All bright lesions were manually annotated by marking the center point of the lesions. The GAN was trained to estimate the image without bright lesions. The final segmentation was obtained by taking the difference between the input image and the estimated output. - - Results: This method was applied to an independent test set of 52 color fundus images with non-advanced stages of AMD from the European Genetic Database, which were fully segmented for bright lesions by two trained human observers. The method achieved Dice scores of 0.4862 and 0.4849 when compared to the observers, whereas the inter-observer Dice score was 0.5043. The total segmented bright lesion area per image was evaluated using the intraclass correlation (ICC). The method scored 0.8537 and 0.8352 when compared to the observers, whereas the inter-observer ICC was 0.8893. - - Conclusions: The results show the performance is close to the agreement between trained observers. This automatic segmentation of bright lesions can help early diagnosis of visual threatening diseases and opens the way for large scale clinical trials.}, + + Methods: We used 4179 color fundus images that were acquired during clinical routine. The images were contrast enhanced to increase the contrast between bright lesions and the background. All bright lesions were manually annotated by marking the center point of the lesions. The GAN was trained to estimate the image without bright lesions. The final segmentation was obtained by taking the difference between the input image and the estimated output. 
+ + Results: This method was applied to an independent test set of 52 color fundus images with non-advanced stages of AMD from the European Genetic Database, which were fully segmented for bright lesions by two trained human observers. The method achieved Dice scores of 0.4862 and 0.4849 when compared to the observers, whereas the inter-observer Dice score was 0.5043. The total segmented bright lesion area per image was evaluated using the intraclass correlation (ICC). The method scored 0.8537 and 0.8352 when compared to the observers, whereas the inter-observer ICC was 0.8893. + + Conclusions: The results show the performance is close to the agreement between trained observers. This automatic segmentation of bright lesions can help early diagnosis of visual threatening diseases and opens the way for large scale clinical trials.}, optnote = {DIAG, RADIOLOGY}, year = {2019}, + all_ss_ids = {34559bb0d95c5166625945eef9b53b21a30838fa}, + gscites = {1}, +} + +@article{Esch22, + author = {Eschert, Tim and Schwendicke, Falk and Krois, Joachim and Bohner, Lauren and Vinayahalingam, Shankeeth and Hanisch, Marcel}, + title = {A Survey on the Use of Artificial Intelligence by Clinicians in Dentistry and Oral and Maxillofacial Surgery.}, + doi = {10.3390/medicina58081059}, + issue = {8}, + volume = {58}, + abstract = {Applications of artificial intelligence (AI) in medicine and dentistry have been on the rise in recent years. In dental radiology, deep learning approaches have improved diagnostics, outperforming clinicians in accuracy and efficiency. This study aimed to provide information on clinicians' knowledge and perceptions regarding AI. A 21-item questionnaire was used to study the views of dentistry professionals on AI use in clinical practice. In total, 302 questionnaires were answered and assessed. Most of the respondents rated their knowledge of AI as average (37.1%), below average (22.2%) or very poor (23.2%). The participants were largely convinced that AI would improve and bring about uniformity in diagnostics (mean Likert +- standard deviation 3.7 +- 1.27). Among the most serious concerns were the responsibility for machine errors (3.7 +- 1.3), data security or privacy issues (3.5 +- 1.24) and the divestment of healthcare to large technology companies (3.5 +- 1.28). Within the limitations of this study, insights into the acceptance and use of AI in dentistry are revealed for the first time.}, + file = {Esch22.pdf:pdf\\Esch22.pdf:PDF}, + journal = {Medicina (Kaunas, Lithuania)}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36013526}, + year = {2022}, } @conference{Estr12, @@ -5912,6 +7502,23 @@ @phdthesis{Estr19 journal = {PhD thesis}, } +@article{Exte20, + author = {den Exter, Paul L. and Kroft, Lucia J.M. and Gonsalves, Carol and Le Gal, Gregoire and Schaefer-Prokop, Cornelia M. and Carrier, Marc and Huisman, Menno V.
and Klok, Frederikus A.}, + title = {~~Establishing diagnostic criteria and treatment of subsegmental pulmonary embolism: A Delphi analysis of experts}, + doi = {10.1002/rth2.12422}, + year = {2020}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1002/rth2.12422}, + file = {Exte20.pdf:pdf\\Exte20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Research and Practice in Thrombosis and Haemostasis}, + automatic = {yes}, + citation-count = {15}, + pages = {1251-1261}, + volume = {4}, + pmid = {33313465}, +} + @article{Fait21, author = {Faita, Francesco and Oranges, Teresa and Di Lascio, Nicole and Ciompi, Francesco and Vitali, Saverio and Aringhieri, Giacomo and Janowska, Agata and Romanelli, Marco and Dini, Valentina}, title = {Ultra-high-frequency ultrasound and machine learning approaches for the differential diagnosis of melanocytic lesions.}, @@ -5923,6 +7530,9 @@ @article{Fait21 pmid = {33738861}, year = {2021}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/249929}, + ss_id = {dda2624afa1a1e32c91322c192e397913fe2fc7d}, + all_ss_ids = {['dda2624afa1a1e32c91322c192e397913fe2fc7d']}, + gscites = {11}, } @mastersthesis{Fary20, @@ -5958,6 +7568,9 @@ @inproceedings{Fary20b url = {https://arxiv.org/abs/2101.06468}, abstract = {We propose a novel framework for controllable pathological image synthesis for data augmentation. Inspired by CycleGAN, we perform cycle-consistent image-to-image translation between two domains: healthy and pathological. Guided by a semantic mask, an adversarially trained generator synthesizes pathology on a healthy image in the specified location. We demonstrate our approach on an institutional dataset of cerebral microbleeds in traumatic brain injury patients. We utilize synthetic images generated with our method for data augmentation in cerebral microbleeds detection. Enriching the training dataset with synthetic images exhibits the potential to increase detection performance for cerebral microbleeds in traumatic brain injury patients.}, optnote = {DIAG, RADIOLOGY}, + ss_id = {4c4f04d1af903f4eabe7767b47439a79e0f6a711}, + all_ss_ids = {['4c4f04d1af903f4eabe7767b47439a79e0f6a711']}, + gscites = {2}, } @inproceedings{Fary21, @@ -5968,6 +7581,23 @@ @inproceedings{Fary21 file = {:pdf/Fary21.pdf:PDF}, optnote = {DIAG}, year = {2021}, + ss_id = {79224099fea90dd0316719579f0c635540a3af7e}, + all_ss_ids = {['79224099fea90dd0316719579f0c635540a3af7e']}, + gscites = {27}, +} + +@article{Fehe22, + author = {Feher, Balazs and Kuchler, Ulrike and Schwendicke, Falk and Schneider, Lisa and Cejudo Grano de Oro, Jose Eduardo and Xi, Tong and Vinayahalingam, Shankeeth and Hsu, Tzu-Ming Harry and Brinz, Janet and Chaurasia, Akhilanand and Dhingra, Kunaal and Gaudin, Robert Andre and Mohammad-Rahimi, Hossein and Pereira, Nielsen and Perez-Pastor, Francesc and Tryfonos, Olga and Uribe, Sergio E. and Hanisch, Marcel and Krois, Joachim}, + title = {Emulating Clinical Diagnostic Reasoning for Jaw Cysts with Machine Learning.}, + doi = {10.3390/diagnostics12081968}, + issue = {8}, + volume = {12}, + abstract = {The detection and classification of cystic lesions of the jaw is of high clinical relevance and represents a topic of interest in medical artificial intelligence research. The human clinical diagnostic reasoning process uses contextual information, including the spatial relation of the detected lesion to other anatomical structures, to establish a preliminary classification. 
Here, we aimed to emulate clinical diagnostic reasoning step by step by using a combined object detection and image segmentation approach on panoramic radiographs (OPGs). We used a multicenter training dataset of 855 OPGs (all positives) and an evaluation set of 384 OPGs (240 negatives). We further compared our models to an international human control group of ten dental professionals from seven countries. The object detection model achieved an average precision of 0.42 (intersection over union (IoU): 0.50, maximal detections: 100) and an average recall of 0.394 (IoU: 0.50-0.95, maximal detections: 100). The classification model achieved a sensitivity of 0.84 for odontogenic cysts and 0.56 for non-odontogenic cysts as well as a specificity of 0.59 for odontogenic cysts and 0.84 for non-odontogenic cysts (IoU: 0.30). The human control group achieved a sensitivity of 0.70 for odontogenic cysts, 0.44 for non-odontogenic cysts, and 0.56 for OPGs without cysts as well as a specificity of 0.62 for odontogenic cysts, 0.95 for non-odontogenic cysts, and 0.76 for OPGs without cysts. Taken together, our results show that a combined object detection and image segmentation approach is feasible in emulating the human clinical diagnostic reasoning process in classifying cystic lesions of the jaw.}, + file = {Fehe22.pdf:pdf\\Fehe22.pdf:PDF}, + journal = {Diagnostics (Basel, Switzerland)}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36010318}, + year = {2022}, } @article{Fens13, @@ -5986,6 +7616,8 @@ @article{Fens13 gsid = {14645718639357321343}, gscites = {33}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/140583}, + ss_id = {14629625274f335f1c62fddf4caf435dde0489f6}, + all_ss_ids = {['14629625274f335f1c62fddf4caf435dde0489f6']}, } @inproceedings{Ferr08, @@ -6008,6 +7640,9 @@ @inproceedings{Fick21 month = {11}, optnote = {DIAG}, year = {2021}, + ss_id = {bb3eedbda08fa1290810666ec202624593e6e2bc}, + all_ss_ids = {['bb3eedbda08fa1290810666ec202624593e6e2bc']}, + gscites = {2}, } @article{Fing19, @@ -6026,7 +7661,9 @@ @article{Fing19 pmid = {30153664}, month = {8}, gsid = {3052267128114152032}, - gscites = {18}, + gscites = {66}, + ss_id = {14d5f7dc42e1af0d92d9e3e3124619a0cc0e395d}, + all_ss_ids = {['14d5f7dc42e1af0d92d9e3e3124619a0cc0e395d']}, } @conference{Firo08, @@ -6056,6 +7693,8 @@ @article{Firo10 month = {8}, gsid = {19274435147675320}, gscites = {53}, + ss_id = {2f7749b662adea99d4231d993319f7f24afb844b}, + all_ss_ids = {['2f7749b662adea99d4231d993319f7f24afb844b']}, } @inproceedings{Firo10a, @@ -6074,6 +7713,8 @@ @inproceedings{Firo10a month = {3}, gsid = {6944444081743431375}, gscites = {1}, + ss_id = {ee26d8460390b683da4bbdf90ce004257b400878}, + all_ss_ids = {['ee26d8460390b683da4bbdf90ce004257b400878']}, } @inproceedings{Firo12, @@ -6089,6 +7730,9 @@ @inproceedings{Firo12 file = {Firo12.pdf:pdf\\Firo12.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {2}, + ss_id = {70cb5fd701a0abcb387281dcd399ba803df711b4}, + all_ss_ids = {['70cb5fd701a0abcb387281dcd399ba803df711b4']}, + gscites = {0}, } @article{Firo12a, @@ -6107,6 +7751,8 @@ @article{Firo12a month = {1}, gsid = {12392195614729486756}, gscites = {17}, + ss_id = {bad6b84ad1b5439be33b69909ff18b2553f2f403}, + all_ss_ids = {['bad6b84ad1b5439be33b69909ff18b2553f2f403']}, } @phdthesis{Firo13, @@ -6135,6 +7781,8 @@ @inproceedings{Fond13 month = {6}, gsid = {14225812249063646700}, gscites = {10}, + ss_id = {13aa11f60bf53aee440facaee410163650dbedf3}, + all_ss_ids = {['13aa11f60bf53aee440facaee410163650dbedf3']}, } @article{Fort12, @@ -6152,18 
+7800,56 @@ @article{Fort12 month = {11}, } -@Conference{Fransen22, - author = {S. J. Fransen and C. Roest and Q. Y. van Lohuizen and J. S. Bosma and T. C. Kwee and D. Yakar, and H. Huisman}, +@article{Four21, + author = {Fournier, Laure and Costaridou, Lena and Bidaut, Luc and Michoux, Nicolas and Lecouvet, Frederic E. and de Geus-Oei, Lioe-Fee and Boellaard, Ronald and Oprea-Lager, Daniela E. and Obuchowski, Nancy A and Caroli, Anna and Kunz, Wolfgang G. and Oei, Edwin H. and O'Connor, James P. B. and Mayerhoefer, Marius E. and Franca, Manuela and Alberich-Bayarri, Angel and Deroose, Christophe M. and Loewe, Christian and Manniesing, Rashindra and Caramella, Caroline and Lopci, Egesta and Lassau, Nathalie and Persson, Anders and Achten, Rik and Rosendahl, Karen and Clement, Olivier and Kotter, Elmar and Golay, Xavier and Smits, Marion and Dewey, Marc and Sullivan, Daniel C. and van der Lugt, Aad and deSouza, Nandita M. and European Society of Radiology}, + title = {~~Incorporating radiomics into clinical trials: expert consensus endorsed by the European Society of Radiology on considerations for data-driven compared to biologically driven quantitative biomarkers}, + doi = {10.1007/s00330-020-07598-8}, + year = {2021}, + abstract = { + Abstract + Existing quantitative imaging biomarkers (QIBs) are associated with known biological tissue characteristics and follow a well-understood path of technical, biological and clinical validation before incorporation into clinical trials. In radiomics, novel data-driven processes extract numerous visually imperceptible statistical features from the imaging data with no a priori assumptions on their correlation with biological processes. The selection of relevant features (radiomic signature) and incorporation into clinical trials therefore requires additional considerations to ensure meaningful imaging endpoints. Also, the number of radiomic features tested means that power calculations would result in sample sizes impossible to achieve within clinical trials. This article examines how the process of standardising and validating data-driven imaging biomarkers differs from those based on biological associations. Radiomic signatures are best developed initially on datasets that represent diversity of acquisition protocols as well as diversity of disease and of normal findings, rather than within clinical trials with standardised and optimised protocols as this would risk the selection of radiomic features being linked to the imaging process rather than the pathology. Normalisation through discretisation and feature harmonisation are essential pre-processing steps. Biological correlation may be performed after the technical and clinical validity of a radiomic signature is established, but is not mandatory. Feature selection may be part of discovery within a radiomics-specific trial or represent exploratory endpoints within an established trial; a previously validated radiomic signature may even be used as a primary/secondary endpoint, particularly if associations are demonstrated with specific biological processes and pathways being targeted within clinical trials. + + Key Points + * Data-driven processes like radiomics risk false discoveries due to high-dimensionality of the dataset compared to sample size, making adequate diversity of the data, cross-validation and external validation essential to mitigate the risks of spurious associations and overfitting. 
+ * Use of radiomic signatures within clinical trials requires multistep standardisation of image acquisition, image analysis and data mining processes. + * Biological correlation may be established after clinical validation but is not mandatory. + }, + url = {http://dx.doi.org/10.1007/s00330-020-07598-8}, + file = {Four21.pdf:pdf\\Four21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + automatic = {yes}, + citation-count = {45}, + pages = {6001-6012}, + volume = {31}, + pmid = {33492473}, +} + +@conference{Fransen22, + author = {S. J. Fransen and C. Roest and Q. Y. van Lohuizen and J. S. Bosma and T. C. Kwee and D. Yakar and H. Huisman}, booktitle = RSNA, - title = {Diagnostic AI to speed up MRI protocols by identifying redundant sequences: are all diffusion-weighted prostate MRI sequences necessary?}, - abstract = {PURPOSE: To explore if an expert-level diagnostic AI can help speed up MRI by identifying redundant sequences in diffusion-weighted imaging (DWI) for the diagnostic detection of clinically significant prostate cancer (csPCa). -MATERIALS AND METHODS: Existing deep learning AI architectures detects csPCa based on bpMRI at expert-level. We developed a method in which this AI assesses the added diagnostic value of a sequence. This retrospective study included 840 patients with a bi-parametric prostate MRI (bpMRI) for initial detection of csPCa. The bmMRI comprises a T2-weighted image and DWI with b-values of 50, 400, and 800 s/mm² on a 3T scanner (Skyra and Prima). Our method entails computing ADC and b1400 maps based on different DWI combinations: 1) b800 excluded, 2) b400 excluded, 3) complete set. AI models for the various bpMRI combination were trained 5-fold and statistically compared with receiver operating curve (ROC) analysis at patient and lesion level using respectively the DeLong's and permutation test. -RESULTS: The mean area under the ROC of the three combinations were respectively 0.78 +-0.027 (SD), 0.76 +-0.051, and 0.77 +- 0.057. The partial area under of the free ROC between 0.1 and 2.5 false positives lesions per patient was respectively 1.44 +- 0.22, 1.58 +- 0.18 and 1.50 +- 0.12. The slight difference in diagnostic performance (patient-level 0.01, lesion-level 0.06 ) when omitting sequence DWI b800 is not significant (respectively p = 0.2 and p = 0.43). -CONCLUSION: We conclude that expert-level AI can identify redundant sequences in MRI. Furthermore, our method provides evidence that in DWI for csPCa detection, the b800 series can be omitted from the regular bpMRI protocol decreasing total MRI scan time by 33\%. These results can provide significant speed-up of any MRI.}, - optnote = {DIAG, RADIOLOGY}, - year = {2022}, + title = {Diagnostic AI to speed up MRI protocols by identifying redundant sequences: are all diffusion-weighted prostate MRI sequences necessary?}, + abstract = {PURPOSE: To explore if an expert-level diagnostic AI can help speed up MRI by identifying redundant sequences in diffusion-weighted imaging (DWI) for the diagnostic detection of clinically significant prostate cancer (csPCa). + MATERIALS AND METHODS: Existing deep learning AI architectures detect csPCa based on bpMRI at expert level. We developed a method in which this AI assesses the added diagnostic value of a sequence. This retrospective study included 840 patients with a bi-parametric prostate MRI (bpMRI) for initial detection of csPCa.
The bpMRI comprises a T2-weighted image and DWI with b-values of 50, 400, and 800 s/mm2 on a 3T scanner (Skyra and Prima). Our method entails computing ADC and b1400 maps based on different DWI combinations: 1) b800 excluded, 2) b400 excluded, 3) complete set. AI models for the various bpMRI combinations were trained 5-fold and statistically compared with receiver operating characteristic (ROC) analysis at patient and lesion level using respectively the DeLong's and permutation test. + RESULTS: The mean area under the ROC of the three combinations were respectively 0.78 +-0.027 (SD), 0.76 +-0.051, and 0.77 +- 0.057. The partial area under the free ROC between 0.1 and 2.5 false positive lesions per patient was respectively 1.44 +- 0.22, 1.58 +- 0.18 and 1.50 +- 0.12. The slight difference in diagnostic performance (patient-level 0.01, lesion-level 0.06) when omitting sequence DWI b800 is not significant (respectively p = 0.2 and p = 0.43). + CONCLUSION: We conclude that expert-level AI can identify redundant sequences in MRI. Furthermore, our method provides evidence that in DWI for csPCa detection, the b800 series can be omitted from the regular bpMRI protocol, decreasing total MRI scan time by 33\%. These results can provide significant speed-up of any MRI.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, } +@inproceedings{Froh23, + author = {Frohwitter, Nils and Hering, Alessa and M\"{o}ller, Ralf and Hartwig, Mattis}, + title = {~~Evaluating the Effects of a Priori Deep Learning Image Synthesis on Multi-Modal MR-to-CT Image Registration Performance}, + doi = {10.5220/0011669000003414}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.5220/0011669000003414}, + file = {Froh23.pdf:pdf\\Froh23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Proceedings of the 16th International Joint Conference on Biomedical Engineering Systems and Technologies}, + automatic = {yes}, + citation-count = {0}, +} @article{Fuch03, author = {Michael H Fuchsj\"ager and Cornelia M {Schaefer-Prokop} and Edith Eisenhuber and Peter Homolka and Michael Weber and Martin A Funovics and Mathias Prokop}, @@ -6197,6 +7883,8 @@ @article{Fuet05 month = {11}, gsid = {10790555521593470988,13094974208617290718}, gscites = {253}, + ss_id = {f1b86085164de19137d89c94ca9a0acd90d9fb2f}, + all_ss_ids = {['f1b86085164de19137d89c94ca9a0acd90d9fb2f']}, } @article{Fuet06, @@ -6215,6 +7903,8 @@ @article{Fuet06 month = {11}, gsid = {1735278023980059617}, gscites = {603}, + ss_id = {ee32a128265b62aff8581e2a28bc9a7c8496cf0d}, + all_ss_ids = {['ee32a128265b62aff8581e2a28bc9a7c8496cf0d']}, } @article{Fuet07, @@ -6233,6 +7923,8 @@ @article{Fuet07 month = {2}, gsid = {3858472456176980186}, gscites = {86}, + ss_id = {6ee67f5869179ec04d752869d0fb9f7f41131fd8}, + all_ss_ids = {['6ee67f5869179ec04d752869d0fb9f7f41131fd8']}, } @article{Fuett04, @@ -6249,6 +7941,8 @@ @article{Fuett04 pmid = {15486528}, gsid = {4129980350114929790,6707800161905153786}, gscites = {176}, + ss_id = {796c7d3bc7978fba27b164353a948395f88289f3}, + all_ss_ids = {['796c7d3bc7978fba27b164353a948395f88289f3']}, } @article{Gal21, @@ -6266,6 +7960,9 @@ @article{Gal21 algorithm = {https://grand-challenge.org/algorithms/calcium-scoring-in-ct-showing-the-heart/}, file = {Gal21.pdf:pdf\\Gal21.pdf:PDF}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/235298}, + ss_id = {3b4df694a48f1f01b83215e8be048e1f83ef9bb9}, + all_ss_ids = {['3b4df694a48f1f01b83215e8be048e1f83ef9bb9']}, + gscites = {25}, } @article{Gala92, @@ -6387,6 +8084,8
@inproceedings{Gall13 month = {3}, gsid = {13232639958665219632}, gscites = {4}, + ss_id = {a9085fd32666cee3c7dd7117d4654f4e09ad4337}, + all_ss_ids = {['a9085fd32666cee3c7dd7117d4654f4e09ad4337']}, } @conference{Gall14, @@ -6412,7 +8111,9 @@ @article{Gall16 pmid = {26002132}, month = {5}, gsid = {15682115650147851893}, - gscites = {32}, + gscites = {52}, + ss_id = {0586cbd2a31688a22dac13eafb768fa2edf56b47}, + all_ss_ids = {['0586cbd2a31688a22dac13eafb768fa2edf56b47']}, } @article{Gall17, @@ -6430,7 +8131,8 @@ @article{Gall17 optnote = {DIAG, RADIOLOGY}, pmid = {28423189}, gsid = {9797853204362969292,6492259953092023726}, - gscites = {2}, + gscites = {4}, + all_ss_ids = {['d1e0a041fc417d82c584191ecf5b3b536127d9e0', 'e15e0cd21632aa9d6643b314a4d1d1a6580f2b39']}, } @article{Gall17a, @@ -6448,7 +8150,9 @@ @article{Gall17a pmid = {29227997}, month = {12}, gsid = {12834685557055984441}, - gscites = {7}, + gscites = {11}, + ss_id = {02a6b1ae18332136192bc7977c366e6340e5c62c}, + all_ss_ids = {['02a6b1ae18332136192bc7977c366e6340e5c62c']}, } @conference{Galp11, @@ -6500,7 +8204,9 @@ @inproceedings{Garc08a pmid = {19163944}, month = {8}, gsid = {12824459198516979159}, - gscites = {48}, + gscites = {54}, + ss_id = {56f820f835847a8d976f67889dce8d3904d838cf}, + all_ss_ids = {['56f820f835847a8d976f67889dce8d3904d838cf']}, } @article{Garc09, @@ -6518,7 +8224,9 @@ @article{Garc09 pmid = {19430906}, month = {5}, gsid = {17642644843566285360}, - gscites = {67}, + gscites = {73}, + ss_id = {8c75cf4de784901aba7d8671a1be37789073407b}, + all_ss_ids = {['8c75cf4de784901aba7d8671a1be37789073407b']}, } @article{Garc09a, @@ -6537,6 +8245,8 @@ @article{Garc09a month = {1}, gsid = {11747171028471696967}, gscites = {193}, + ss_id = {87a7e292cd3b829687b776d158feb9ba2a64c6ac}, + all_ss_ids = {['87a7e292cd3b829687b776d158feb9ba2a64c6ac']}, } @inproceedings{Garc15, @@ -6563,6 +8273,20 @@ @inproceedings{Garc16 optnote = {DIAG, RADIOLOGY}, } +@inproceedings{Garc20, + author = {Garc\'{i}a, Eloy and Diez, Yago and Oliver, Arnau and Karssemeijer, Nico and Mart\'{i}, Joan and Mart\'{i}, Robert and Diaz, Oliver}, + title = {~~Evaluation of elastic parameters for breast compression using a MRI-mammography registration approach}, + doi = {10.1117/12.2564155}, + year = {2020}, + abstract = {Patient-specific finite element (FE) models of the breast have received increasing attention due to the potential capability of fusing information from different image modalities. During the Magnetic Resonance Imaging (MRI) to X-ray mammography (MG) registration procedure, a FE model is compressed mimicking the mammographic acquisition. To develop an accurate model of the breast, the elastic properties and stress-strain relationship of breast tissues need to be properly defined. Several studies (in vivo and ex vivo experiments) have proposed a range of values associated with the mechanical properties of different tissues. This work analyses the elastic parameters (Young Modulus and Poisson ratio) obtained during the process of registering MRI to X-ray MG images. Position, orientation, elastic parameters and amount of compression are optimised using a simulated annealing algorithm, until the biomechanical model reaches a suitable position with respect to the corresponding mammogram. FE models obtained from 29 patients, 46 MRI-MG studies, were used to extract the optimal elastic parameters for breast compression.
The optimal Young modulus obtained in the entire dataset corresponds to 4.46 ± 1.81 kPa for adipose and 16.32 ± 8.36 kPa for glandular tissue, while the average Poisson ratio was 0.0492 ± 0.004. Furthermore, we did not find a correlation between the elastic parameters and other patient-specific factors such as breast density or patient age.}, + url = {http://dx.doi.org/10.1117/12.2564155}, + file = {Garc20.pdf:pdf\\Garc20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {15th International Workshop on Breast Imaging (IWBI2020)}, + automatic = {yes}, + citation-count = {0}, +} + @inproceedings{Gatt09, author = {Gatta, Carlo and Valencia, Juan Diego Gomez and Ciompi, Francesco and Leor, Oriol Rodriguez and Radeva, Petia}, title = {Toward robust myocardial blush grade estimation in contrast angiography}, @@ -6611,6 +8335,8 @@ @article{Gatt14 gsid = {16487560082385627861}, gscites = {11}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/136855}, + ss_id = {25c32af274aa8bed2e01063cc8d71c4d1ff238f7}, + all_ss_ids = {['25c32af274aa8bed2e01063cc8d71c4d1ff238f7']}, } @article{Gees19, @@ -6622,13 +8348,14 @@ @article{Gees19 doi = {10.1007/s13402-019-00429-z}, abstract = {Purpose: Tumor-stroma ratio (TSR) serves as an independent prognostic factor in colorectal cancer and other solid malignancies. The recent introduction of digital pathology in routine tissue diagnostics holds opportunities for automated TSR analysis. We investigated the potential of computer-aided quantification of intratumoral stroma in rectal cancer whole-slide images. Methods: Histological slides from 129 rectal adenocarcinoma patients were analyzed by two experts who selected a suitable stroma hot-spot and visually assessed TSR. A semi-automatic method based on deep learning was trained to segment all relevant tissue types in rectal cancer histology and subsequently applied to the hot-spots provided by the experts. Patients were assigned to a 'stroma-high' or 'stroma-low' group by both TSR methods (visual and automated). This allowed for prognostic comparison between the two methods in terms of disease-specific and disease-free survival times. Results: With stroma-low as baseline, automated TSR was found to be prognostic independent of age, gender, pT-stage, lymph node status, tumor grade, and whether adjuvant therapy was given, both for disease-specific survival (hazard ratio = 2.48 (95% confidence interval 1.29-4.78)) and for disease-free survival (hazard ratio = 2.05 (95% confidence interval 1.11-3.78)). Visually assessed TSR did not serve as an independent prognostic factor in multivariate analysis. Conclusions: This work shows that TSR is an independent prognosticator in rectal cancer when assessed automatically in user-provided stroma hot-spots.
The deep learning-based technology presented here may be a significant aid to pathologists in routine diagnostics.}, file = {Gees19.pdf:pdf\\Gees19.pdf:PDF - timestamp = (01-03-2019}, + timestamp = (01-03-2019}, optnote = {DIAG, RADIOLOGY}, pmid = {30825182}, month = {3}, gsid = {14546191034356758304}, - gscites = {8}, + gscites = {72}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/204300}, + all_ss_ids = {['2d7797e69a35b5ffe021a2762ef251764562dca4', 'c7ce6b90707871b922aca4cb6a8b16625980788b']}, } @inproceedings{Geij18, @@ -6644,7 +8371,9 @@ @inproceedings{Geij18 optnote = {DIAG}, month = {3}, gsid = {12313121299719118304}, - gscites = {3}, + gscites = {11}, + ss_id = {8e8b923c809ab504558fd4822a7696c6e2492cfe}, + all_ss_ids = {['8e8b923c809ab504558fd4822a7696c6e2492cfe']}, } @mastersthesis{Geij19, @@ -6669,6 +8398,9 @@ @inproceedings{Geij21 abstract = {The high incidence of BCC skin cancer has caused the amount of work for pathologists to rise to unprecedented levels. Acquiring outlined annotations for training deep learning models classifying BCC is often tedious and time consuming. End-to-end learning provides relief in labelling data by using a single label to predict a clinical outcome. We compared multiple-instance-learning (MIL) and streaming performance for detecting BCC in 420 slides collected from 72 BCC positive patients. This resulted in an ROC with AUC of 0.96 and 0.98 for respectively streaming and MIL. Saliency and probability maps showed that both methods were capable of classifying BCC in an end-to-end way with single labels.}, file = {Geij21.pdf:pdf\\Geij21.pdf:PDF}, year = {2021}, + ss_id = {4ea1f801a7d14b2bf284fef25d83bcc222c01629}, + all_ss_ids = {['4ea1f801a7d14b2bf284fef25d83bcc222c01629']}, + gscites = {2}, } @article{Geld18, @@ -6680,7 +8412,18 @@ @article{Geld18 optnote = {DIAG}, month = {6}, gsid = {10073906740286911860}, - gscites = {3}, + gscites = {6}, + ss_id = {08509a76ccc2ef61e33ca17c13733ad2397780bb}, + all_ss_ids = {['08509a76ccc2ef61e33ca17c13733ad2397780bb']}, +} + +@conference{Genu22, + author = {E. A. J. van Genugten and B. Piet and G. Schreibelt and T. van Oorschot and G. van den Heuvel and F. Ciompi and C. Jacobs and J. de Vries and M. M. van den Heuvel and E. Aarntzen}, + title = {Imaging tumor-infiltrating CD8 (+) T-cells in non-small cell lung cancer patients upon neo-adjuvant treatment with durvalumab}, + booktitle = {European Molecular Imaging Meeting}, + year = {2022}, + abstract = {INTRODUCTION: Immune checkpoint inhibitors (ICI), like targeting programmed death receptor ligand 1 (PD-L1), have revolutionized anti-cancer treatments, including non-small cell lung cancer (NSCLC) [1, 2]. Assessment of PD-L1 expression on tumor biopsies is current practice, but there is a need for additional biomarkers correlating to the complex mechanism of action of ICI. The presence of tumor-infiltrating CD8+ T-cells (TILs) is a robust biomarker associated with immune therapy success [3-6]. Tools to track TILs in patients during ICI treatment would allow further development of immune-oncology drugs. METHODS: This ongoing single-center prospective study (NCT03853187) includes patients with histologically proven T1b-3N0-1M0 NSCLC eligible for resection. Exclusion criteria are previous anti-cancer therapy <6 months and immune disorders or suppression. Patients receive two courses of neo-adjuvant durvalumab (750mg Q2W) and consecutive TIL imaging.
Cohort 1 underwent apheresis and magnetic-activated cell sorting (CliniMACS) to isolate 100 x10e6 autologous CD8+ T-cells for ex vivo labeling with 111In-oxine. Re-injection was followed by 4h post-injection (p.i.) planar imaging, 70h p.i. SPECT imaging, standard-of-care surgery and 78h p.i. uSPECT of the resected lobe. Patients in cohort 2 (ongoing) receive 1.5mg 89Zr-Df-crefmirlimab followed by PET/CT 24h p.i. and 74h p.i. uPET/CT of the resected lobe. RESULTS/DISCUSSION: In cohort 1, 8/10 patients completed TIL imaging; one procedure was withdrawn due to COVID-19 restrictions and one due to unsuccessful T-cell isolation. CliniMACS yield ranged 240-714 x10e6 CD8+ T-cells, purity 84-97% and cell viability 92-100%. Labeling efficacy of 100 x10e6 cells for re-injection ranged 42-64% with injected activity of 22,4-36,7 MBq In-111. TIL imaging was completed by 2/3 patients in cohort 2; one subject discontinued neo-adjuvant treatment due to pneumonia. To determine the potential for visual assessment of TILs, we analyzed ratios between tumor uptake and contralateral lung. We observed large variations within cohort 1, dependent on tumor localization. Ratios between tumor and bloodpool activity were determined to quantify specific accumulation in the tumor. Our results favor quantification of T-cells on PET over SPECT given its higher sensitivity and spatial resolution. Correlation of imaging with CD8+ T-cells in the resected tumor is ongoing (will be presented). CONCLUSIONS: We implemented two methods for tracking CD8+ T-cells in early-stage NSCLC patients after neo-adjuvant durvalumab. Although ex vivo cell labeling perhaps more specifically targets migrating TILs into the tumor, 89Zr-Df-crefmirlimab has the potential to also target residing cells. Quantitative correlation with presence of TILs in the resected tumor will help to determine the role of these imaging tools in the development of immune-oncology drugs.}, + optnote = {DIAG, RADIOLOGY}, } @inproceedings{Geor16, @@ -6710,6 +8453,17 @@ @article{Gern18 file = {Gern18.pdf:pdf\\Gern18.pdf:PDF}, } +@mastersthesis{Geur23, + author = {Geurtjens, Ruben and Peeters, Dr\'{e} and Jacobs, Colin}, + title = {Self-supervised Out-of-Distribution detection for medical imaging}, + abstract = {Out-of-distribution (OOD) detection is an important aspect of deep learning-based medical imaging approaches for ensuring the safety and accuracy of diagnostic tools. In this paper, we investigate the effectiveness of three self-supervised learning techniques for OOD detection in both a labeled RadboudXR dataset and a clinical dataset with OOD data but no labels. Specifically, we explore two predictive self-supervised techniques and one contrastive self-supervised technique and evaluate their ability to detect OOD samples. Furthermore, we evaluate the performance of the state-of-the-art vision transformer model on medical data both as a standalone method and as the backbone of a self-supervised task. Our results indicate that the contrastive self-supervised method Bootstrap-Your-Own-latent (BYOL) and vision transformer model were not effective in detecting OOD samples. However, the predictive methods performed well on both 2D and 3D data, and demonstrated scalability in difficulty. These findings suggest the potential utility of self-supervised learning techniques for OOD detection in medical imaging. When determining an OOD cut-off value for clinical usage there are, however, problems with separation between datasets.
These challenges suggest that further research is needed before these techniques can be adopted for clinical usage.}, + file = {Geur23.pdf:pdf\\Geur23.pdf:PDF}, + journal = {Master thesis}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2023}, +} + @inproceedings{Ghaf13, author = {Mohsen Ghafoorian and Nasrin Taghizadeh and Hamid Beigy}, title = {Automatic Abstraction in Reinforcement Learning Using Ant System Algorithm}, @@ -6733,7 +8487,9 @@ @inproceedings{Ghaf15 optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {18321616094304826808}, - gscites = {12}, + gscites = {14}, + ss_id = {aa66d8655463fa432fd3ce023e6ad10eec2d337a}, + all_ss_ids = {['aa66d8655463fa432fd3ce023e6ad10eec2d337a']}, } @inproceedings{Ghaf16, @@ -6748,7 +8504,9 @@ @inproceedings{Ghaf16 optnote = {DIAG, RADIOLOGY}, month = {4}, gsid = {13928783358122591393}, - gscites = {47}, + gscites = {72}, + ss_id = {19ef79992e71a374d59f8be3feb93125fd7815d9}, + all_ss_ids = {['19ef79992e71a374d59f8be3feb93125fd7815d9']}, } @article{Ghaf16a, @@ -6761,22 +8519,24 @@ @article{Ghaf16a pages = {6246-6258}, doi = {10.1118/1.4966029}, abstract = {Purpose: - White matter hyperintensities (WMH) are seen on FLAIR-MRI in several neurological disorders, including multiple sclerosis, dementia, Parkinsonism, stroke and cerebral small vessel disease (SVD). WMHs are often used as biomarkers for prognosis or disease progression in these diseases, and additionally longitudinal quantification of WMHs is used to evaluate therapeutic strategies. Human readers show considerable disagreement and inconsistency on detection of small lesions. A multitude of automated detection algorithms for WMHs exists, but since most of the current automated approaches are tuned to optimize segmentation performance according to Jaccard or Dice scores, smaller WMHs often go undetected in these approaches. In this paper, the authors propose a method to accurately detect all WMHs, large as well as small. - - Methods: - A two-stage learning approach was used to discriminate WMHs from normal brain tissue. Since small and larger WMHs have quite a different appearance, the authors have trained two probabilistic classifiers: one for the small WMHs (<3 mm effective diameter) and one for the larger WMHs (>3 mm in-plane effective diameter). For each size-specific classifier, an Adaboost is trained for five iterations, with random forests as the basic classifier. The feature sets consist of 22 features including intensities, location information, blob detectors, and second order derivatives. The outcomes of the two first-stage classifiers were combined into a single WMH likelihood by a second-stage classifier. Their method was trained and evaluated on a dataset with MRI scans of 362 SVD patients (312 subjects for training and validation annotated by one and 50 for testing annotated by two trained raters). To analyze performance on the separate test set, the authors performed a free-response receiving operating characteristic (FROC) analysis, instead of using segmentation based methods that tend to ignore the contribution of small WMHs. - - Results: - Experimental results based on FROC analysis demonstrated a close performance of the proposed computer aided detection (CAD) system to human readers. While an independent reader had 0.78 sensitivity with 28 false positives per volume on average, their proposed CAD system reaches a sensitivity of 0.73 with the same number of false positives.
- - Conclusions: - The authors have developed a CAD system with all its ingredients being optimized for a better detection of WMHs of all size, which shows performance close to an independent reader.}, + White matter hyperintensities (WMH) are seen on FLAIR-MRI in several neurological disorders, including multiple sclerosis, dementia, Parkinsonism, stroke and cerebral small vessel disease (SVD). WMHs are often used as biomarkers for prognosis or disease progression in these diseases, and additionally longitudinal quantification of WMHs is used to evaluate therapeutic strategies. Human readers show considerable disagreement and inconsistency on detection of small lesions. A multitude of automated detection algorithms for WMHs exists, but since most of the current automated approaches are tuned to optimize segmentation performance according to Jaccard or Dice scores, smaller WMHs often go undetected in these approaches. In this paper, the authors propose a method to accurately detect all WMHs, large as well as small. + + Methods: + A two-stage learning approach was used to discriminate WMHs from normal brain tissue. Since small and larger WMHs have quite a different appearance, the authors have trained two probabilistic classifiers: one for the small WMHs (<3 mm effective diameter) and one for the larger WMHs (>3 mm in-plane effective diameter). For each size-specific classifier, an Adaboost is trained for five iterations, with random forests as the basic classifier. The feature sets consist of 22 features including intensities, location information, blob detectors, and second order derivatives. The outcomes of the two first-stage classifiers were combined into a single WMH likelihood by a second-stage classifier. Their method was trained and evaluated on a dataset with MRI scans of 362 SVD patients (312 subjects for training and validation annotated by one and 50 for testing annotated by two trained raters). To analyze performance on the separate test set, the authors performed a free-response receiving operating characteristic (FROC) analysis, instead of using segmentation based methods that tend to ignore the contribution of small WMHs. + + Results: + Experimental results based on FROC analysis demonstrated a close performance of the proposed computer aided detection (CAD) system to human readers. While an independent reader had 0.78 sensitivity with 28 false positives per volume on average, their proposed CAD system reaches a sensitivity of 0.73 with the same number of false positives. 
+ + Conclusions: + The authors have developed a CAD system with all its ingredients being optimized for a better detection of WMHs of all size, which shows performance close to an independent reader.}, file = {Ghaf16a.pdf:pdf\\Ghaf16a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {27908171}, month = {11}, gsid = {2058349102085812132}, - gscites = {33}, + gscites = {61}, + ss_id = {ea864dd65dfd1207fde07941ceaf1bda38ba9c1b}, + all_ss_ids = {['ea864dd65dfd1207fde07941ceaf1bda38ba9c1b']}, } @article{Ghaf17a, @@ -6792,7 +8552,9 @@ @article{Ghaf17a optnote = {DIAG}, pmid = {28271039}, gsid = {14192982400070367057}, - gscites = {85}, + gscites = {117}, + ss_id = {7f7990b0d4a5f7b356d06c7017a337b5ae3b31cf}, + all_ss_ids = {['7f7990b0d4a5f7b356d06c7017a337b5ae3b31cf']}, } @inproceedings{Ghaf17b, @@ -6809,7 +8571,9 @@ @inproceedings{Ghaf17b file = {Ghaf17b.pdf:pdf\\Ghaf17b.pdf:PDF}, optnote = {DIAG}, gsid = {14433108461414425390}, - gscites = {124}, + gscites = {279}, + ss_id = {102a580a89b7e9d5a6d8e79864db183d6b5fb6ad}, + all_ss_ids = {['102a580a89b7e9d5a6d8e79864db183d6b5fb6ad']}, } @article{Ghaf17c, @@ -6828,7 +8592,8 @@ @article{Ghaf17c pmid = {28698556}, month = {7}, gsid = {5144190841808936245}, - gscites = {117}, + gscites = {208}, + all_ss_ids = {['91feb0cb04d67a5c38d0a4e7c67fc7874491abc6', '925024223075dcc5b4db81f4b07eddf2cbdf4519']}, } @inproceedings{Ghaf18, @@ -6846,7 +8611,9 @@ @inproceedings{Ghaf18 optnote = {DIAG}, month = {3}, gsid = {14380272279536530363}, - gscites = {14}, + gscites = {17}, + ss_id = {2e11acb5e60b59af637cc7faea6e390ed6a9429b}, + all_ss_ids = {['2e11acb5e60b59af637cc7faea6e390ed6a9429b']}, } @phdthesis{Ghaf18a, @@ -6863,6 +8630,21 @@ @phdthesis{Ghaf18a journal = {PhD thesis}, } +@book{Gibs15, + author = {Gibson, Eli and Huisman, Henkjan J. and Barratt, Dean C.}, + title = {~~Statistical Power in Image Segmentation: Relating Sample Size to Reference Standard Quality}, + doi = {10.1007/978-3-319-24574-4_13}, + year = {2015}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-319-24574-4_13}, + file = {Gibs15.pdf:pdf\\Gibs15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Lecture Notes in Computer Science}, + automatic = {yes}, + citation-count = {0}, + pages = {105-113}, +} + @article{Gibs17, author = {Eli Gibson and Yipeng Hu and Henkjan Huisman and Dean Barratt}, title = {Designing image segmentation studies: statistical power, sample size and reference standard quality}, @@ -6873,16 +8655,18 @@ @article{Gibs17 doi = {10.1016/j.media.2017.07.004}, url = {https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5666910/}, abstract = {Segmentation algorithms are typically evaluated by comparison to an accepted reference standard. The cost of generating accurate reference standards for medical image segmentation can be substantial. Since the study cost and the likelihood of detecting a clinically meaningful difference in accuracy both depend on the size and on the quality of the study reference standard, balancing these trade-offs supports the efficient use of research resources. - - In this work, we derive a statistical power calculation that enables researchers to estimate the appropriate sample size to detect clinically meaningful differences in segmentation accuracy (i.e. the proportion of voxels matching the reference standard) between two algorithms. 
Furthermore, we derive a formula to relate reference standard errors to their effect on the sample sizes of studies using lower-quality (but potentially more affordable and practically available) reference standards. - - The accuracy of the derived sample size formula was estimated through Monte Carlo simulation, demonstrating, with 95% confidence, a predicted statistical power within 4% of simulated values across a range of model parameters. This corresponds to sample size errors of less than 4 subjects and errors in the detectable accuracy difference less than 0.6%. The applicability of the formula to real-world data was assessed using bootstrap resampling simulations for pairs of algorithms from the PROMISE12 prostate MR segmentation challenge data set. The model predicted the simulated power for the majority of algorithm pairs within 4% for simulated experiments using a high-quality reference standard and within 6% for simulated experiments using a low-quality reference standard. A case study, also based on the PROMISE12 data, illustrates using the formulae to evaluate whether to use a lower-quality reference standard in a prostate segmentation study.}, + + In this work, we derive a statistical power calculation that enables researchers to estimate the appropriate sample size to detect clinically meaningful differences in segmentation accuracy (i.e. the proportion of voxels matching the reference standard) between two algorithms. Furthermore, we derive a formula to relate reference standard errors to their effect on the sample sizes of studies using lower-quality (but potentially more affordable and practically available) reference standards. + + The accuracy of the derived sample size formula was estimated through Monte Carlo simulation, demonstrating, with 95% confidence, a predicted statistical power within 4% of simulated values across a range of model parameters. This corresponds to sample size errors of less than 4 subjects and errors in the detectable accuracy difference less than 0.6%. The applicability of the formula to real-world data was assessed using bootstrap resampling simulations for pairs of algorithms from the PROMISE12 prostate MR segmentation challenge data set. The model predicted the simulated power for the majority of algorithm pairs within 4% for simulated experiments using a high-quality reference standard and within 6% for simulated experiments using a low-quality reference standard. A case study, also based on the PROMISE12 data, illustrates using the formulae to evaluate whether to use a lower-quality reference standard in a prostate segmentation study.}, file = {Gibs17.pdf:pdf\\Gibs17.pdf:PDF}, optnote = {DIAG}, pmid = {28772163}, month = {12}, gsid = {10750453317236818615}, - gscites = {7}, + gscites = {14}, + ss_id = {91628553dee20285dd23c7a77bcc1b9510abe0c8}, + all_ss_ids = {['91628553dee20285dd23c7a77bcc1b9510abe0c8']}, } @inproceedings{Gibs18, @@ -6892,11 +8676,13 @@ @inproceedings{Gibs18 year = {2018}, doi = {10.1007/978-3-030-00937-3_58}, abstract = {Deep-learning-based segmentation tools have yielded higher reported segmentation accuracies for many medical imaging applications. However, inter-site variability in image properties can challenge the translation of these tools to data from 'unseen' sites not included in the training data. 
This study quantifies the impact of inter-site variability on the accuracy of deep-learning-based segmentations of the prostate from magnetic resonance (MR) images, and evaluates two strategies for mitigating the reduced accuracy for data from unseen sites: training on multi-site data and training with limited additional data from the unseen site. Using 376 T2-weighted prostate MR images from six sites, we compare the segmentation accuracy (Dice score and boundary distance) of - three deep-learning-based networks trained on data from a single site and on various configurations of data from multiple sites. We found that the segmentation accuracy of a single-site network was substantially worse on data from unseen sites than on data from the training site. Training on multi-site data yielded marginally improved accuracy and robustness. However, including as few as 8 subjects from the unseen site, e.g. during commissioning of a new clinical system, yielded substantial improvement (regaining 75% of the difference in Dice score).}, + three deep-learning-based networks trained on data from a single site and on various configurations of data from multiple sites. We found that the segmentation accuracy of a single-site network was substantially worse on data from unseen sites than on data from the training site. Training on multi-site data yielded marginally improved accuracy and robustness. However, including as few as 8 subjects from the unseen site, e.g. during commissioning of a new clinical system, yielded substantial improvement (regaining 75% of the difference in Dice score).}, file = {Gibs18.pdf:pdf\\Gibs18.pdf:PDF}, optnote = {DIAG}, gsid = {6478096673123844302}, - gscites = {15}, + gscites = {46}, + ss_id = {7920c85aa097d57a787de42802120aaaf3f94ae9}, + all_ss_ids = {['7920c85aa097d57a787de42802120aaaf3f94ae9']}, } @article{Giet07a, @@ -6914,7 +8700,9 @@ @article{Giet07a pmid = {17709835}, month = {9}, gsid = {9253661431333367415}, - gscites = {67}, + gscites = {69}, + ss_id = {82a456a6d5fec100feaeeeffa1dbfc24754ccc12}, + all_ss_ids = {['82a456a6d5fec100feaeeeffa1dbfc24754ccc12']}, } @conference{Giet07c, @@ -6956,8 +8744,10 @@ @article{Giet10 pmid = {19734030}, month = {1}, gsid = {3368562612864940397}, - gscites = {27}, + gscites = {30}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/88840}, + ss_id = {2262ebcd92f9597d37813d0d3d27d397260cf674}, + all_ss_ids = {['2262ebcd92f9597d37813d0d3d27d397260cf674']}, } @article{Giet10a, @@ -6973,6 +8763,9 @@ @article{Giet10a number = {7}, month = {7}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/89855}, + ss_id = {93eee9d038ddbc0564db2fffd697209ba44d2f86}, + all_ss_ids = {['93eee9d038ddbc0564db2fffd697209ba44d2f86']}, + gscites = {0}, } @article{Gige01, @@ -6987,7 +8780,9 @@ @article{Gige01 number = {12}, pmid = {11811820}, gsid = {2190644524997558672}, - gscites = {154}, + gscites = {240}, + ss_id = {b8aaa440384a41171db801a9dfd7e0eee001fa2c}, + all_ss_ids = {['b8aaa440384a41171db801a9dfd7e0eee001fa2c']}, } @article{Gige13, @@ -7004,7 +8799,9 @@ @article{Gige13 pmid = {23683087}, month = {7}, gsid = {6411119075964017758}, - gscites = {149}, + gscites = {196}, + ss_id = {7b178f081b9c64dae9141dc9d197f49934b00c58}, + all_ss_ids = {['7b178f081b9c64dae9141dc9d197f49934b00c58']}, } @article{Gils99, @@ -7021,6 +8818,8 @@ @article{Gils99 month = {12}, gsid = {12917222000575413863}, gscites = {105}, + ss_id = {214568f4f1bcdbd4423f944a5c6b282201370750}, + all_ss_ids = {['214568f4f1bcdbd4423f944a5c6b282201370750']}, } 
@inproceedings{Ginn00, @@ -7038,6 +8837,8 @@ @inproceedings{Ginn00 optnote = {DIAG, RADIOLOGY}, month = {6}, gscites = {43}, + ss_id = {68c218a07341bf3a5fd2a587ef4cfb1845046798}, + all_ss_ids = {['68c218a07341bf3a5fd2a587ef4cfb1845046798']}, } @article{Ginn00a, @@ -7054,7 +8855,9 @@ @article{Ginn00a pmid = {11099215}, month = {10}, gsid = {17671026951302443772}, - gscites = {138}, + gscites = {159}, + ss_id = {54b1c3215b4de092378641f88dcd39a773671cd1}, + all_ss_ids = {['54b1c3215b4de092378641f88dcd39a773671cd1', '9d9ab68f1203691be52eab07457ffb7bf6bfdc2b']}, } @article{Ginn00b, @@ -7098,6 +8901,7 @@ @phdthesis{Ginn01a gsid = {3030326376977132859}, gscites = {51}, journal = {PhD thesis}, + all_ss_ids = {['febeaee15cc4cdf91f445ce50e7b59160e90302b']}, } @inproceedings{Ginn01b, @@ -7112,6 +8916,8 @@ @inproceedings{Ginn01b gsid = {9058761510841848250}, optnote = {DIAG, RADIOLOGY}, gscites = {52}, + ss_id = {793cc33a9f29953685ca942d513e82870e9ccd13}, + all_ss_ids = {['793cc33a9f29953685ca942d513e82870e9ccd13']}, } @article{Ginn01c, @@ -7129,6 +8935,8 @@ @article{Ginn01c pmid = {11811823}, gsid = {6827224050908194764}, gscites = {597}, + ss_id = {ceddfad717e3e521d337f35904a1c78c1d75c490}, + all_ss_ids = {['ceddfad717e3e521d337f35904a1c78c1d75c490']}, } @article{Ginn02, @@ -7147,6 +8955,8 @@ @article{Ginn02 month = {8}, gsid = {565682893025109187}, gscites = {652}, + ss_id = {99840ed7cc49c379273099b57bf58f55f5274e64}, + all_ss_ids = {['99840ed7cc49c379273099b57bf58f55f5274e64']}, } @article{Ginn02a, @@ -7163,7 +8973,9 @@ @article{Ginn02a number = {2}, pmid = {11929101}, gsid = {5477064095233485439,10626615429567403174}, - gscites = {255}, + gscites = {262}, + ss_id = {cdc58c3da5a84aa0cb1bc0d38d35c094c3e34579}, + all_ss_ids = {['cdc58c3da5a84aa0cb1bc0d38d35c094c3e34579']}, } @inproceedings{Ginn02b, @@ -7206,6 +9018,8 @@ @inproceedings{Ginn03a month = {5}, gsid = {5452437485381444577}, gscites = {34}, + ss_id = {3d2b3cfbd2d98c33a3667c5f13b12344353582f8}, + all_ss_ids = {['3d2b3cfbd2d98c33a3667c5f13b12344353582f8']}, } @article{Ginn03b, @@ -7222,6 +9036,8 @@ @article{Ginn03b optnote = {DIAG, RADIOLOGY}, month = {4}, gscites = {33}, + ss_id = {631209b8421b31dc7911d58a324a8afa4cb29dcd}, + all_ss_ids = {['631209b8421b31dc7911d58a324a8afa4cb29dcd']}, } @inproceedings{Ginn04, @@ -7236,6 +9052,8 @@ @inproceedings{Ginn04 optnote = {DIAG, RADIOLOGY}, gsid = {2680329712311893870}, gscites = {11}, + ss_id = {5213634acf39625658b52e14b18b2fcc322044c6}, + all_ss_ids = {['5213634acf39625658b52e14b18b2fcc322044c6']}, } @conference{Ginn05, @@ -7285,6 +9103,8 @@ @inproceedings{Ginn06a optnote = {DIAG, RADIOLOGY}, gsid = {6718720819333291168}, gscites = {15}, + ss_id = {eb2b4becf4429230504117b0654db1b7760695d8}, + all_ss_ids = {['eb2b4becf4429230504117b0654db1b7760695d8']}, } @article{Ginn06b, @@ -7353,6 +9173,8 @@ @inproceedings{Ginn08 pmid = {18979751}, gsid = {11381407062701827735}, gscites = {95}, + ss_id = {d38f3d596081bef8ffe67e92d112e0633d0be0fe}, + all_ss_ids = {['d38f3d596081bef8ffe67e92d112e0633d0be0fe']}, } @article{Ginn08a, @@ -7369,7 +9191,9 @@ @article{Ginn08a number = {3}, month = {9}, gsid = {282439729250169033}, - gscites = {14}, + gscites = {16}, + ss_id = {0c4d24dadb96b397af79b4d6ce7087548c797d72}, + all_ss_ids = {['0c4d24dadb96b397af79b4d6ce7087548c797d72']}, } @inproceedings{Ginn08b, @@ -7385,6 +9209,9 @@ @inproceedings{Ginn08b file = {Ginn08b.pdf:pdf\\Ginn08b.pdf:PDF}, optnote = {DIAG, RADIOLOGY, noduleDetectionCT}, month = {3}, + ss_id = {1a0ce62f3c2a0c435c4493e3b287d9c481aef1c7}, + all_ss_ids = 
{['1a0ce62f3c2a0c435c4493e3b287d9c481aef1c7']}, + gscites = {2}, } @conference{Ginn08c, @@ -7428,7 +9255,9 @@ @article{Ginn09a pmid = {19604661}, month = {11}, gsid = {4166719588344969996}, - gscites = {85}, + gscites = {98}, + ss_id = {1bdcdb8e63f995041451642c53c434face9033f7}, + all_ss_ids = {['1bdcdb8e63f995041451642c53c434face9033f7']}, } @conference{Ginn09b, @@ -7469,7 +9298,9 @@ @article{Ginn10a pmid = {20573538}, month = {12}, gsid = {16898155382402394650}, - gscites = {229}, + gscites = {288}, + ss_id = {39ae90f765547171cc6163ba3ab2e977c86c03d3}, + all_ss_ids = {['39ae90f765547171cc6163ba3ab2e977c86c03d3']}, } @book{Ginn10b, @@ -7498,8 +9329,10 @@ @article{Ginn11 pmid = {22095995}, month = {12}, gsid = {11154494557308057950,14993629594919203131}, - gscites = {197}, + gscites = {244}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/96751}, + ss_id = {cb1fabd765f28f4e61a5e6771b1468cb9a113fc4}, + all_ss_ids = {['cb1fabd765f28f4e61a5e6771b1468cb9a113fc4']}, } @conference{Ginn12, @@ -7517,7 +9350,7 @@ @conference{Ginn13 booktitle = RSNA, year = {2013}, abstract = {BACKGROUND Lung diseases are among the most deadly disorders: chronic obstructive pulmonary disease ({COPD}), a devastating disease with 12 million people in the United States currently diagnosed, ranks #3 on the list of causes of death worldwide. Lung cancer, by far the most common and most deadly cancer in men and women worldwide, ranks #5. Tuberculosis ({TB}), despite the availability of a cheap and effective cure, ranks #10. Imaging is crucially important for early detection, diagnosis, follow-up, and treatment planning of {COPD}, lung cancer and {TB}. Chest radiography and computed tomography are the most important imaging modalities for the lung. METHODOLOGY/APPLICATION We present a flexible workstation for a quick and effective extraction of quantitative imaging parameters related to {COPD}, lung cancer and {TB}. The workstation loads an arbitrary number of {CT} and chest radiography studies of each subject simultaneously, allowing the user to instantly track the evolution of any lesion. Each {CT} scan is elastically registered to all prior {CT} scans of the same subject. Findings in prior scans have been automatically propagated and linked to findings in the current scan. All scans and processing results are preloaded in the background to ensure rapid reading. The {CIRRUS} {L}ung workstation has been developed jointly by the Diagnostic Image Analysis Group, Radboud University Nijmegen Medical Centre, Nijmegen, The Netherlands, and Fraunhofer MEVIS, Bremen, Germany. It is based on the MeVisLab software platform. The workstation is available through research collaboration agreements and in active use in a variety of projects. {CIRRUS} {L}ung has a number of modes that will be demonstrated: 1) High throughput lung screening. Scan quality is automatically assessed; data with low quality, artifacts or underlying interstitial lung disease are flagged. High sensitivity computerized detection (CAD) of solid nodules and sub-solid nodules is included. High throughput reading with CAD as a first reader is supported. Each nodule is automatically characterized as solid, part-solid, non-solid, or benign (calcified lesions, perifissural lymph nodes). Volumetry, volume growth rate, mass and mass growth rate are automatically computed with advanced segmentation algorithms that can handle sub-solid lesions and segment the solid core of part-solid nodules. Findings are summarized in a structured report. 
Follow-up recommendations according to the Fleischner guidelines are included. 2) Clinical oncology work-up. This mode is similar to the screening mode, but includes completely automatic generation of {RECIST} workup. 3) Chest radiography lung screening. Chest radiographs can be annotated and viewed with various tools such as bone suppression and gray scale inversion. Computer-aided detection and interactive CAD reading are supported. 4) {COPD} quantification. Elastic registration between inspiration and expiration scans has been precomputed and allows for linked scrolling. Lungs, lobes, airways, fissures, and segments are automatically segmented for regional functional analysis. In case the user is not satisfied with the segmentation results, (s)he can quickly correct these with an intuitive interactive correction method. {CT} image standardization is included using a precomputed dedicated energy correction algorithm that makes quantifications less dependent on scan protocol (scanner model, kernel, iterative reconstruction). Once the segmentations have been approved, a range of quantifiable features can be visualized in the workstation: parenchyma features, airway features, and fissural completeness. Measurements are reported for both inspiration and expiration for the whole lung as well as per lobe and segment. Changes between inspiration and expiration are reported. After workup of a study of a {COPD} patient, a structured report is produced that contains screenshots, renderings, and all requested measurements. 5) {TB} Diagnostics. In this mode chest radiographs can be inspected and texture analysis that detects - abnormalities consistent with {TB} can be inspected. A novel symmetry analysis is available to facilitate contralateral comparisons. Detection and quantification of costophrenic angle bluntness is included. Cavities can be semi-automatically segmented. DEMONSTRATION STRATEGY The exhibit will be accompanied by an informational poster that will highlight the key features and algorithmic concepts that underlie the automated analysis. Attendees will be able to gain hands-on experience with the workstation and read cases. For each reading mode, extensive example datasets are available. In particular, the completely processed {LIDC/IDRI} database, including all {CT} scans and chest radiographs, is available for inspection. REFERENCES AND PUBLICATIONS The algorithms presented in the showcase are based on over 20 different journal publications. 
These are listed on http://cirrus.diagnijmegen.nl.}, + abnormalities consistent with {TB} can be inspected. A novel symmetry analysis is available to facilitate contralateral comparisons. Detection and quantification of costophrenic angle bluntness is included. Cavities can be semi-automatically segmented. DEMONSTRATION STRATEGY The exhibit will be accompanied by an informational poster that will highlight the key features and algorithmic concepts that underlie the automated analysis. Attendees will be able to gain hands-on experience with the workstation and read cases. For each reading mode, extensive example datasets are available. In particular, the completely processed {LIDC/IDRI} database, including all {CT} scans and chest radiographs, is available for inspection. REFERENCES AND PUBLICATIONS The algorithms presented in the showcase are based on over 20 different journal publications. These are listed on http://cirrus.diagnijmegen.nl.}, optnote = {DIAG, RADIOLOGY}, } @@ -7532,6 +9365,8 @@ @inproceedings{Ginn13a file = {Ginn13a.pdf:pdf\\Ginn13a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {614866246313148987}, gscites = {5}, + ss_id = {55fe10fb639ffc833c1ba84dcb2b673275ef3773}, + all_ss_ids = {['55fe10fb639ffc833c1ba84dcb2b673275ef3773']}, } @conference{Ginn14, @@ -7555,7 +9390,9 @@ @inproceedings{Ginn15 optnote = {DIAG, RADIOLOGY}, month = {4}, gsid = {3459188085599203383}, - gscites = {227}, + gscites = {262}, + ss_id = {81570ccbf4a1edb8898e8969f9befa3561e46318}, + all_ss_ids = {['81570ccbf4a1edb8898e8969f9befa3561e46318']}, } @article{Ginn17, @@ -7573,7 +9410,8 @@ @article{Ginn17 pmid = {28211015}, month = {2}, gsid = {14217309155856769796}, - gscites = {76}, + gscites = {157}, + all_ss_ids = {['46479bbea7749cb2db35b139206039531327053c', 'b69fe5a837277ddbea5215d6bacd3a902e9d11ce']}, } @article{Ginn18, @@ -7589,7 +9427,9 @@ @article{Ginn18 optnote = {DIAG, RADIOLOGY}, pmid = {30422089}, gsid = {3985594963871751836}, - gscites = {4}, + gscites = {10}, + ss_id = {f46d34f0234422fb7db3837dd2b32efe03fc6e9a}, + all_ss_ids = {['f46d34f0234422fb7db3837dd2b32efe03fc6e9a', '2723ce1686eea776df179e362cd9a8b8e2bb7ff1']}, } @conference{Ginn18a, @@ -7611,8 +9451,8 @@ @conference{Ginn18b year = {2018}, doi = {10.5334/jbsr.1656}, abstract = {Artificial intelligence (AI), particularly deep learning, is currently at the top of the hype cycle. Application of this technology to the analysis of medical images is attracting a lot of attention worldwide. - - At the same time, the average radiologist is using very little to no AI tools in her daily practice. This lecture provides a brief explanation of deep learning and explains what makes this technology different from previous approaches and why it is so powerful. A number of AI applications, some in use that were developed and commercialized in our research group, are presented. These applications serve as examples to define a number of different types of AI products that differ in the way they are placed in (or outside) the workflow of radiologists. This lecture emphasizes how some of these tools replace (a small part of the work of) radiologists, while other augment radiologists, and yet others take the radiologists out of the loop in the care cycle of the patient. Finally, it is discussed how radiologists can, and should, be involved in the development of real-life AI applications.}, + + At the same time, the average radiologist is using very few to no AI tools in her daily practice. This lecture provides a brief explanation of deep learning and explains what makes this technology different from previous approaches and why it is so powerful. A number of AI applications, some in use that were developed and commercialized in our research group, are presented. These applications serve as examples to define a number of different types of AI products that differ in the way they are placed in (or outside) the workflow of radiologists. This lecture emphasizes how some of these tools replace (a small part of the work of) radiologists, while others augment radiologists, and yet others take the radiologists out of the loop in the care cycle of the patient. 
Finally, it is discussed how radiologists can, and should, be involved in the development of real-life AI applications.}, file = {Ginn18b.pdf:pdf\\Ginn18b.pdf:PDF}, optnote = {DIAG}, gsid = {2647745699443927210}, @@ -7629,6 +9469,9 @@ @article{Ginn20 journal = Radiology, optnote = {DIAG, INPRESS}, file = {Ginn20.pdf:pdf\\Ginn20.pdf:PDF}, + ss_id = {11cfab281e93fa7803931be8b589aa1f09d7af02}, + all_ss_ids = {['11cfab281e93fa7803931be8b589aa1f09d7af02']}, + gscites = {11}, } @article{Ginn22, @@ -7640,6 +9483,9 @@ @article{Ginn22 pages = {1-2}, volume = {00}, file = {Ginn22.pdf:pdf\\Ginn22.pdf:PDF}, + ss_id = {13d81eb12871aa6a4f15fc5c9b127bc320f55179}, + all_ss_ids = {['13d81eb12871aa6a4f15fc5c9b127bc320f55179']}, + gscites = {1}, } @article{Ginn98, @@ -7674,7 +9520,9 @@ @inproceedings{Ginn99 gsid = {10753743025636204872}, optnote = {DIAG, RADIOLOGY}, month = {6}, - gscites = {23}, + gscites = {31}, + ss_id = {b6c2b03aef8689cbd5bbe2cae322eaaea832b7f6}, + all_ss_ids = {['b6c2b03aef8689cbd5bbe2cae322eaaea832b7f6']}, } @inproceedings{Ginn99a, @@ -7691,7 +9539,9 @@ @inproceedings{Ginn99a optnote = {DIAG, RADIOLOGY}, month = {10}, gsid = {8334497321397385275}, - gscites = {8}, + gscites = {14}, + ss_id = {99042acbce8ec757d91749259821dce751a9a809}, + all_ss_ids = {['99042acbce8ec757d91749259821dce751a9a809']}, } @article{Ginn99b, @@ -7708,6 +9558,8 @@ @article{Ginn99b optnote = {DIAG, RADIOLOGY}, number = {2-3}, gscites = {65}, + ss_id = {8ae7d6146b6d9ed23f6b35f8fa23d3b467bdecef}, + all_ss_ids = {['8ae7d6146b6d9ed23f6b35f8fa23d3b467bdecef']}, } @article{Giro22, @@ -7721,6 +9573,9 @@ @article{Giro22 optnote = {DIAG}, pmid = {35441256}, year = {2022}, + ss_id = {8c882c8737d351dfe19e663228e4c3bd2cafa992}, + all_ss_ids = {['8c882c8737d351dfe19e663228e4c3bd2cafa992']}, + gscites = {22}, } @article{Gish14, @@ -7738,7 +9593,72 @@ @article{Gish14 pmid = {24712569}, month = {7}, gsid = {4001748312327312389}, - gscites = {60}, + gscites = {78}, + ss_id = {cb1b0ac31149df13be8ea554792b8379f31e3d7f}, + all_ss_ids = {['cb1b0ac31149df13be8ea554792b8379f31e3d7f']}, +} + +@article{Glas23, + author = {Glaser, Naomi and Bosman, Shannon and Madonsela, Thandanani and van Heerden, Alastair and Mashaete, Kamele and Katende, Bulemba and Ayakaka, Irene and Murphy, Keelin and Signorell, Aita and Lynen, Lutgarde and Bremerich, Jens and Reither, Klaus}, + title = {~~Incidental radiological findings during clinical tuberculosis screening in Lesotho and South Africa: a case series}, + doi = {10.1186/s13256-023-04097-4}, + year = {2023}, + abstract = {Abstract + Background + Chest X-ray offers high sensitivity and acceptable specificity as a tuberculosis screening tool, but in areas with a high burden of tuberculosis, there is often a lack of radiological expertise to interpret chest X-ray. Computer-aided detection systems based on artificial intelligence are therefore increasingly used to screen for tuberculosis-related abnormalities on digital chest radiographies. The CAD4TB software has previously been shown to demonstrate high sensitivity for chest X-ray tuberculosis-related abnormalities, but it is not yet calibrated for the detection of non-tuberculosis abnormalities. When screening for tuberculosis, users of computer-aided detection need to be aware that other chest pathologies are likely to be as prevalent as, or more prevalent than, active tuberculosis. 
However, non-tuberculosis chest X-ray abnormalities detected during chest X-ray screening for tuberculosis remain poorly characterized in the sub-Saharan African setting, with only minimal literature. + + Case presentation + In this case series, we report on four cases with non-tuberculosis abnormalities detected on CXR in TB TRIAGE + ACCURACY (ClinicalTrials.gov Identifier: NCT04666311), a study in adult presumptive tuberculosis cases at health facilities in Lesotho and South Africa to determine the diagnostic accuracy of two potential tuberculosis triage tests: computer-aided detection (CAD4TB v7, Delft, the Netherlands) and C-reactive protein (Alere Afinion, USA). The four Black African participants presented with the following chest X-ray abnormalities: a 59-year-old woman with pulmonary arteriovenous malformation, a 28-year-old man with pneumothorax, a 20-year-old man with massive bronchiectasis, and a 47-year-old woman with aspergilloma. + + Conclusions + Solely using chest X-ray computer-aided detection systems based on artificial intelligence as a tuberculosis screening strategy in sub-Saharan Africa comes with benefits, but also risks. Due to the limitation of CAD4TB for non-tuberculosis-abnormality identification, the computer-aided detection software may miss significant chest X-ray abnormalities that require treatment, as exemplified in our four cases. Increased data collection, characterization of non-tuberculosis anomalies and research on the implications of these diseases for individuals and health systems in sub-Saharan Africa is needed to help improve existing artificial intelligence software programs and their use in countries with high tuberculosis burden. + }, + url = {http://dx.doi.org/10.1186/s13256-023-04097-4}, + file = {Glas23.pdf:pdf\\Glas23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Medical Case Reports}, + automatic = {yes}, + citation-count = {0}, + volume = {17}, + pmid = {37620921}, +} + +@article{Gola21, + author = {Golatta, Michael and Pfob, Andr\'{e} and B\"{u}sch, Christopher and Bruckner, Thomas and Alwafai, Zaher and Balleyguier, Corinne and Clevert, Dirk-Andr\'{e} and Duda, Volker and Goncalo, Manuela and Gruber, Ines and Hahn, Markus and Kapetas, Panagiotis and Ohlinger, Ralf and Rutten, Matthieu and Tozaki, Mitsuhiro and Wojcinski, Sebastian and Rauch, Geraldine and Heil, J\"{o}rg and Barr, Richard G.}, + title = {~~The Potential of Shear Wave Elastography to Reduce Unnecessary Biopsies in Breast Cancer Diagnosis: An International, Diagnostic, Multicenter Trial}, + doi = {10.1055/a-1543-6156}, + year = {2021}, + abstract = {Abstract + Purpose In this prospective, multicenter trial we evaluated whether additional shear wave elastography (SWE) for patients with BI-RADS 3 or 4 lesions on breast ultrasound could further refine the assessment with B-mode breast ultrasound for breast cancer diagnosis. + Materials and Methods We analyzed prospective, multicenter, international data from 1288 women with breast lesions rated by conventional 2 D B-mode ultrasound as BI-RADS 3 to 4c and undergoing 2D-SWE. After reclassification with SWE the proportion of undetected malignancies should be < 2 %. All patients underwent histopathologic evaluation (reference standard). + Results Histopathologic evaluation showed malignancy in 368 of 1288 lesions (28.6 %). 
The assessment with B-mode breast ultrasound resulted in 1.39 % (6 of 431) undetected malignancies (malignant lesions in BI-RADS 3) and 53.80 % (495 of 920) unnecessary biopsies (biopsies in benign lesions). Re-classifying BI-RADS 4a patients with a SWE cutoff of 2.55 m/s resulted in 1.98 % (11 of 556) undetected malignancies and a reduction of 24.24 % (375 vs. 495) of unnecessary biopsies. + Conclusion A SWE value below 2.55 m/s for BI-RADS 4a lesions could be used to downstage these lesions to follow-up, and therefore reduce the number of unnecessary biopsies by 24.24 %. However, this would come at the expense of some additionally missed cancers compared to B-mode breast ultrasound (rate of undetected malignancies 1.98 %, 11 of 556, versus 1.39 %, 6 of 431) which would, however, still be in line with the ACR BI-RADS 3 definition (< 2 % of undetected malignancies).}, + url = {http://dx.doi.org/10.1055/a-1543-6156}, + file = {Gola21.pdf:pdf\\Gola21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Ultraschall in der Medizin - European Journal of Ultrasound}, + automatic = {yes}, + citation-count = {6}, + pages = {162-168}, + volume = {44}, + pmid = {34425600}, +} + +@article{Gola22, + author = {Golatta, Michael and Pfob, Andr\'{e} and B\"{u}sch, Christopher and Bruckner, Thomas and Alwafai, Zaher and Balleyguier, Corinne and Clevert, Dirk-Andr\'{e} and Duda, Volker and Goncalo, Manuela and Gruber, Ines and Hahn, Markus and Kapetas, Panagiotis and Ohlinger, Ralf and Rutten, Matthieu and Togawa, Riku and Tozaki, Mitsuhiro and Wojcinski, Sebastian and Rauch, Geraldine and Heil, Joerg and Barr, Richard G.}, + title = {~~The potential of combined shear wave and strain elastography to reduce unnecessary biopsies in breast cancer diagnostics - An international, multicentre trial}, + doi = {10.1016/j.ejca.2021.11.005}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejca.2021.11.005}, + file = {Gola22.pdf:pdf\\Gola22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Cancer}, + automatic = {yes}, + citation-count = {19}, + pages = {1-9}, + volume = {161}, + pmid = {34879299}, } @conference{Gome17, @@ -7747,15 +9667,16 @@ @conference{Gome17 booktitle = ARVO, year = {2017}, abstract = {Purpose : To assess the performance of deep learning architectures based on convolutional neural networks (CNN) for the diagnosis of glaucoma in screening campaigns using color fundus images. - - Methods : Two independent data sets were used to develop and evaluate the proposed method. 1) 805 color fundus images with a field of view of 45 degrees, centered on the macula and including the optic disc (OD) from patients with age ranging from 55 to 86 years old included in a glaucoma detection campaign performed at Hospital Esperanza (Barcelona). Annotations were performed by eight observers having 8 to 26 years of clinical experience. 2) 101 images from the publicly available Drishti-GS retinal image dataset (http://cvit.iiit.ac.in/projects/mip/drishti-gs/mip-dataset2/Home.php). The total 906 images were further organized into a training, monitoring and test set according to a 60-20-20 split. The process to train and validate the CNN had 3 steps. 1) Preprocessing: the edges and the background were blurred to reduce the effect of the bright fringe and the border. Then patches centered at the OD of size 256x256x3 pixels were automatically segmented and scaled to values from 0 to 1. 
2) Implementation: The architecture consisted of ten convolutional layers (32 filters 3x3 pixels size) followed by rectified linear units and spatial max-pooling. The network ends with a fully connected layer and a soft-max classifier which outputs a score from 0 to 1. The network was trained using stochastic gradient descent and a learning rate of 0.005. To avoid overfitting data augmentation was performed applying randomly translations, flipping and rotations during the training, and dropout with probability of 0.5. 3) Monitoring and evaluation: the training was completed after 50 epochs. To evaluate the classification capabilities of the algorithm, the area under the receiver operating characteristic curve (ROC) was calculated using the training set. - - Results : An automatic classification algorithm based on CNN was developed. The present method achieved an area under the ROC of 0.894. The accuracy to identify healthy and glaucoma cases was 0.884 and 0.781 respectively, using a threshold of 0.5. - - Conclusions : The good performance of the proposed CNN architecture suggests potential usefulness of these methods for an initial automatic classification of images in screening campaigns for glaucoma.}, + + Methods : Two independent data sets were used to develop and evaluate the proposed method. 1) 805 color fundus images with a field of view of 45 degrees, centered on the macula and including the optic disc (OD) from patients with age ranging from 55 to 86 years old included in a glaucoma detection campaign performed at Hospital Esperanza (Barcelona). Annotations were performed by eight observers having 8 to 26 years of clinical experience. 2) 101 images from the publicly available Drishti-GS retinal image dataset (http://cvit.iiit.ac.in/projects/mip/drishti-gs/mip-dataset2/Home.php). The total 906 images were further organized into a training, monitoring and test set according to a 60-20-20 split. The process to train and validate the CNN had 3 steps. 1) Preprocessing: the edges and the background were blurred to reduce the effect of the bright fringe and the border. Then patches centered at the OD of size 256x256x3 pixels were automatically segmented and scaled to values from 0 to 1. 2) Implementation: The architecture consisted of ten convolutional layers (32 filters 3x3 pixels size) followed by rectified linear units and spatial max-pooling. The network ends with a fully connected layer and a soft-max classifier which outputs a score from 0 to 1. The network was trained using stochastic gradient descent and a learning rate of 0.005. To avoid overfitting, data augmentation was performed by randomly applying translations, flips and rotations during training, together with dropout with a probability of 0.5. 3) Monitoring and evaluation: the training was completed after 50 epochs. To evaluate the classification capabilities of the algorithm, the area under the receiver operating characteristic curve (ROC) was calculated using the training set. + + Results : An automatic classification algorithm based on CNN was developed. The present method achieved an area under the ROC of 0.894. The accuracy to identify healthy and glaucoma cases was 0.884 and 0.781, respectively, using a threshold of 0.5. 
+ + Conclusions : The good performance of the proposed CNN architecture suggests potential usefulness of these methods for an initial automatic classification of images in screening campaigns for glaucoma.}, optnote = {DIAG, RADIOLOGY}, gsid = {11583482517657678688}, gscites = {2}, + all_ss_ids = {8b92dcfb8d8b92314d63de92852a28880a81f4ea}, } @article{Gome19, @@ -7775,7 +9696,9 @@ @article{Gome19 pmid = {30800522}, publisher = {OSA}, gsid = {5163022841600007243}, - gscites = {29}, + gscites = {127}, + ss_id = {b5f1ef811b206720957f6c4412c9a2ad9bf480c9}, + all_ss_ids = {['b5f1ef811b206720957f6c4412c9a2ad9bf480c9']}, } @inproceedings{Gonz18, @@ -7788,7 +9711,9 @@ @inproceedings{Gonz18 file = {:pdf/Gonz18.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {14923642687050576388}, - gscites = {3}, + gscites = {4}, + ss_id = {05078a4b34fbf98a940b3ca7f97773632c787397}, + all_ss_ids = {['05078a4b34fbf98a940b3ca7f97773632c787397']}, } @conference{Gonz19a, @@ -7797,14 +9722,16 @@ @conference{Gonz19a url = {https://iovs.arvojournals.org/article.aspx?articleid=2746850}, title = {Opening the "black box" of deep learning in automated screening of eye diseases}, abstract = {Purpose: Systems based on deep learning (DL) have been demonstrated to provide a scalable and high-performance solution for screening of eye diseases. However, DL is usually considered a "black box" due to lack of interpretability. We propose a deep visualization framework to explain the decisions made by a DL system, iteratively unveiling abnormalities responsible for referable predictions without needing lesion-level annotations. We apply the framework to automated screening of diabetic retinopathy (DR) in color fundus images (CFIs). - - Methods: The proposed framework consists of a baseline deep convolutional neural network to classify CFIs by DR stage. For each CFI classified as referable DR, the framework extracts initial visual evidence of the predicted stage by computing a saliency map, which indicates regions in the image that would contribute the most to changes in the prediction if modified. This provides localization of abnormalities that are then removed through selective inpainting. The image is again classified, expecting reduced referability. We iteratively apply this procedure to increase attention to less discriminative areas and generate refined visual evidence. The Kaggle DR database, with CFIs graded regarding DR severity (stages 0 and 1: non-referable DR, stages 2 to 4: referable DR), is used for training and validation of the image-level classification task. For validation of the obtained visual evidence, we used the DiaretDB1 dataset, which contains CFIs with manually-delineated areas for 4 types of lesions: hemorrhages, microaneurysms, hard and soft exudates. - - Results: The baseline classifier obtained an area under the Receiver Operating Characteristic (ROC) curve of 0.93 and a quadratic weighted kappa of 0.77 on the Kaggle test set (53576 CFIs). Free-response ROC (FROC) curves (Figure 2) analyze the correspondence between highlighted areas and each type of lesion for those images classified as referable DR in the DiaretDB1 dataset (62 CFIs), comparing between initial and refined visual evidence. - - Conclusions : The proposed framework provides visual evidence for the decisions made by a DL system, iteratively unveiling abnormalities in CFIs based on the prediction of a classifier trained only with image-level labels. This provides a "key" to open the "black box" 
of artificial intelligence in screening of eye diseases, aiming to increase experts' trust and facilitate its integration in screening settings.}, + + Methods: The proposed framework consists of a baseline deep convolutional neural network to classify CFIs by DR stage. For each CFI classified as referable DR, the framework extracts initial visual evidence of the predicted stage by computing a saliency map, which indicates regions in the image that would contribute the most to changes in the prediction if modified. This provides localization of abnormalities that are then removed through selective inpainting. The image is again classified, expecting reduced referability. We iteratively apply this procedure to increase attention to less discriminative areas and generate refined visual evidence. The Kaggle DR database, with CFIs graded regarding DR severity (stages 0 and 1: non-referable DR, stages 2 to 4: referable DR), is used for training and validation of the image-level classification task. For validation of the obtained visual evidence, we used the DiaretDB1 dataset, which contains CFIs with manually-delineated areas for 4 types of lesions: hemorrhages, microaneurysms, hard and soft exudates. + + Results: The baseline classifier obtained an area under the Receiver Operating Characteristic (ROC) curve of 0.93 and a quadratic weighted kappa of 0.77 on the Kaggle test set (53576 CFIs). Free-response ROC (FROC) curves (Figure 2) analyze the correspondence between highlighted areas and each type of lesion for those images classified as referable DR in the DiaretDB1 dataset (62 CFIs), comparing between initial and refined visual evidence. + + Conclusions : The proposed framework provides visual evidence for the decisions made by a DL system, iteratively unveiling abnormalities in CFIs based on the prediction of a classifier trained only with image-level labels. This provides a "key? to open the "black box? of artificial intelligence in screening of eye diseases, aiming to increase experts' trust and facilitate its integration in screening settings.}, optnote = {DIAG, RADIOLOGY}, year = {2019}, + all_ss_ids = {80af090645088134f058db53a708b7092dd28786}, + gscites = {0}, } @article{Gonz19b, @@ -7830,6 +9757,9 @@ @article{Gonz20 abstract = {Interpretability of deep learning (DL) systems is gaining attention in medical imaging to increase experts' trust in the obtained predictions and facilitate their integration in clinical settings. We propose a deep visualization method to generate interpretability of DL classification tasks in medical imaging by means of visual evidence augmentation. The proposed method iteratively unveils abnormalities based on the prediction of a classifier trained only with image-level labels. For each image, initial visual evidence of the prediction is extracted with a given visual attribution technique. This provides localization of abnormalities that are then removed through selective inpainting. We iteratively apply this procedure until the system considers the image as normal. This yields augmented visual evidence, including less discriminative lesions which were not detected at first but should be considered for final diagnosis. We apply the method to grading of two retinal diseases in color fundus images: diabetic retinopathy (DR) and age-related macular degeneration (AMD). We evaluate the generated visual evidence and the performance of weakly-supervised localization of different types of DR and AMD abnormalities, both qualitatively and quantitatively. 
We show that the augmented visual evidence of the predictions highlights the biomarkers considered by experts for diagnosis and improves the final localization performance. It results in a relative increase of 11.2+-2.0% per image regarding sensitivity averaged at 10 false positives/image on average, when applied to different classification tasks, visual attribution techniques and network architectures. This makes the proposed method a useful tool for exhaustive visual support of DL classifiers in medical imaging.}, file = {Gonz20.pdf:pdf\\Gonz20.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {44cb242be984781ad380e9f785157c13384d0026}, + all_ss_ids = {['44cb242be984781ad380e9f785157c13384d0026']}, + gscites = {20}, } @article{Gonz20a, @@ -7847,7 +9777,9 @@ @article{Gonz20a year = {2020}, month = {11}, gsid = {12212989465195722761}, - gscites = {9}, + gscites = {59}, + ss_id = {d0760faf253e27b6c105d76d06acc4c6ab3674c7}, + all_ss_ids = {['d0760faf253e27b6c105d76d06acc4c6ab3674c7']}, } @conference{Gonz20c, @@ -7856,15 +9788,15 @@ @conference{Gonz20c url = {https://www.euretina.org/congress/amsterdam-2020/virtual-2020-freepapers/}, title = {Are adversarial attacks an actual threat for deep learning systems in real-world eye disease screening settings?}, abstract = {Purpose: - Deep learning (DL) systems that perform image-level classification with convolutional neural networks (CNNs) have been shown to provide high-performance solutions for automated screening of eye diseases. Nevertheless, adversarial attacks have been recently screening settings, where there is restricted access to the systems and limited knowledge about certain factors, such as their CNN architecture or the data used for development. - Setting: - Deep learning for automated screening of eye diseases. - Methods: - We used the Kaggle dataset for diabetic retinopathy detection. It contains 88,702 manually-labelled color fundus images, which we split into test (12%) and development (88%). Development data were split into two equally-sized sets (d1 and d2); a third set (d3) was generated using half of the images in d2. In each development set, 80%/20% of the images were used for training/validation. All splits were done randomly at patient-level. As attacked system, we developed a randomly-initialized CNN based on the Inception-v3 architecture using d1. We performed the attacks (1) in a white-box (WB) setting, with full access to the attacked system to generate the adversarial images, and (2) in black-box (BB) settings, without access to the attacked system and using a surrogate system to craft the attacks. We simulated different BB settings, sequentially decreasing the available knowledge about the attacked system: same architecture, using d1 (BB-1); different architecture (randomly-initialized DenseNet-121), using d1 (BB-2); same architecture, using d2 (BB-3); different architecture, using d2 (BB-4); different architecture, using d3 (BB-5). In each setting, adversarial images containing non-perceptible noise were generated by applying the fast gradient sign method to each image of the test set and processed by the attacked system. - Results: - The performance of the attacked system to detect referable diabetic retinopathy without attacks and under the different attack settings was measured on the test set using the area under the receiver operating characteristic curve (AUC). Without attacks, the system achieved an AUC of 0.88. 
In each attack setting, the relative decrease in AUC with respect to the original performance was computed. In the WB setting, there was a 99.9% relative decrease in performance. In the BB-1 setting, the relative decrease in AUC was 67.3%. In the BB-2 setting, the AUC suffered a 40.2% relative decrease. In the BB-3 setting, the relative decrease was 37.9%. In the BB-4 setting, the relative decrease in AUC was 34.1%. Lastly, in the BB-5 setting, the performance of the attacked system decreased 3.8% regarding its original performance. - Conclusions: - The results obtained in the different settings show a drastic decrease of the attacked DL system's vulnerability to adversarial attacks when the access and knowledge about it are limited. The impact on performance is extremely reduced when restricting the direct access to the system (from the WB to the BB-1 setting). The attacks become slightly less effective when not having access to the same development data (BB-3), compared to not using the same CNN architecture (BB-2). Attacks' effectiveness further decreases when both factors are unknown (BB-4). If the amount of development data is additionally reduced (BB-5), the original performance barely deteriorates. This last setting is the most similar to realistic screening settings, since most systems are currently closed source and use additional large private datasets for development. In conclusion, these factors should be acknowledged for future development of robust DL systems, as well as considered when evaluating the vulnerability of currently-available systems to adversarial attacks. Having limited access and knowledge about the systems determines the actual threat these attacks pose. We believe awareness about this matter will increase experts' trust and facilitate the integration of DL systems in real-world settings.}, + Deep learning (DL) systems that perform image-level classification with convolutional neural networks (CNNs) have been shown to provide high-performance solutions for automated screening of eye diseases. Nevertheless, adversarial attacks have been recently screening settings, where there is restricted access to the systems and limited knowledge about certain factors, such as their CNN architecture or the data used for development. + Setting: + Deep learning for automated screening of eye diseases. + Methods: + We used the Kaggle dataset for diabetic retinopathy detection. It contains 88,702 manually-labelled color fundus images, which we split into test (12%) and development (88%). Development data were split into two equally-sized sets (d1 and d2); a third set (d3) was generated using half of the images in d2. In each development set, 80%/20% of the images were used for training/validation. All splits were done randomly at patient-level. As attacked system, we developed a randomly-initialized CNN based on the Inception-v3 architecture using d1. We performed the attacks (1) in a white-box (WB) setting, with full access to the attacked system to generate the adversarial images, and (2) in black-box (BB) settings, without access to the attacked system and using a surrogate system to craft the attacks. We simulated different BB settings, sequentially decreasing the available knowledge about the attacked system: same architecture, using d1 (BB-1); different architecture (randomly-initialized DenseNet-121), using d1 (BB-2); same architecture, using d2 (BB-3); different architecture, using d2 (BB-4); different architecture, using d3 (BB-5). 
In each setting, adversarial images containing non-perceptible noise were generated by applying the fast gradient sign method to each image of the test set and processed by the attacked system. + Results: + The performance of the attacked system to detect referable diabetic retinopathy without attacks and under the different attack settings was measured on the test set using the area under the receiver operating characteristic curve (AUC). Without attacks, the system achieved an AUC of 0.88. In each attack setting, the relative decrease in AUC with respect to the original performance was computed. In the WB setting, there was a 99.9% relative decrease in performance. In the BB-1 setting, the relative decrease in AUC was 67.3%. In the BB-2 setting, the AUC suffered a 40.2% relative decrease. In the BB-3 setting, the relative decrease was 37.9%. In the BB-4 setting, the relative decrease in AUC was 34.1%. Lastly, in the BB-5 setting, the performance of the attacked system decreased 3.8% regarding its original performance. + Conclusions: + The results obtained in the different settings show a drastic decrease of the attacked DL system's vulnerability to adversarial attacks when the access and knowledge about it are limited. The impact on performance is extremely reduced when restricting the direct access to the system (from the WB to the BB-1 setting). The attacks become slightly less effective when not having access to the same development data (BB-3), compared to not using the same CNN architecture (BB-2). Attacks' effectiveness further decreases when both factors are unknown (BB-4). If the amount of development data is additionally reduced (BB-5), the original performance barely deteriorates. This last setting is the most similar to realistic screening settings, since most systems are currently closed source and use additional large private datasets for development. In conclusion, these factors should be acknowledged for future development of robust DL systems, as well as considered when evaluating the vulnerability of currently-available systems to adversarial attacks. Having limited access and knowledge about the systems determines the actual threat these attacks pose. We believe awareness about this matter will increase experts' trust and facilitate the integration of DL systems in real-world settings.}, optnote = {DIAG, RADIOLOGY}, year = {2020}, month = {9}, @@ -7876,32 +9808,32 @@ @conference{Gonz21 url = {https://iovs.arvojournals.org/article.aspx?articleid=2773295}, title = {Hierarchical curriculum learning for robust automated detection of low-prevalence retinal disease features: application to reticular pseudodrusen}, abstract = {Purpose: The low prevalence of certain retinal disease features compromises data collection for deep neural networks (DNN) development and, consequently, the benefits of automated detection. We robustify the detection of such features in scarce data settings by exploiting hierarchical information available in the data to learn from generic to specific, low-prevalence features. We focus on reticular pseudodrusen (RPD), a hallmark of intermediate age-related macular degeneration (AMD). - - Methods: Color fundus images (CFI) from the AREDS dataset were used for DNN development (106,994 CFI) and testing (27,066 CFI). An external test set (RS1-6) was generated with 2,790 CFI from the Rotterdam Study. In both datasets CFI were graded from generic to specific features. 
This allows to establish a hierarchy of binary classification tasks with decreasing prevalence: presence of AMD findings (AREDS prevalence: 88%; RS1-6: 77%), drusen (85%; 73%), large drusen (40%; 24%), RPD (1%; 4%). We created a hierarchical curriculum and developed a DNN (HC-DNN) that learned each task sequentially. We computed its performance for RPD detection in both test sets and compared it to a baseline DNN (B-DNN) that learned to detect RPD from scratch disregarding hierarchical information. We studied their robustness across datasets, while reducing the size of data available for development (same prevalences) - - Results: Area under the receiver operating characteristic curve (AUC) was used to measure RPD detection performance. When large development data were available, there was no significant difference between DNNs (100% data, HC-DNN: 0.96 (95% CI, 0.94-0.97) in AREDS, 0.82 (0.78-0.86) in RS1-6; B-DNN: 0.95 (0.94-0.96) in AREDS, 0.83 (0.79-0.87) in RS1-6). However, HC-DNN achieved better performance and robustness across datasets when development data were highly reduced (<50% data, p-values<0.05) (1% data, HC-DNN: 0.63 (0.60-0.66) in AREDS, 0.76 (0.72-0.80) in RS1-6; B-DNN: 0.53 (0.49-0.56) in AREDS, 0.48 (0.42-0.53) in RS1-6). - - Conclusions: Hierarchical curriculum learning allows for knowledge transfer from general, higher-prevalence features and becomes beneficial for the detection of low-prevalence retinal features, such as RPD, in scarce data settings. Moreover, exploiting hierarchical information improves DNN robustness across datasets.}, + + Methods: Color fundus images (CFI) from the AREDS dataset were used for DNN development (106,994 CFI) and testing (27,066 CFI). An external test set (RS1-6) was generated with 2,790 CFI from the Rotterdam Study. In both datasets CFI were graded from generic to specific features. This allows to establish a hierarchy of binary classification tasks with decreasing prevalence: presence of AMD findings (AREDS prevalence: 88%; RS1-6: 77%), drusen (85%; 73%), large drusen (40%; 24%), RPD (1%; 4%). We created a hierarchical curriculum and developed a DNN (HC-DNN) that learned each task sequentially. We computed its performance for RPD detection in both test sets and compared it to a baseline DNN (B-DNN) that learned to detect RPD from scratch disregarding hierarchical information. We studied their robustness across datasets, while reducing the size of data available for development (same prevalences) + + Results: Area under the receiver operating characteristic curve (AUC) was used to measure RPD detection performance. When large development data were available, there was no significant difference between DNNs (100% data, HC-DNN: 0.96 (95% CI, 0.94-0.97) in AREDS, 0.82 (0.78-0.86) in RS1-6; B-DNN: 0.95 (0.94-0.96) in AREDS, 0.83 (0.79-0.87) in RS1-6). However, HC-DNN achieved better performance and robustness across datasets when development data were highly reduced (<50% data, p-values<0.05) (1% data, HC-DNN: 0.63 (0.60-0.66) in AREDS, 0.76 (0.72-0.80) in RS1-6; B-DNN: 0.53 (0.49-0.56) in AREDS, 0.48 (0.42-0.53) in RS1-6). + + Conclusions: Hierarchical curriculum learning allows for knowledge transfer from general, higher-prevalence features and becomes beneficial for the detection of low-prevalence retinal features, such as RPD, in scarce data settings. 
Moreover, exploiting hierarchical information improves DNN robustness across datasets.}, optnote = {DIAG, RADIOLOGY}, year = {2021}, } -@Conference{Gonz21a, - author = {Gonz\'{a}lez-Gonzalo, Cristina and Thee, Eric F. and Liefers, Bart and Klaver, Caroline C.W. and S\'{a}nchez, Clara I.}, +@conference{Gonz21a, + author = {Gonz\'{a}lez-Gonzalo, Cristina and Thee, Eric F. and Liefers, Bart and Klaver, Caroline C.W. and S\'{a}nchez, Clara I.}, booktitle = EURETINA, - title = {Deep learning for automated stratification of ophthalmic images: Application to age-related macular degeneration and color fundus images}, - url = {https://euretina.org/resource/abstract_2021_deep-learning-for-automated-stratification-of-ophthalmic-images-application-to-age-related-macular-degeneration-and-color-fundus-images/}, - abstract = {Purpose: Deep learning (DL) systems based on convolutional neural networks (CNNs) have achieved expert-level performance in different classification tasks, and have shown the potential to reduce current experts' workload significantly. We explore this potential in the context of automated stratification of ophthalmic images. DL could accelerate the setup of clinical studies by filtering large amounts of images or patients based on specific inclusion criteria, as well as aid in patient selection for clinical trials. DL could also allow for automated categorization of entering images in busy clinical or screening settings, enhancing data triaging, searching, retrieval, and comparison. Automated stratification could also facilitate data collection and application of further DL-based phenotyping analysis, by generating useful sets of images for expert annotation, training, or testing of segmentation algorithms. In our work, we focus on the stratification of color fundus images (CFI) based on multiple features related to age-related macular degeneration (AMD) at different hierarchical levels. We further analyze the robustness of the automated stratification system when the amount of data available for development is limited. We performed our validation on two different population studies. - - Setting/Venue: Deep learning applied to ophthalmic imaging. - - Methods: Automated stratification of CFI was performed based on the presence or absence of the following AMD features, following a hierarchical tree with different branches (Bi) and levels (Hi) from generic features (H0) to specific features (H3): AMD findings (H0); B1: drusen (H1), large drusen (H2), reticular pseudodrusen (H3); B2: pigmentary changes (H1), hyperpigmentation (H2), hypopigmentation (H2); B3: late AMD (H1), geographic atrophy (H2), choroidal neovascularization (H2). The automated stratification system consisted of a set of CNNs (based on the Inception-v3 architecture) able to classify the multiple AMD features (presence/absence) at higher and lower levels. This allowed to automatically stratify incoming CFI into the hierarchical tree. CFI from the AREDS dataset were used for development (106,994 CFI) and testing (27,066 CFI) of the CNNs. We validated the robustness of the system to a gradual decrease in the amount of data available for development (100%, 75%, 50%, 25%, 10%, 5%, 2.5%, and 1% of development data). An external test set (RS1-6) was generated with 2,790 CFI from the Rotterdam Study. This allowed to validate the performance of the automated stratification across studies where different CFI grading protocols were used. 
- - Results: Area under the receiver operating characteristic curve (AUC) was used to measure the performance of each feature's classification within the automated stratification. The AUC averaged across AMD features when 100% of development data was available was 93.8% (95% CI, 93.4%-94.2%) in AREDS and 84.4% (82.1%-86.5%) in RS1-6. There was an average relative decrease in performance of 10.0+-4.7% between AREDS and the external test set, RS1-6. The performance of the system decreased gradually with each development data reduction. When only 1% of data was available for development, the average AUC was 81.9% (81.0%-82.8%) in AREDS and 74.0% (70.8%-77.0%) in RS1-6. This corresponded to an average relative decrease in performance of 12.7+-13.2% in AREDS and 12.6+-7.8% in RS1-6. - - Conclusions: The automated stratification system achieved overall high performance in the classification of different features independently of their hierarchical level. This shows the potential of DL systems to identify diverse phenotypes and to obtain an accurate automated stratification of CFI. The results showed that automated stratification was also robust to a dramatic reduction in the data available for development, maintaining the average AUC above 80%. This is a positive observation, considering that the amount of data available for DL development can be limited in some settings, and the gradings can be costly to obtain. Nevertheless, variability in performance across features could be observed, especially for those with very low prevalence, such as reticular pseudodrusen, where performance became more unstable when few data were available. - - The external validation showed these observations held when the automated stratification was applied in a different population study, with an expected (but not drastic) drop in performance due to differences between datasets and their grading protocols. In conclusion, our work supports that DL is a powerful tool for the filtering and stratification of ophthalmic images, and has the potential to reduce the workload of experts while supporting them in research and clinical settings.}, - optnote = {DIAG, RADIOLOGY}, - year = {2021}, + title = {Deep learning for automated stratification of ophthalmic images: Application to age-related macular degeneration and color fundus images}, + url = {https://euretina.org/resource/abstract_2021_deep-learning-for-automated-stratification-of-ophthalmic-images-application-to-age-related-macular-degeneration-and-color-fundus-images/}, + abstract = {Purpose: Deep learning (DL) systems based on convolutional neural networks (CNNs) have achieved expert-level performance in different classification tasks, and have shown the potential to reduce current experts' workload significantly. We explore this potential in the context of automated stratification of ophthalmic images. DL could accelerate the setup of clinical studies by filtering large amounts of images or patients based on specific inclusion criteria, as well as aid in patient selection for clinical trials. DL could also allow for automated categorization of incoming images in busy clinical or screening settings, enhancing data triaging, searching, retrieval, and comparison. Automated stratification could also facilitate data collection and application of further DL-based phenotyping analysis, by generating useful sets of images for expert annotation, training, or testing of segmentation algorithms.
In our work, we focus on the stratification of color fundus images (CFI) based on multiple features related to age-related macular degeneration (AMD) at different hierarchical levels. We further analyze the robustness of the automated stratification system when the amount of data available for development is limited. We performed our validation on two different population studies. + + Setting/Venue: Deep learning applied to ophthalmic imaging. + + Methods: Automated stratification of CFI was performed based on the presence or absence of the following AMD features, following a hierarchical tree with different branches (Bi) and levels (Hi) from generic features (H0) to specific features (H3): AMD findings (H0); B1: drusen (H1), large drusen (H2), reticular pseudodrusen (H3); B2: pigmentary changes (H1), hyperpigmentation (H2), hypopigmentation (H2); B3: late AMD (H1), geographic atrophy (H2), choroidal neovascularization (H2). The automated stratification system consisted of a set of CNNs (based on the Inception-v3 architecture) able to classify the multiple AMD features (presence/absence) at higher and lower levels. This allowed us to automatically stratify incoming CFI into the hierarchical tree. CFI from the AREDS dataset were used for development (106,994 CFI) and testing (27,066 CFI) of the CNNs. We validated the robustness of the system to a gradual decrease in the amount of data available for development (100%, 75%, 50%, 25%, 10%, 5%, 2.5%, and 1% of development data). An external test set (RS1-6) was generated with 2,790 CFI from the Rotterdam Study. This allowed us to validate the performance of the automated stratification across studies where different CFI grading protocols were used. + + Results: Area under the receiver operating characteristic curve (AUC) was used to measure the performance of each feature's classification within the automated stratification. The AUC averaged across AMD features when 100% of development data was available was 93.8% (95% CI, 93.4%-94.2%) in AREDS and 84.4% (82.1%-86.5%) in RS1-6. There was an average relative decrease in performance of 10.0+-4.7% between AREDS and the external test set, RS1-6. The performance of the system decreased gradually with each development data reduction. When only 1% of data was available for development, the average AUC was 81.9% (81.0%-82.8%) in AREDS and 74.0% (70.8%-77.0%) in RS1-6. This corresponded to an average relative decrease in performance of 12.7+-13.2% in AREDS and 12.6+-7.8% in RS1-6. + + Conclusions: The automated stratification system achieved overall high performance in the classification of different features independently of their hierarchical level. This shows the potential of DL systems to identify diverse phenotypes and to obtain an accurate automated stratification of CFI. The results showed that automated stratification was also robust to a dramatic reduction in the data available for development, maintaining the average AUC above 80%. This is a positive observation, considering that the amount of data available for DL development can be limited in some settings, and the gradings can be costly to obtain. Nevertheless, variability in performance across features could be observed, especially for those with very low prevalence, such as reticular pseudodrusen, where performance became more unstable when few data were available.
The external validation showed these observations held when the automated stratification was applied in a different population study, with an expected (but not drastic) drop in performance due to differences between datasets and their grading protocols. In conclusion, our work supports that DL is a powerful tool for the filtering and stratification of ophthalmic images, and has the potential to reduce the workload of experts while supporting them in research and clinical settings.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, } @conference{Gonz21b, @@ -7910,20 +9842,20 @@ @conference{Gonz21b url = {https://journals.sagepub.com/doi/full/10.1177/11206721211047031}, title = {Trustworthy AI: closing the gap between development and integration of AI in Ophthalmology}, abstract = {Design: Descriptive study. - - Purpose: To identify the main aspects that currently complicate the integration of artificial intelligence (AI) in ophthalmic settings. - - Methods: Based on an extensive review of state-of-the-art literature on AI applied to Ophthalmology plus interviews with multidisciplinary, international experts, we identified the most relevant aspects to consider during AI design to generate trustworthy (i.e., transparent, robust, and sustainable) AI systems and, consequently, facilitate a subsequent successful integration in real-world ophthalmic settings. - - Results: Several essential aspects to consider were identified: - 1) The reliability of the human annotations that are used for establishing the reference standard an AI system learns from, or for setting robust observer studies that allow for fair human-AI performance comparison. - 2) The ability of an AI system to generalize across populations, ophthalmic settings, and data acquisition protocols in order to avoid the negative consequences of algorithmic bias and lack of domain adaptation. - 3) The integration of multimodal data for AI development to consider multiple contexts when available (phenotyping, genotyping, systemic variables, patient medical history...). - 4) The importance of providing interpretable AI-based predictions to open the "black box" and increase trust and clinical usability. - 5) A plan to monitor the impact of AI on the clinical workflow, i.e., the adaptation of healthcare providers and patients to the new technology, human-AI interaction, cost-benefit analyses... - 6) The necessity to update current regulations to accelerate and control AI integration and all related aspects, such as patient privacy, systems' updates, and liability. - - Conclusions: It is important that healthcare providers in Ophthalmology consider these aspects and their consequences when thinking of AI in practice. It is key that all involved stakeholders collaborate and interact from the beginning of the AI design process to ensure a good alignment with real-world clinical needs and settings. This way, it will be possible to generate trustworthy AI solutions and close the gap between development and deployment, so that the AI benefits currently shown on paper reach the final users.}, + + Purpose: To identify the main aspects that currently complicate the integration of artificial intelligence (AI) in ophthalmic settings.
+ + Methods: Based on an extensive review of state-of-the-art literature on AI applied to Ophthalmology plus interviews with multidisciplinary, international experts, we identified the most relevant aspects to consider during AI design to generate trustworthy (i.e., transparent, robust, and sustainable) AI systems and, consequently, facilitate a subsequent successful integration in real-world ophthalmic settings. + + Results: Several essential aspects to consider were identified: + 1) The reliability of the human annotations that are used for establishing the reference standard an AI system learns from, or for setting robust observer studies that allow for fair human-AI performance comparison. + 2) The ability of an AI system to generalize across populations, ophthalmic settings, and data acquisition protocols in order to avoid the negative consequences of algorithmic bias and lack of domain adaptation. + 3) The integration of multimodal data for AI development to consider multiple contexts when available (phenotyping, genotyping, systemic variables, patient medical history...). + 4) The importance of providing interpretable AI-based predictions to open the "black box" and increase trust and clinical usability. + 5) A plan to monitor the impact of AI on the clinical workflow, i.e., the adaptation of healthcare providers and patients to the new technology, human-AI interaction, cost-benefit analyses... + 6) The necessity to update current regulations to accelerate and control AI integration and all related aspects, such as patient privacy, systems' updates, and liability. + + Conclusions: It is important that healthcare providers in Ophthalmology consider these aspects and their consequences when thinking of AI in practice. It is key that all involved stakeholders collaborate and interact from the beginning of the AI design process to ensure a good alignment with real-world clinical needs and settings. This way, it will be possible to generate trustworthy AI solutions and close the gap between development and deployment, so that the AI benefits currently shown on paper reach the final users.}, optnote = {DIAG, RADIOLOGY}, year = {2021}, } @@ -7937,6 +9869,9 @@ @article{Gonz21c year = {2021}, abstract = {An increasing number of artificial intelligence (AI) systems are being proposed in ophthalmology, motivated by the variety and amount of clinical and imaging data, as well as their potential benefits at the different stages of patient care. Despite achieving close or even superior performance to that of experts, there is a critical gap between development and integration of AI systems in ophthalmic practice. This work focuses on the importance of trustworthy AI to close that gap. We identify the main aspects or challenges that need to be considered along the AI design pipeline so as to generate systems that meet the requirements to be deemed trustworthy, including those concerning accuracy, resiliency, reliability, safety, and accountability. We elaborate on mechanisms and considerations to address those aspects or challenges, and define the roles and responsibilities of the different stakeholders involved in AI for ophthalmic care, i.e., AI developers, reading centers, healthcare providers, healthcare institutions, ophthalmological societies and working groups or committees, patients, regulatory bodies, and payers. Generating trustworthy AI is not the responsibility of a sole stakeholder.
There is a pressing need for a collaborative approach where the different stakeholders are represented along the AI design pipeline, from the definition of the intended use to post-market surveillance after regulatory approval. This work contributes to establishing such multi-stakeholder interaction and the main action points to be taken so that the potential benefits of AI reach real-world ophthalmic settings.}, optnote = {DIAG, INPRESS}, + ss_id = {14ac8b3d719781be45a46e7d33fdee702eecce2d}, + all_ss_ids = {['14ac8b3d719781be45a46e7d33fdee702eecce2d']}, + gscites = {25}, } @article{Goud20, @@ -7952,6 +9887,44 @@ @article{Goud20 url = {https://www.bjutijdschriften.nl/doi/10.5553/TvT/187987052020011001008}, file = {Goud20.pdf:pdf\\Goud20.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {83307292dabff3b5669d5df253867f1d47f5363f}, + all_ss_ids = {['83307292dabff3b5669d5df253867f1d47f5363f']}, + gscites = {0}, +} + +@conference{Graa22, + title = {Segmentation of vertebrae and intervertebral discs in lumbar spine MR images with iterative instance segmentation}, + author = {van der Graaf, Jasper W and van Hooff, Miranda L and Buckens, Constantinus FM and Lessmann, Nikolas}, + booktitle = {Medical Imaging 2022: Image Processing}, + volume = {12032}, + pages = {909--913}, + year = {2022}, + abstract = {Segmentation of vertebrae and intervertebral discs (IVD) in MR images is an important step for automatic image analysis. This paper proposes an extension of an iterative vertebra segmentation method that relies on a 3D fully-convolutional neural network to segment the vertebrae one-by-one. We augment this approach with an additional segmentation step following each vertebra detection to also segment the IVD below each vertebra. To train and test the algorithm, we collected and annotated T2-weighted sagittal lumbar spine MR scans of 53 patients. The presented approach achieved a mean Dice score of 93 % +- 2 % for vertebra segmentation and 86 % +- 7 % for IVD segmentation. The method was able to cope with pathological abnormalities such as compression fractures, Schmorl's nodes and collapsed IVDs. In comparison, a similar network trained for IVD segmentation without knowledge of the adjacent vertebra segmentation result did not detect all IVDs (89 %) and also achieved a lower Dice score of 83 % +- 9 %. These results indicate that combining IVD segmentation with vertebra segmentation in lumbar spine MR images can help to improve the detection and segmentation performance compared with separately segmenting these structures.}, + doi = {10.1117/12.2611423}, + all_ss_ids = {a46c8e13b227cfc9d208915fdc79a6aff9fc58ea}, + gscites = {1}, +} + +@article{Graa23, + title = {MRI image features with an evident relation to low back pain: a narrative review}, + author = {van der Graaf, Jasper W and Kroeze, Robert Jan and Buckens, Constantinus FM and Lessmann, Nikolas and van Hooff, Miranda L}, + journal = {European Spine Journal}, + pages = {1--12}, + year = {2023}, + publisher = {Springer}, + abstract = {Purpose: Low back pain (LBP) is one of the most prevalent health conditions worldwide and responsible for the most years lived with disability, yet the etiology is often unknown. Magnetic resonance imaging (MRI) is frequently used for treatment decisions even though it is often inconclusive. There are many different image features that could relate to low back pain. Conversely, multiple etiologies do relate to spinal degeneration but do not actually cause the perceived pain.
This narrative review provides an overview of all possible relevant features visible on MRI images and determines their relation to LBP. Methods: We conducted a separate literature search per image feature. All included studies were scored using the GRADE guidelines. Based on the reported results per feature, an evidence agreement (EA) score was provided, enabling us to compare the collected evidence of separate image features. The various relations between MRI features and their associated pain mechanisms were evaluated to provide a list of features that are related to LBP. Results: All searches combined generated a total of 4472 hits, of which 31 articles were included. Features were divided into five different categories: 'discogenic', 'neuropathic', 'osseous', 'facetogenic', and 'paraspinal', and discussed separately. Conclusion: Our research suggests that type I Modic changes, disc degeneration, endplate defects, disc herniation, spinal canal stenosis, nerve compression, and muscle fat infiltration have the highest probability of being related to LBP. These can be used to improve clinical decision-making for patients with LBP based on MRI.}, + doi = {10.1007/s00586-023-07602-x}, +} + +@article{Graa23a, + author = {van der Graaf, Jasper W. and van Hooff, Miranda L. and Buckens, Constantinus F. M. and Rutten, Matthieu and van Susante, Job L. C. and Kroeze, Robert Jan and de Kleuver, Marinus and van Ginneken, Bram and Lessmann, Nikolas}, + title = {Lumbar spine segmentation in MR images: a dataset and a public benchmark}, + journal = {arXiv:2306.12217}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {aafec95246c8d87c5d49c368220e29c8fed9775a}, + all_ss_ids = {['aafec95246c8d87c5d49c368220e29c8fed9775a']}, + gscites = {0}, } @article{Grae93, @@ -8005,6 +9978,23 @@ @conference{Grau22b optnote = {DIAG, RADIOLOGY}, } +@article{Gray17, + author = {Gray, Ewan and Donten, Anna and Karssemeijer, Nico and van Gils, Carla and Evans, D. Gareth and Astley, Sue and Payne, Katherine}, + title = {Evaluation of a Stratified National Breast Screening Program in the United Kingdom: An Early Model-Based Cost-Effectiveness Analysis}, + doi = {10.1016/j.jval.2017.04.012}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.jval.2017.04.012}, + file = {Gray17.pdf:pdf\\Gray17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Value in Health}, + automatic = {yes}, + citation-count = {44}, + pages = {1100-1109}, + volume = {20}, + pmid = {28964442}, +} + @article{Gree16, author = {H. Greenspan and R. M. Summers and B.
van Ginneken}, title = {Deep Learning in Medical Imaging: Overview and Future Promise of an Exciting New Technique}, @@ -8018,6 +10008,8 @@ @article{Gree16 optnote = {DIAG, RADIOLOGY}, month = {5}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/172640}, + all_ss_ids = {1d2109f8ec43c23db647c4778a5bb5846074e575}, + gscites = {125}, } @conference{Grin12, @@ -8053,8 +10045,10 @@ @article{Grin13 pmid = {23572106}, month = {4}, gsid = {4549341628942225242}, - gscites = {30}, + gscites = {40}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/118034}, + ss_id = {b47eb89360053492a2c57e7474301f12260ac1ea}, + all_ss_ids = {['b47eb89360053492a2c57e7474301f12260ac1ea']}, } @inproceedings{Grin13a, @@ -8071,7 +10065,9 @@ @inproceedings{Grin13a optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {17026275504420456787}, - gscites = {4}, + gscites = {5}, + ss_id = {d80fa93bf60f2f3635eec2903b09e9848ecacf03}, + all_ss_ids = {['d80fa93bf60f2f3635eec2903b09e9848ecacf03']}, } @inproceedings{Grin13b, @@ -8086,7 +10082,9 @@ @inproceedings{Grin13b optnote = {DIAG, RADIOLOGY}, month = {4}, gsid = {17742882494926958127}, - gscites = {20}, + gscites = {24}, + ss_id = {8be6e1c3b1224ea96f6ddbfde8153e32ab9a1f75}, + all_ss_ids = {['8be6e1c3b1224ea96f6ddbfde8153e32ab9a1f75']}, } @conference{Grin14, @@ -8113,8 +10111,10 @@ @article{Grin15 pmid = {25574052}, month = {1}, gsid = {2566342930661349456}, - gscites = {21}, + gscites = {40}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/154610}, + ss_id = {2da30c95504bfe2cb1e3c1058531db3226ac7780}, + all_ss_ids = {['2da30c95504bfe2cb1e3c1058531db3226ac7780']}, } @conference{Grin15a, @@ -8134,7 +10134,8 @@ @conference{Grin16 abstract = {Purpose: The presence of hemorrhages is one of the common signs of diabetic retinopathy ({DRP}), a vision-threatening retinal disease affecting patients with diabetes. Automatic detection of hemorrhages is important to facilitate the timely detection of patients that would benefit from treatment to slow down or prevent disease progression to vision-threatening stages of {DRP}. We report an automatic system based on deep learning to automatically detect hemorrhages on color fundus images. Methods: Data was drawn from two public datasets ({K}aggle and {M}essidor) by selecting images with sufficient quality for analysis, including a total of 4624 and 1102 images, respectively. The {K}aggle set was split into a development (4048) and an evaluation set (576). The {M}essidor set was solely used as an external evaluation set. A reference observer indicated the presence of hemorrhages for all images and also marked their locations in the development set. Both evaluation sets were also scored for the presence of hemorrhages by two independent human experts. An automatic system based on deep learning, employing a convolutional neural network ({CNN}), was developed and used to identify images with hemorrhages. The {CNN} consisted of a layered architecture of 5 convolutional layers, a fully connected layer, and a final classification layer. The {CNN} used 41x41 pixel sized color patches as input in the first layer. In each convolutional layer, inputs were convolved with a set of small sized filters and the response maps were used as input in the next layer. In the last layer, an image score indicating the likelihood for the presence of hemorrhages was generated. Evaluation was performed by comparing system results and human expert annotations with the reference.
Results: The reference observer marked 99 and 289 images as containing hemorrhages, and 477 and 813 as controls in the two test sets. The automatic system achieved areas ({A}z) under the receiver operating characteristic curve of 0.957 and 0.968 with sensitivity/specificity pairs of 0.889/0.912 and 0.931/0.888, whereas the human experts achieved sensitivity/specificity of 0.919/0.979 and 0.899/0.977 in the {K}aggle test set; and 0.976/0.894 and 0.958/0.872 in the {M}essidor test set. Conclusions: An automatic system was developed for the detection of hemorrhages on color fundus images. The system approaches human expert performance and allows for a fast and reliable identification of patients with moderate to severe {DRP} in a screening setup.}, optnote = {DIAG, RADIOLOGY}, gsid = {6287248310931733301}, - gscites = {3}, + gscites = {6}, + all_ss_ids = {e4db301a185bccd105017fb66e3f9e2adf876495}, } @article{Grin16a, @@ -8152,7 +10153,9 @@ @article{Grin16a pmid = {27231583}, month = {2}, gsid = {12016324152117679054}, - gscites = {7}, + gscites = {10}, + ss_id = {2ebb428ce82dc699122e1a98d518e48d0c068ed0}, + all_ss_ids = {['2ebb428ce82dc699122e1a98d518e48d0c068ed0']}, } @article{Grin16b, @@ -8170,8 +10173,10 @@ @article{Grin16b pmid = {26886969}, month = {5}, gsid = {9170568847453666888}, - gscites = {240}, + gscites = {374}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/167797}, + ss_id = {226df2c0315fcd6ac45413e70d3a0bac3e1b1072}, + all_ss_ids = {['226df2c0315fcd6ac45413e70d3a0bac3e1b1072']}, } @phdthesis{Grin16c, @@ -8188,6 +10193,23 @@ @phdthesis{Grin16c journal = {PhD thesis}, } +@article{Grob18, + author = {Grob, Dagmar and Oostveen, Luuk J. and Prokop, Mathias and Schaefer-Prokop, Cornelia M. and Sechopoulos, Ioannis and Brink, Monique}, + title = {Imaging of pulmonary perfusion using subtraction CT angiography is feasible in clinical practice}, + doi = {10.1007/s00330-018-5740-4}, + year = {2018}, + abstract = {Subtraction computed tomography (SCT) is a technique that uses software-based motion correction between an unenhanced and an enhanced CT scan for obtaining the iodine distribution in the pulmonary parenchyma. This technique has been implemented in clinical practice for the evaluation of lung perfusion in CT pulmonary angiography (CTPA) in patients with suspicion of acute and chronic pulmonary embolism, with acceptable radiation dose. This paper discusses the technical principles, clinical interpretation, benefits and limitations of arterial subtraction CTPA. * SCT uses motion correction and image subtraction between an unenhanced and an enhanced CT scan to obtain iodine distribution in the pulmonary parenchyma. * SCT could have added value in the detection of pulmonary embolism. * SCT requires only software implementation, making it potentially more widely available for patient care than dual-energy CT.}, + url = {http://dx.doi.org/10.1007/s00330-018-5740-4}, + file = {Grob18.pdf:pdf\\Grob18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + automatic = {yes}, + citation-count = {20}, + pages = {1408-1414}, + volume = {29}, + pmid = {30255247}, +} + @phdthesis{Grob19, author = {Dagmar Grob}, title = {Functional CT Imaging of the Lung: Subtraction CT as a novel technique}, @@ -8202,6 +10224,40 @@ @phdthesis{Grob19 journal = {PhD thesis}, } +@article{Grob19a, + author = {Grob, Dagmar and Smit, Ewoud and Prince, Jip and Kist, Jakob and St\"{o}ger, Lauran and Geurts, Bram and Snoeren, Miranda M. and van Dijk, Rogier and Oostveen, Luuk J.
and Prokop, Mathias and Schaefer-Prokop, Cornelia M. and Sechopoulos, Ioannis and Brink, Monique}, + title = {Iodine Maps from Subtraction CT or Dual-Energy CT to Detect Pulmonary Emboli with CT Angiography: A Multiple-Observer Study}, + doi = {10.1148/radiol.2019182666}, + year = {2019}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1148/radiol.2019182666}, + file = {Grob19a.pdf:pdf\\Grob19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + automatic = {yes}, + citation-count = {31}, + pages = {197-205}, + volume = {292}, + pmid = {30860897}, +} + @conference{Grob19b, author = {Grob, Dagmar and Oostveen, Luuk J. and Jacobs, Colin and Prokop, Mathias and Schaefer-Prokop, Cornelia and Sechopoulos, Ioannis and Brink, Monique}, title = {Intra-patient comparison of pulmonary nodule enhancement in subtraction CT and dual-energy CT}, @@ -8224,6 +10280,9 @@ @article{Grob20 file = {Grob20.pdf:pdf\\Grob20.pdf:PDF}, optnote = {AXTI, DIAG, RADIOLOGY}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/229492}, + ss_id = {1479138d85b4af9d10bd3c2301ea68d2406e8d85}, + all_ss_ids = {['1479138d85b4af9d10bd3c2301ea68d2406e8d85']}, + gscites = {2}, } @conference{Grob20a, @@ -8231,7 +10290,7 @@ @conference{Grob20a title = {Pulmonary nodule growth: can follow-up be shortened with a high-end or an ultra-high-resolution CT scanner?}, booktitle = ECR, year = {2020}, - abstract = {PURPOSE: To determine the interscan variability of pulmonary nodule volume measurements in CT scans acquired with state-of-the-art wide-area and ultra-high-resolution CT systems. METHODS AND MATERIALS: In this prospective study, patients with at least two non-calcified solid pulmonary nodules suspicious for metastases on previous CT scans were imaged twice with either a high-end 320 detector CT (MDCT, Aquilion ONE Genesis, Canon, slice thickness 0.5 mm, 512x512 matrix) or an ultra-high-resolution CT (UHRCT, Precision, Canon, 0.25 mm, 1024x1024). In between scans, an off-and-on table strategy was used to simulate follow-up scans with no nodule growth. Semi-automated volumetric nodule segmentation and volume estimation (max. 4 per patient, effective diameter 4-15 mm) were performed on a lung screening workstation (Veolity). 95%-limits of agreement (LOA) and the time to estimate actual nodule growth rate at a nodule volume doubling time (VDT) of 400 days were calculated. RESULTS: 17 patients (60 nodules, average volume: 218 mm3) were imaged on the MDCT and 27 patients (90 nodules, 177 mm3) on the UHRCT at a similar dose (mean dose-length-product: 126.6 mGycm vs 127.2 mGycm, respectively (p=0.98)). The 95%-LOA was ±7.0% for the MDCT and ±5.9% for the UHRCT (p=0.07).
Therefore, the minimum required interscan period to detect a VDT of 400 days is 33-39 days. CONCLUSION: Both scanners result in low interscan variability, especially compared to current clinical standards, which require a volume change of 25% (the current 95%-LOA) as significant nodule growth. Therefore, the follow-up period to detect pulmonary nodule growth could be dramatically shortened from three to about one month, reducing patient anxiety and the potential for stage shift in lung nodule management. LIMITATIONS: Pulmonary metastases instead of incidental nodules were measured.}, + abstract = {PURPOSE: To determine the interscan variability of pulmonary nodule volume measurements in CT scans acquired with state-of-the-art wide-area and ultra-high-resolution CT systems. METHODS AND MATERIALS: In this prospective study, patients with at least two non-calcified solid pulmonary nodules suspicious for metastases on previous CT scans were imaged twice with either a high-end 320 detector CT (MDCT, Aquilion ONE Genesis, Canon, slice thickness 0.5 mm, 512x512 matrix) or an ultra-high-resolution CT (UHRCT, Precision, Canon, 0.25 mm, 1024x1024). In between scans, an off-and-on table strategy was used to simulate follow-up scans with no nodule growth. Semi-automated volumetric nodule segmentation and volume estimation (max. 4 per patient, effective diameter 4-15 mm) were performed on a lung screening workstation (Veolity). 95%-limits of agreement (LOA) and the time to estimate actual nodule growth rate at a nodule volume doubling time (VDT) of 400 days were calculated. RESULTS: 17 patients (60 nodules, average volume: 218 mm3) were imaged on the MDCT and 27 patients (90 nodules, 177 mm3) on the UHRCT at a similar dose (mean dose-length-product: 126.6 mGycm vs 127.2 mGycm, respectively (p=0.98)). The 95%-LOA was +-7.0% for the MDCT and +-5.9% for the UHRCT (p=0.07). Therefore, the minimum required interscan period to detect a VDT of 400 days is 33-39 days. CONCLUSION: Both scanners result in low interscan variability, especially compared to current clinical standards, which require a volume change of 25% (the current 95%-LOA) as significant nodule growth. Therefore, the follow-up period to detect pulmonary nodule growth could be dramatically shortened from three to about one month, reducing patient anxiety and the potential for stage shift in lung nodule management.
LIMITATIONS: Pulmonary metastases instead of incidental nodules were measured.}, optnote = {AXTI, DIAG, RADIOLOGY}, } @@ -8255,7 +10314,8 @@ @inproceedings{Gube11 file = {Gube11.pdf:pdf/Gube11.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {6065729845957490171}, - gscites = {16}, + gscites = {18}, + all_ss_ids = {['5d799e097d8f9b91a509bce2ebb98a04006c0c24']}, } @inproceedings{Gube11a, @@ -8287,7 +10347,9 @@ @inproceedings{Gube12 number = {Pt 2}, pmid = {23286070}, gsid = {2427097091307085452}, - gscites = {40}, + gscites = {45}, + ss_id = {2b40545a6af971b370932b854150e74c623f7357}, + all_ss_ids = {['2b40545a6af971b370932b854150e74c623f7357']}, } @inproceedings{Gube13, @@ -8303,7 +10365,9 @@ @inproceedings{Gube13 optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {14620347730568680552}, - gscites = {11}, + gscites = {14}, + ss_id = {024137b64819b9f18837d405e4ecff2572468c80}, + all_ss_ids = {['024137b64819b9f18837d405e4ecff2572468c80']}, } @inproceedings{Gube13a, @@ -8333,7 +10397,9 @@ @article{Gube14 pmid = {24465808}, month = {1}, gsid = {9649554823203874553}, - gscites = {111}, + gscites = {157}, + ss_id = {15e91f8c7fc112ced9946aef9761fa92a1f235c0}, + all_ss_ids = {['15e91f8c7fc112ced9946aef9761fa92a1f235c0']}, } @article{Gube15, @@ -8352,7 +10418,9 @@ @article{Gube15 number = {1}, pmid = {25561456}, gsid = {13886477572492923291}, - gscites = {90}, + gscites = {120}, + ss_id = {9356a0862ad0dd269e24f09924d2d1f99ea7fda9}, + all_ss_ids = {['9356a0862ad0dd269e24f09924d2d1f99ea7fda9']}, } @phdthesis{Gube15a, @@ -8385,7 +10453,9 @@ @article{Gube15b optnote = {DIAG, RADIOLOGY}, pmid = {25532510}, gsid = {10687699890920421893}, - gscites = {88}, + gscites = {115}, + ss_id = {60149277867d3e86391c0aa4ce5720e1a1e5cb48}, + all_ss_ids = {['60149277867d3e86391c0aa4ce5720e1a1e5cb48']}, } @conference{Gube15c, @@ -8441,7 +10511,9 @@ @article{Gube16 optnote = {DIAG, RADIOLOGY}, pmid = {26781154}, gsid = {1691742795221291249}, - gscites = {17}, + gscites = {25}, + ss_id = {7620ef3e0f908161af57191c25b7ed849734f1cf}, + all_ss_ids = {['7620ef3e0f908161af57191c25b7ed849734f1cf']}, } @inproceedings{Gube16a, @@ -8456,7 +10528,9 @@ @inproceedings{Gube16a optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {6334850745608942532}, - gscites = {1}, + gscites = {2}, + ss_id = {43e4b8914c087b637ae22b6b4f85f70e31e6e4e8}, + all_ss_ids = {['43e4b8914c087b637ae22b6b4f85f70e31e6e4e8']}, } @mastersthesis{Gucl21, @@ -8470,6 +10544,15 @@ @mastersthesis{Gucl21 journal = {Master thesis}, } +@conference{Guev23, + author = {Bryan Cardenas Guevara and Niccolo Marini and Stefano Marchesin and Witali Aswolinskiy and Robert-Jan Schlimbach and Damian Podareanu and Francesco Ciompi}, + booktitle = {MIDL}, + title = {Caption generation from histopathology whole-slide images using pre-trained transformers}, + abstract = {The recent advent of foundation models and large language models has enabled scientists to leverage large-scale knowledge of pretrained (vision) transformers and efficiently tailor it to downstream tasks. This technology can potentially automate multiple aspects of cancer diagnosis in digital pathology, from whole-slide image classification to generating pathology reports while training with pairs of images and text from the diagnostic conclusion. In this work, we orchestrate a set of weakly-supervised transformer-based models with a first aim to address both whole-slide image classification and captioning, addressing the automatic generation of the conclusion of pathology reports in the form of image captions. 
We report our first results on a multicentric multilingual dataset of colon polyps and biopsies. We achieve high diagnostic accuracy with no supervision and cheap computational adaptation.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, +} + @article{Habi20, author = {Habib, Shifa Salman and Rafiq, Sana and Zaidi, Syed Mohammad Asad and Ferrand, Rashida Abbas and Creswell, Jacob and Van Ginneken, Bram and Jamal, Wafa Zehra and Azeemi, Kiran Sohail and Khowaja, Saira and Khan, Aamir}, title = {Evaluation of computer aided detection of tuberculosis on chest radiography among people with diabetes in Karachi Pakistan}, @@ -8484,16 +10567,18 @@ @article{Habi20 pmid = {32286389}, year = {2020}, month = {4}, + all_ss_ids = {['13122b7c0ef4a596875eff981651d60140e67417', '142f9f46a9ca5a99de4c7b819d801d245a9334ba']}, + gscites = {13}, } @mastersthesis{Hack21, author = {Roel HACKING}, title = {Combining CT scans and clinical features for improved automated COVID-19 detection}, abstract = {During the first peak of the COVID-19 pandemic, hospitals in hard-hit regions were overflowing with patients at the emergency unit with respiratory complaints. Since the RT-PCR test was in limited supply at the time and test results took a long time to obtain, many hospitals opted to use chest CT scans of COVID-19 suspects. - As a result of this, several studies examined the possibility of automating the detection of COVID-19 in CT scans. One such study, by Lessmann et al., 2020, developed a model to predict COVID-19 severity scores based on these chest CT scans. In this thesis, we extended their model in several ways to take into account additional clinical values (such as blood values, sex, and age) to predict either PCR outcomes or clinical diagnoses. - Based on data from the Canisius-Wilhelmina Ziekenhuis (CWZ) hospital and Radboudumc hospitals, as well as the COVID-19 dataset by Ning et al., 2020, we found that integrating these two modalities can indeed lead to improved performance when both clinical and visual features are of sufficient quality. When training on data from the CWZ hospital and evaluating on data from the Radboudumc hospital, models using only clinical features or visual features achieved Area Under the ROC Curve (AUC) values of 0.773 and 0.826, respectively; their combination resulted in an AUC of 0.851. - Similarly, when training on data from the Union hospital in the iCTCF dataset and predicting on data from the Union hospital in that same dataset, we obtained AUCs of 0.687 and 0.812 for clinical and visual features, respectively; their combination resulted in an AUC of 0.862. - However, we also discovered that the patterns of missing data present in these clinical feature datasets can play an essential role in the performance of the models fitted on them. We thus developed additional methods to analyze and mitigate this effect to obtain fairer evaluations and increase model generalizability. Still, the high diagnostic performance of some of our models suggests that they could be adapted into clinical practice, and our methods pertaining to missing data could be used to aid further research using clinical feature datasets.}, + As a result of this, several studies examined the possibility of automating the detection of COVID-19 in CT scans. One such study, by Lessmann et al., 2020, developed a model to predict COVID-19 severity scores based on these chest CT scans. 
In this thesis, we extended their model in several ways to take into account additional clinical values (such as blood values, sex, and age) to predict either PCR outcomes or clinical diagnoses. + Based on data from the Canisius-Wilhelmina Ziekenhuis (CWZ) hospital and Radboudumc hospitals, as well as the COVID-19 dataset by Ning et al., 2020, we found that integrating these two modalities can indeed lead to improved performance when both clinical and visual features are of sufficient quality. When training on data from the CWZ hospital and evaluating on data from the Radboudumc hospital, models using only clinical features or visual features achieved Area Under the ROC Curve (AUC) values of 0.773 and 0.826, respectively; their combination resulted in an AUC of 0.851. + Similarly, when training on data from the Union hospital in the iCTCF dataset and predicting on data from the Union hospital in that same dataset, we obtained AUCs of 0.687 and 0.812 for clinical and visual features, respectively; their combination resulted in an AUC of 0.862. + However, we also discovered that the patterns of missing data present in these clinical feature datasets can play an essential role in the performance of the models fitted on them. We thus developed additional methods to analyze and mitigate this effect to obtain fairer evaluations and increase model generalizability. Still, the high diagnostic performance of some of our models suggests that they could be adopted in clinical practice, and our methods pertaining to missing data could be used to aid further research using clinical feature datasets.}, file = {Hack21.pdf:pdf/Hack21.pdf:PDF}, optnote = {DIAG}, school = {Radboud University}, @@ -8507,14 +10592,14 @@ @conference{Hadd19 booktitle = {EACR}, year = {2019}, abstract = {Background and Objective: - A three-dimensional visualization of a human carcinoma could provide invaluable diagnostic information and redefine how we perceive and analyze cancer invasion. As deep learning begins automating the diagnostic workflow and cutting-edge microscopy provides unprecedented ways of visualizing tissue, combining these methodologies could provide novel insight into malignant tumors and other pathologic entities. By combining Knife-Edge Scanning Microscopy with convolutional neural networks, we set out to visualize an entire three-dimensional colorectal carcinoma segmented into specific tissue classifications. - -Methods: -A Knife-Edge Scanning Microscope (KESM), developed by Strateos (San Francisco, CA, USA), was used to digitize a whole-mount, H&E stained, formalin-fixed paraffin-embedded human tissue specimen obtained from the Radboudumc (Nijmegen, Netherlands). Sparse manual annotations of 5 tissue types (tumor, stroma, muscle, healthy glands, background) were provided using KESM data to train a convolutional neural network developed by the Computational Pathology Group (Radboudumc) for semantic segmentation of the colorectal carcinoma tissue. The three-dimensional visualization was generated using 3Scan’s proprietary visualization pipeline. -Results: The convolutional neural network was used to process roughly 1200 slices of KESM data. The stitched and rendered segmentation maps demonstrate the formalin-fixed paraffin-embedded carcinoma of roughly 5 millimeters in depth. As shown in the figure, the tumor invasive margin can be seen advancing into the surrounding tumor stroma.
- -Conclusion: Based on our findings, we were capable of training a segmentation model on the 3D KESM data to create an accurate representation of a formalin-fixed paraffin-embedded colorectal carcinoma tissue block segmented into five tissue classifications. Going forward, this can have much broader implications for the research and understanding of invasive tumors.}, + A three-dimensional visualization of a human carcinoma could provide invaluable diagnostic information and redefine how we perceive and analyze cancer invasion. As deep learning begins automating the diagnostic workflow and cutting-edge microscopy provides unprecedented ways of visualizing tissue, combining these methodologies could provide novel insight into malignant tumors and other pathologic entities. By combining Knife-Edge Scanning Microscopy with convolutional neural networks, we set out to visualize an entire three-dimensional colorectal carcinoma segmented into specific tissue classifications. + + Methods: + A Knife-Edge Scanning Microscope (KESM), developed by Strateos (San Francisco, CA, USA), was used to digitize a whole-mount, H&E stained, formalin-fixed paraffin-embedded human tissue specimen obtained from the Radboudumc (Nijmegen, Netherlands). Sparse manual annotations of 5 tissue types (tumor, stroma, muscle, healthy glands, background) were provided using KESM data to train a convolutional neural network developed by the Computational Pathology Group (Radboudumc) for semantic segmentation of the colorectal carcinoma tissue. The three-dimensional visualization was generated using 3Scan's proprietary visualization pipeline. + + Results: The convolutional neural network was used to process roughly 1200 slices of KESM data. The stitched and rendered segmentation maps demonstrate the formalin-fixed paraffin-embedded carcinoma of roughly 5 millimeters in depth. As shown in the figure, the tumor invasive margin can be seen advancing into the surrounding tumor stroma. + + Conclusion: Based on our findings, we were capable of training a segmentation model on the 3D KESM data to create an accurate representation of a formalin-fixed paraffin-embedded colorectal carcinoma tissue block segmented into five tissue classifications. Going forward, this can have much broader implications for the research and understanding of invasive tumors.}, optnote = {DIAG, RADIOLOGY}, } @@ -8527,23 +10612,41 @@ @conference{Hadd20 optnote = {DIAG, RADIOLOGY}, } +@article{Hadd21, + author = {Haddad, Tariq Sami and Lugli, Alessandro and Aherne, Susan and Barresi, Valeria and Terris, Beno\^{i}t and Bokhorst, John-Melle and Brockmoeller, Scarlet Fiona and Cuatrecasas, Miriam and Simmer, Femke and El-Zimaity, Hala and Fl\'{e}jou, Jean-Fran\c{c}ois and Gibbons, David and Cathomas, Gieri and Kirsch, Richard and Kuhlmann, Tine Plato and Langner, Cord and Loughrey, Maurice B. and Riddell, Robert and Ristim\"{a}ki, Ari and Kakar, Sanjay and Sheahan, Kieran and Treanor, Darren and van der Laak, Jeroen and Vieth, Michael and Zlobec, Inti and Nagtegaal, Iris D.}, + title = {Improving tumor budding reporting in colorectal cancer: a Delphi consensus study}, + doi = {10.1007/s00428-021-03059-9}, + year = {2021}, + abstract = {Tumor budding is a long-established independent adverse prognostic marker in colorectal cancer, yet methods for its assessment have varied widely.
In an effort to standardize its reporting, a group of experts met in Bern, Switzerland, in 2016 to reach consensus on a single, international, evidence-based method for tumor budding assessment and reporting (International Tumor Budding Consensus Conference [ITBCC]). Tumor budding assessment using the ITBCC criteria has been validated in large cohorts of cancer patients and incorporated into several international colorectal cancer pathology and clinical guidelines. With the wider reporting of tumor budding, new issues have emerged that require further clarification. To better inform researchers and health-care professionals on these issues, an international group of experts in gastrointestinal pathology participated in a modified Delphi process to generate consensus and highlight areas requiring further research. This effort serves to re-affirm the importance of tumor budding in colorectal cancer and support its continued use in routine clinical practice.}, + url = {http://dx.doi.org/10.1007/s00428-021-03059-9}, + file = {Hadd21.pdf:pdf\\Hadd21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Virchows Archiv}, + automatic = {yes}, + citation-count = {24}, + pages = {459-469}, + volume = {479}, + pmid = {33650042}, +} @article{Hadj22, - author = {Lubomir Hadjiiski and Kenny Cha and Heang-Ping Chan and Karen Drukker and Lia Morra and Janne J. Näppi and Berkman Sahiner and Hiroyuki Yoshida and Quan Chen and Thomas M. Deserno and Hayit Greenspan and Henkjan Huisman and Zhimin Huo and Richard Mazurchuk and Nicholas Petrick and Daniele Regge and Ravi Samala and Ronald M. Summers and Kenji Suzuki and Georgia Tourassi and Daniel Vergara and Samuel G. Armato III}, + author = {Lubomir Hadjiiski and Kenny Cha and Heang-Ping Chan and Karen Drukker and Lia Morra and Janne J. Nappi and Berkman Sahiner and Hiroyuki Yoshida and Quan Chen and Thomas M. Deserno and Hayit Greenspan and Henkjan Huisman and Zhimin Huo and Richard Mazurchuk and Nicholas Petrick and Daniele Regge and Ravi Samala and Ronald M. Summers and Kenji Suzuki and Georgia Tourassi and Daniel Vergara and Samuel G. Armato III}, title = {AAPM task group report 273: Recommendations on best practices for AI and machine learning for computer-aided diagnosis in medical imaging}, journal = {Medical Physics}, year = {2022}, doi = {https://doi.org/10.1002/mp.16188}, - abstract = {Rapid advances in artificial intelligence (AI) and machine learning, and specifically in deep learning (DL) techniques, have enabled broad application of these methods in health care. The promise of the DL approach has spurred further interest in computer-aided diagnosis (CAD) development and applications using both “traditional” machine learning methods and newer DL-based methods. We use the term CAD-AI to refer to this expanded clinical decision support environment that uses traditional and DL-based AI methods. - -Numerous studies have been published to date on the development of machine learning tools for computer-aided, or AI-assisted, clinical tasks. However, most of these machine learning models are not ready for clinical deployment. It is of paramount importance to ensure that a clinical decision support tool undergoes proper training and rigorous validation of its generalizability and robustness before adoption for patient care in the clinic. 
- -To address these important issues, the American Association of Physicists in Medicine (AAPM) Computer-Aided Image Analysis Subcommittee (CADSC) is charged, in part, to develop recommendations on practices and standards for the development and performance assessment of computer-aided decision support systems. The committee has previously published two opinion papers on the evaluation of CAD systems and issues associated with user training and quality assurance of these systems in the clinic. With machine learning techniques continuing to evolve and CAD applications expanding to new stages of the patient care process, the current task group report considers the broader issues common to the development of most, if not all, CAD-AI applications and their translation from the bench to the clinic. The goal is to bring attention to the proper training and validation of machine learning algorithms that may improve their generalizability and reliability and accelerate the adoption of CAD-AI systems for clinical decision support.}, + abstract = {Rapid advances in artificial intelligence (AI) and machine learning, and specifically in deep learning (DL) techniques, have enabled broad application of these methods in health care. The promise of the DL approach has spurred further interest in computer-aided diagnosis (CAD) development and applications using both "traditional" machine learning methods and newer DL-based methods. We use the term CAD-AI to refer to this expanded clinical decision support environment that uses traditional and DL-based AI methods. + + Numerous studies have been published to date on the development of machine learning tools for computer-aided, or AI-assisted, clinical tasks. However, most of these machine learning models are not ready for clinical deployment. It is of paramount importance to ensure that a clinical decision support tool undergoes proper training and rigorous validation of its generalizability and robustness before adoption for patient care in the clinic. + + To address these important issues, the American Association of Physicists in Medicine (AAPM) Computer-Aided Image Analysis Subcommittee (CADSC) is charged, in part, to develop recommendations on practices and standards for the development and performance assessment of computer-aided decision support systems. The committee has previously published two opinion papers on the evaluation of CAD systems and issues associated with user training and quality assurance of these systems in the clinic. With machine learning techniques continuing to evolve and CAD applications expanding to new stages of the patient care process, the current task group report considers the broader issues common to the development of most, if not all, CAD-AI applications and their translation from the bench to the clinic. 
The goal is to bring attention to the proper training and validation of machine learning algorithms that may improve their generalizability and reliability and accelerate the adoption of CAD-AI systems for clinical decision support.}, file = {Hadj22.pdf:pdf\\Hadj22.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {df2cedb6640c9c7e0627fb03cf26b49e82a154b0}, + all_ss_ids = {['df2cedb6640c9c7e0627fb03cf26b49e82a154b0']}, + gscites = {8}, } - @inproceedings{Hage21, title = {Variable Fraunhofer MEVIS RegLib Comprehensively Applied to Learn2Reg Challenge}, author = {H{\"a}ger, Stephanie and Heldmann, Stefan and Hering, Alessa and Kuckertz, Sven and Lange, Annkristin}, @@ -8556,6 +10659,9 @@ @inproceedings{Hage21 doi = {10.1007/978-3-030-71827-5_9}, optnote = {DIAG, RADIOLOGY}, abstract = {In this paper, we present our contribution to the learn2reg challenge. We applied the Fraunhofer MEVIS registration library RegLib comprehensively to all 4 tasks of the challenge. For tasks 1-3, we used a classic iterative registration method with NGF distance measure, second order curvature regularizer, and a multi-level optimization scheme. For task 4, a deep learning approach with a weakly supervised trained U-Net was applied using the same cost function as in the iterative approach.}, + ss_id = {ac718bfeb8188b8540e72b81b81ff22c9f5f7b44}, + all_ss_ids = {['ac718bfeb8188b8540e72b81b81ff22c9f5f7b44']}, + gscites = {4}, } @inproceedings{Hago18, @@ -8567,6 +10673,9 @@ @inproceedings{Hago18 abstract = {Convolutional Neural Networks (CNN) have had a huge success in many areas of computer vision and medical image analysis. However, there is still an immense potential for performance improvement in mammogram breast cancer detection Computer-Aided Detection (CAD) systems by integrating all the information that radiologist utilizes, such as symmetry and temporal data. In this work, we proposed a patch based multi-input CNN that learns symmetrical difference to detect breast masses. The network was trained on a large-scale dataset of 28294 mammogram images. The performance was compared to a baseline architecture without symmetry context using Area Under the ROC Curve (AUC) and Competition Performance Metric (CPM). At candidate level, AUC value of 0.933 with 95% confidence interval of [0.920, 0.954] was obtained when symmetry information is incorporated in comparison with baseline architecture which yielded AUC value of 0.929 with [0.919, 0.947] confidence interval. By incorporating symmetrical information, although there was no a significant candidate level performance again (p=0.111), we have found a compelling result at exam level with CPM value of 0.733 (p=0.001). 
We believe that including temporal data and adding a benign class to the dataset could improve the detection performance.}, file = {Hago18.pdf:pdf/Hago18.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {d0df46743b04218d7a0800c82aa2ea775973a9ae}, + all_ss_ids = {['d0df46743b04218d7a0800c82aa2ea775973a9ae']}, + gscites = {25}, } @article{Hali19, @@ -8585,8 +10694,10 @@ @article{Hali19 optnote = {DIAG}, pmid = {31406196}, gsid = {12149180146622984473}, - gscites = {2}, + gscites = {15}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/207039}, + ss_id = {06578dbe1d4b788eb0b6b0c84e3d4fae0ddf3a37}, + all_ss_ids = {['06578dbe1d4b788eb0b6b0c84e3d4fae0ddf3a37']}, } @conference{Hall18, @@ -8614,8 +10725,10 @@ @article{Hamb08 pmid = {18791410}, month = {10}, gsid = {13099020070548141810}, - gscites = {126}, + gscites = {127}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/70642}, + ss_id = {f7c501d095bf0a0e58967aa336ab63ff912de195}, + all_ss_ids = {['f7c501d095bf0a0e58967aa336ab63ff912de195']}, } @article{Hamb10, @@ -8635,6 +10748,8 @@ @article{Hamb10 gsid = {4191275900561122443}, gscites = {415}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/87401}, + ss_id = {3f99aa1cc1a5175b1c0bca3b0c4747724cf87757}, + all_ss_ids = {['3f99aa1cc1a5175b1c0bca3b0c4747724cf87757']}, } @article{Hamb11, @@ -8654,6 +10769,8 @@ @article{Hamb11 gsid = {2584223731293899692}, gscites = {577}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/97304}, + ss_id = {0e8824d96ba3b5f58ba4114a22a6274421562f3a}, + all_ss_ids = {['0e8824d96ba3b5f58ba4114a22a6274421562f3a']}, } @article{Hamb12, @@ -8671,8 +10788,10 @@ @article{Hamb12 pmid = {21924545}, month = {1}, gsid = {18221975544734376170}, - gscites = {324}, + gscites = {329}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/109206}, + ss_id = {17a88c8b4c36924e5d0b7d9f1a6e6b99a327c535}, + all_ss_ids = {['17a88c8b4c36924e5d0b7d9f1a6e6b99a327c535']}, } @article{Hamb12a, @@ -8689,7 +10808,9 @@ @article{Hamb12a pmid = {23204542}, month = {2}, gsid = {16782773343024671219}, - gscites = {92}, + gscites = {110}, + ss_id = {be606a6037f048bd51e9dea69e7f324900f3e0f0}, + all_ss_ids = {['be606a6037f048bd51e9dea69e7f324900f3e0f0']}, } @article{Hame13, @@ -8716,6 +10837,103 @@ @conference{Hame17 optnote = {DIAG}, } +@article{Hame20, + author = {Hamer, O. W. and Rehbock, B. and Schaefer-Prokop, C.}, + title = {Idiopathische pulmonale Fibrose}, + doi = {10.1007/s00117-020-00675-5}, + year = {2020}, + abstract = {Idiopathic pulmonary fibrosis (IPF) is a chronic, progressive fibrosing interstitial lung disease with a poor prognosis. High-resolution computed tomography (HRCT) plays a central role in the workup of patients with suspected IPF. On HRCT, IPF presents with the pattern of usual interstitial pneumonia (UIP). For a long time, only supportive or immunosuppressive therapy was possible. Since 2012, antifibrotic agents have been approved, which has brought IPF even more than before into the focus of clinical and scientific interest. Building on the knowledge gained, the revised version of the international guideline on the diagnosis of IPF appeared in 2018. Among other things, the guideline contains directives for reporting HRCT findings. This continuing education article presents the relevant HRCT signs.
Die Ma\ssgaben der Leitlinie werden erl\"{a}utert.}, + url = {http://dx.doi.org/10.1007/s00117-020-00675-5}, + file = {Hame20.pdf:pdf\\Hame20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Der Radiologe}, + automatic = {yes}, + citation-count = {1}, + pages = {549-562}, + volume = {60}, + pmid = {32342119}, +} + +@article{Hare17, + author = {Hare, Hannah V. and Frost, Robert and Meakin, James A. and Bulte, Daniel P.}, + title = {~~On the Origins of the Cerebral IVIM Signal}, + doi = {10.1101/158014}, + year = {2017}, + abstract = {AbstractPurposeIntravoxel incoherent motion (IVIM) has been proposed as a means of non-invasive MRI measurement of perfusion parameters such as blood flow and blood volume. Its main competitor in the brain is arterial spin labelling (ASL). In theory, IVIM should not suffer from some of the same limitations as ASL such as poor signal in white matter, and assumptions about arterial arrival times that may be violated in the presence of pathology.MethodsIn this study we aimed to test IVIM as a viable alternative to ASL for quantitative imaging of perfusion parameters in the brain. First, a direct comparison was performed between IVIM and multi-post label delay pseudo-continuous ASL; second, IVIM images were acquired with and without nulling cerebrospinal fluid; and finally, ultra-high resolution IVIM was performed to minimise partial voluming.ResultsIn all three tests, IVIM failed to disprove the null hypothesis, strongly suggesting that, at least within the brain, the technique does not measure perfusion parameters as proposed.ConclusionFurthermore, the results obtained suggest that the contrast visible in IVIM-derived images is primarily sensitive to cerebrospinal fluid, and not the microvascular blood compartment.}, + url = {http://dx.doi.org/10.1101/158014}, + file = {Hare17.pdf:pdf\\Hare17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Preprint}, + automatic = {yes}, + citation-count = {3}, +} + +@article{Harl21, + author = {Harlianto, Netanja I and Oosterhof, Nadine and Foppen, Wouter and Hol, Marjolein E and Wittenberg, Rianne and van der Veen, Pieternella H and van Ginneken, Bram and Mohamed Hoesein, Firdaus A A and Verlaan, Jorrit-Jan and de Jong, Pim A and Westerink, Jan and van Petersen, R and van Dinther, B and Asselbergs, F W and Nathoe, H M and de Borst, G J and Bots, M L and Geerlings, M I and Emmelot, M H and de Jong, P A and Leiner, T and Lely, A T and van der Kaaij, N P and Kappelle, L J and Ruigrok, Y M and Verhaar, M C and Visseren, F L J and Westerink, J and for the UCC-SMART-Studygroup}, + title = {~~Diffuse idiopathic skeletal hyperostosis is associated with incident stroke in patients with increased cardiovascular risk}, + doi = {10.1093/rheumatology/keab835}, + year = {2021}, + abstract = {Abstract + + Objectives + Earlier retrospective studies have suggested a relation between DISH and cardiovascular disease, including myocardial infarction. The present study assessed the association between DISH and incidence of cardiovascular events and mortality in patients with high cardiovascular risk. + + + Methods + In this prospective cohort study, we included 4624 patients (mean age 58.4 years, 69.6% male) from the Second Manifestations of ARTerial disease cohort. The main end point was major cardiovascular events (MACE: stroke, myocardial infarction and vascular death). Secondary endpoints included all-cause mortality and separate vascular events. 
Cause-specific proportional hazard models were used to evaluate the risk of DISH on all outcomes, and subdistribution hazard models were used to evaluate the effect of DISH on the cumulative incidence. All models were adjusted for age, sex, body mass index, blood pressure, diabetes, non-HDL cholesterol, packyears, renal function and C-reactive protein.


 Results
 DISH was present in 435 (9.4%) patients. After a median follow-up of 8.7 (IQR 5.0-12.0) years, 864 patients had died and 728 patients developed a MACE event. DISH was associated with an increased cumulative incidence of ischaemic stroke. After adjustment in cause-specific modelling, DISH remained significantly associated with ischaemic stroke (HR 1.55; 95% CI: 1.01, 2.38), but not with MACE (HR 0.99; 95% CI: 0.79, 1.24), myocardial infarction (HR 0.88; 95% CI: 0.59, 1.31), vascular death (HR 0.94; 95% CI: 0.68, 1.27) or all-cause mortality (HR 0.94; 95% CI: 0.77, 1.16).


 Conclusion
 The presence of DISH is independently associated with an increased incidence and risk for ischaemic stroke, but not with MACE, myocardial infarction, vascular death or all-cause mortality.
 },
 url = {http://dx.doi.org/10.1093/rheumatology/keab835},
 file = {Harl21.pdf:pdf\\Harl21.pdf:PDF},
 optnote = {DIAG, RADIOLOGY},
 journal = {Rheumatology},
 automatic = {yes},
 citation-count = {5},
 pages = {2867-2874},
 volume = {61},
 pmid = {34357130},
}

@article{Harl22,
 author = {Harlianto, Netanja I. and Westerink, Jan and Hol, Marjolein E. and Wittenberg, Rianne and Foppen, Wouter and van der Veen, Pieternella H. and van Ginneken, Bram and Verlaan, Jorrit-Jan and de Jong, Pim A. and Mohamed Hoesein, Firdaus A. A. and {UCC-SMART Study Group }},
 year = {2022},
@@ -8726,11 +10944,14 @@ @article{Harl22
 pages = {rkac060},
 volume = {6},
 abstract = {Objectives: DISH has been associated with increased coronary artery calcifications and incident ischaemic stroke. The formation of bone along the spine may share pathways with calcium deposition in the aorta. We hypothesized that patients with DISH have increased vascular calcifications. Therefore we aimed to investigate the presence and extent of DISH in relation to thoracic aortic calcification (TAC) severity.
- Methods: This cross-sectional study included 4703 patients from the Second Manifestation of ARTerial disease cohort, consisting of patients with cardiovascular events or risk factors for cardiovascular disease. Chest radiographs were scored for DISH using the Resnick criteria. Different severities of TAC were scored arbitrarily from no TAC to mild, moderate or severe TAC. Using multivariate logistic regression, the associations between DISH and TAC were analysed with adjustments for age, sex, BMI, diabetes, smoking status, non-high-density lipoprotein cholesterol, cholesterol lowering drug usage, renal function and blood pressure.
- Results: A total of 442 patients (9.4\%) had evidence of DISH and 1789 (38\%) patients had TAC. The prevalence of DISH increased from 6.6\% in the no TAC group to 10.8\% in the mild, 14.3\% in the moderate and 17.1\% in the severe TAC group. After adjustments, DISH was significantly associated with the presence of TAC [odds ratio (OR) 1.46 [95\% CI 1.17, 1.82)]. In multinomial analyses, DISH was associated with moderate TAC [OR 1.43 (95\% CI 1.06, 1.93)] and severe TAC [OR 1.67 (95\% CI 1.19, 2.36)].
- Conclusions: Subjects with DISH have increased TACs, providing further evidence that patients with DISH have an increased burden of vascular calcifications.},
+ Methods: This cross-sectional study included 4703 patients from the Second Manifestation of ARTerial disease cohort, consisting of patients with cardiovascular events or risk factors for cardiovascular disease. Chest radiographs were scored for DISH using the Resnick criteria. Different severities of TAC were scored arbitrarily from no TAC to mild, moderate or severe TAC.
Using multivariate logistic regression, the associations between DISH and TAC were analysed with adjustments for age, sex, BMI, diabetes, smoking status, non-high-density lipoprotein cholesterol, cholesterol lowering drug usage, renal function and blood pressure.
+ Results: A total of 442 patients (9.4\%) had evidence of DISH and 1789 (38\%) patients had TAC. The prevalence of DISH increased from 6.6\% in the no TAC group to 10.8\% in the mild, 14.3\% in the moderate and 17.1\% in the severe TAC group. After adjustments, DISH was significantly associated with the presence of TAC [odds ratio (OR) 1.46 (95\% CI 1.17, 1.82)]. In multinomial analyses, DISH was associated with moderate TAC [OR 1.43 (95\% CI 1.06, 1.93)] and severe TAC [OR 1.67 (95\% CI 1.19, 2.36)].
+ Conclusions: Subjects with DISH have increased TACs, providing further evidence that patients with DISH have an increased burden of vascular calcifications.},
 file = {PubMed entry:http\://www.ncbi.nlm.nih.gov/pubmed/35993014:text/html},
 pmid = {35993014},
+ ss_id = {f8fb57d8601ac0189ea80c6412232ac6771e51f5},
+ all_ss_ids = {['f8fb57d8601ac0189ea80c6412232ac6771e51f5']},
+ gscites = {2},
}

@article{Hart10,
@@ -8746,6 +10967,26 @@ @article{Hart10
 number = {1},
 pmid = {20227213},
 month = {4},
+ ss_id = {712af65240c4f97f06da6178849a64f218a7cadc},
+ all_ss_ids = {['712af65240c4f97f06da6178849a64f218a7cadc']},
+ gscites = {47},
+}
+
+@article{Hart20,
+ author = {Hartman, Douglas Joseph and Van Der Laak, Jeroen A.W.M. and Gurcan, Metin N. and Pantanowitz, Liron},
+ title = {~~Value of Public Challenges for the Development of Pathology Deep Learning Algorithms},
+ doi = {10.4103/jpi.jpi_64_19},
+ year = {2020},
+ abstract = {Abstract unavailable},
+ url = {http://dx.doi.org/10.4103/jpi.jpi_64_19},
+ file = {Hart20.pdf:pdf\\Hart20.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Journal of Pathology Informatics},
+ automatic = {yes},
+ citation-count = {24},
+ pages = {7},
+ volume = {11},
+ pmid = {32318315},
}

@article{Hayd15a,
@@ -8780,6 +11021,25 @@ @article{Hayd18
 pmid = {30373671},
}

+@article{Heba20,
+ author = {Hebar, Timea and Snoj, Ziga and Sconfienza, Luca Maria and Vanhoenacker, Filip Maria H.M. and Shahabpour, Maryam and Salapura, Vladka and Isaac, Amanda and Drakonaki, Eleni and Vasilev, Yurii and Drape, Jean-Luc and Adriaensen, Miraude and Friedrich, Klaus and Guglielmi, Giuseppe and Vieira, Alberto and Sanal, Hatice Tuba and Kerttula, Liisa and Hellund, Johan Castberg and Nagy, Judit and Heuck, Andreas and Rutten, Matthieu and Tzalonikou, Maria and Hansen, Ulrich and Niemunis-Sawicka, Joanna and Becce, Fabio and Silvestri, Enzo and Juan, Eva Llopis San and W\"{o}rtler, Klaus},
+ title = {~~Present Status of Musculoskeletal Radiology in Europe: International Survey by the European Society of Musculoskeletal Radiology},
+ doi = {10.1055/s-0040-1713119},
+ year = {2020},
+ abstract = {No official data exist on the status of musculoskeletal (MSK) radiology in Europe. The Committee for National Societies conducted an international survey to understand the status of training, subspecialization, and local practice among the European Society of Musculoskeletal Radiology (ESSR) partner societies. This article reports the results of that survey. An online questionnaire was distributed to all 26 European national associations that act as official partner societies of the ESSR.
The 24 questions were subdivided into six sections: society structure, relationship with the national radiological society, subspecialization, present radiology practice, MSK interventional procedures, and MSK ultrasound. The findings of our study show a lack of standardized training and/or accreditation methods in the field of MSK radiology at a national level. The European diploma in musculoskeletal radiology is directed to partly overcome this problem; however, this certification is still underrecognized. Using certification methods, a more homogeneous European landscape could be created in the future with a view to subspecialist training. MSK ultrasound and MSK interventional procedures should be performed by a health professional with a solid knowledge of the relevant imaging modalities and sufficient training in MSK radiology. Recognition of MSK radiology as an official subspecialty would make the field more attractive for younger colleagues as well as attracting the brightest and best, an important key to further development of both clinical and academic radiology.},
+ url = {http://dx.doi.org/10.1055/s-0040-1713119},
+ file = {Heba20.pdf:pdf\\Heba20.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Seminars in Musculoskeletal Radiology},
+ automatic = {yes},
+ citation-count = {9},
+ pages = {323-330},
+ volume = {24},
+ pmid = {32987429},
+}

@article{Hees19,
 author = {Heesterbeek, Thomas J and de Jong, Eiko K and Acar, Ilhan E and Groenewoud, Joannes M M and Liefers, Bart and S\'{a}nchez, Clara I. and Peto, Tunde and Hoyng, Carel B and Pauleikhoff, Daniel and Hense, Hans W and den Hollander, Anneke I},
 title = {Genetic risk score has added value over initial clinical grading stage in predicting disease progression in age-related macular degeneration},
@@ -8795,7 +11055,9 @@ @article{Hees19
 optnote = {DIAG, RADIOLOGY},
 pmid = {31036867},
 gsid = {5584625454185103300},
- gscites = {7},
+ gscites = {20},
+ ss_id = {560e2100afbc64953c5155459b1f459f6e55003e},
+ all_ss_ids = {['560e2100afbc64953c5155459b1f459f6e55003e']},
}

@article{Heet11,
@@ -8812,7 +11074,9 @@ @article{Heet11
 number = {18},
 pmid = {21466728},
 gsid = {7415660003160210862},
- gscites = {1},
+ gscites = {2},
+ ss_id = {8b197ae83250002327590780c4303b5b038b9e2b},
+ all_ss_ids = {['8b197ae83250002327590780c4303b5b038b9e2b']},
}

@article{Heim07a,
@@ -8831,6 +11095,8 @@ @article{Heim07a
 month = {7},
 gsid = {9086501525841300261},
 gscites = {354},
+ ss_id = {1f2daa572caaeb54270d83a0f743f3a3f7f2a207},
+ all_ss_ids = {['1f2daa572caaeb54270d83a0f743f3a3f7f2a207']},
}

@article{Heim09,
@@ -8848,8 +11114,10 @@ @article{Heim09
 pmid = {19211338},
 month = {8},
 gsid = {6089436202002220059},
- gscites = {813},
+ gscites = {958},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/81757},
+ ss_id = {75352a06f8f39701d59c3a3a78e5cce6dd469ea9},
+ all_ss_ids = {['75352a06f8f39701d59c3a3a78e5cce6dd469ea9']},
}

@conference{Hend21,
@@ -8873,6 +11141,90 @@ @article{Hend21a
 file = {:pdf/Hend21a.pdf:PDF},
 abstract = {Purpose: To compare the performance of a convolutional neural network (CNN) to 11 radiologists in detecting scaphoid fractures on conventional radiographs of the hand, wrist, and scaphoid.
Materials and Methods: At two hospitals (Hospitals A and B), three datasets consisting of conventional hand, wrist, and scaphoid radiographs were retrospectively retrieved: a dataset of 1039 radiographs (775 patients [mean age, 48 +- 23 years; 505 females], period: 2017--2019, Hospitals A and B) for developing a scaphoid segmentation CNN, a dataset of 3000 radiographs (1846 patients [mean age, 42 +- 22 years; 937 females], period: 2003--2019, Hospital B) for developing a scaphoid fracture detection CNN, and a dataset of 190 radiographs (190 patients [mean age, 43 +- 20 years; 77 females], period: 2011--2020, Hospital A) for testing the complete fracture detection system. Both CNNs were applied consecutively: the segmentation CNN localized the scaphoid and then passed the relevant region to the detection CNN for fracture detection. In an observer study, the performance of the system was compared with 11 radiologists. Evaluation metrics included the Dice similarity coefficient (DSC), Hausdorff distance (HD), sensitivity, specificity, positive predictive value (PPV), and area under the receiver operating characteristic curve (AUC). Results: The segmentation CNN achieved a DSC of 97.4% +- 1.4 with an HD of 1.31 mm +- 1.03. The detection CNN had a sensitivity of 78% (95% CI: 70, 86), specificity of 84% (95% CI: 77, 92), PPV of 83% (95% CI: 77, 90), and AUC of 0.87 (95% CI: 0.81, 0.91). There was no difference between the AUC of the CNN and the radiologists (0.87 [95% CI: 0.81, 0.91] versus 0.83 [radiologist range: 0.78-0.85]; P = .09). Conclusion: The developed CNN achieved radiologist-level performance in detecting scaphoid fractures on conventional radiographs of the hand, wrist, and scaphoid.},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/238628},
+ ss_id = {afbdf450023be86a89bbec9dea91f031e2b76cb2},
+ all_ss_ids = {['afbdf450023be86a89bbec9dea91f031e2b76cb2']},
+ gscites = {15},
}

+@article{Hend23,
 author = {Hendrix, Nils and Hendrix, Ward and van Dijke, Kees and Maresch, Bas and Maas, Mario and Bollen, Stijn and Scholtens, Alexander and de Jonge, Milko and Ong, Lee-Ling Sharon and van Ginneken, Bram and Rutten, Matthieu},
 title = {Musculoskeletal radiologist-level performance by using deep learning for detection of scaphoid fractures on conventional multi-view radiographs of hand and wrist},
 doi = {https://doi.org/10.1007/s00330-022-09205-4},
 url = {https://link.springer.com/article/10.1007/s00330-022-09205-4},
 abstract = {Objectives: To assess how an artificial intelligence (AI) algorithm performs against five experienced musculoskeletal radiologists in diagnosing scaphoid fractures and whether it aids their diagnosis on conventional multi-view radiographs.

 Methods: Four datasets of conventional hand, wrist, and scaphoid radiographs were retrospectively acquired at two hospitals (hospitals A and B). Dataset 1 (12,990 radiographs from 3353 patients, hospital A) and dataset 2 (1117 radiographs from 394 patients, hospital B) were used for training and testing a scaphoid localization and laterality classification component. Dataset 3 (4316 radiographs from 840 patients, hospital A) and dataset 4 (688 radiographs from 209 patients, hospital B) were used for training and testing the fracture detector. The algorithm was compared with the radiologists in an observer study. Evaluation metrics included sensitivity, specificity, positive predictive value (PPV), area under the receiver operating characteristic curve (AUC), Cohen's kappa coefficient (k), fracture localization precision, and reading time.

 Results: The algorithm detected scaphoid fractures with a sensitivity of 72%, specificity of 93%, PPV of 81%, and AUC of 0.88. The AUC of the algorithm did not differ from each radiologist (0.87 [radiologists' mean], p >=.05). AI assistance improved five out of ten pairs of inter-observer Cohen's k agreements (p <.05) and reduced reading time in four radiologists (p <.001), but did not improve other metrics in the majority of radiologists (p >=.05).

 Conclusions: The AI algorithm detects scaphoid fractures on conventional multi-view radiographs at the level of five experienced musculoskeletal radiologists and could significantly shorten their reading time.},
 file = {Hend23.pdf:pdf\\Hend23.pdf:PDF},
 journal = ER,
 volume = {33},
 pages = {1575--1588},
 optnote = {DIAG, RADIOLOGY},
 year = {2023},
 ss_id = {70686c2db88212d610871b007083bcc58876b49d},
 all_ss_ids = {['70686c2db88212d610871b007083bcc58876b49d']},
 gscites = {6},
}

+@article{Hend23a,
 author = {Hendrix, Ward and Rutten, Matthieu and Hendrix, Nils and van Ginneken, Bram and Schaefer-Prokop, Cornelia and Scholten, Ernst T. and Prokop, Mathias and Jacobs, Colin},
 title = {Trends in the incidence of pulmonary nodules in chest computed tomography: 10-year results from two Dutch hospitals},
 doi = {10.1007/s00330-023-09826-3},
 pmid = {37338552},
 url = {https://doi.org/10.1007/s00330-023-09826-3},
 abstract = {Objective
 To study trends in the incidence of reported pulmonary nodules and stage I lung cancer in chest CT.

 Methods
 We analyzed the trends in the incidence of detected pulmonary nodules and stage I lung cancer in chest CT scans in the period between 2008 and 2019. Imaging metadata and radiology reports from all chest CT studies were collected from two large Dutch hospitals. A natural language processing algorithm was developed to identify studies with any reported pulmonary nodule.

 Results
 Between 2008 and 2019, a total of 74,803 patients underwent 166,688 chest CT examinations at both hospitals combined. During this period, the annual number of chest CT scans increased from 9955 scans in 6845 patients in 2008 to 20,476 scans in 13,286 patients in 2019. The proportion of patients in whom nodules (old or new) were reported increased from 38% (2595/6845) in 2008 to 50% (6654/13,286) in 2019. The proportion of patients in whom significant new nodules (>= 5 mm) were reported increased from 9% (608/6954) in 2010 to 17% (1660/9883) in 2017. The number of patients with new nodules and corresponding stage I lung cancer diagnosis tripled and their proportion doubled, from 0.4% (26/6954) in 2010 to 0.8% (78/9883) in 2017.

 Conclusion
 The identification of incidental pulmonary nodules in chest CT has steadily increased over the past decade and has been accompanied by more stage I lung cancer diagnoses.
Clinical relevance statement
 These findings stress the importance of identifying and efficiently managing incidental pulmonary nodules in routine clinical practice.},
 file = {Hend23a.pdf:pdf\\Hend23a.pdf:PDF},
 journal = ER,
 optnote = {DIAG, RADIOLOGY},
 year = {2023},
 ss_id = {9a589d8cc38a4770cf3d5819fc363a814902bb42},
 all_ss_ids = {['9a589d8cc38a4770cf3d5819fc363a814902bb42']},
 gscites = {4},
}

+@article{Hend23b,
 author = {Hendrix, Ward and Hendrix, Nils and Scholten, Ernst T. and Mourits, Mari\"{e}lle and Trap-de Jong, Joline and Schalekamp, Steven and Korst, Mike and van Leuken, Maarten and van Ginneken, Bram and Prokop, Mathias and Rutten, Matthieu and Jacobs, Colin},
 title = {Deep learning for the detection of benign and malignant pulmonary nodules in non-screening chest CT scans},
 doi = {10.1038/s43856-023-00388-5},
 url = {http://dx.doi.org/10.1038/s43856-023-00388-5},
 volume = {3},
 number = {1},
 algorithm = {https://grand-challenge.org/algorithms/lung-nodule-detector-for-ct/},
 abstract = {Background
 Outside a screening program, early-stage lung cancer is generally diagnosed after the detection of incidental nodules in clinically ordered chest CT scans. Despite the advances in artificial intelligence (AI) systems for lung cancer detection, clinical validation of these systems is lacking in a non-screening setting.

 Method
 We developed a deep learning-based AI system and assessed its performance for the detection of actionable benign nodules (requiring follow-up), small lung cancers, and pulmonary metastases in CT scans acquired in two Dutch hospitals (internal and external validation). A panel of five thoracic radiologists labeled all nodules, and two additional radiologists verified the nodule malignancy status and searched for any missed cancers using data from the national Netherlands Cancer Registry. The detection performance was evaluated by measuring the sensitivity at predefined false positive rates on a free receiver operating characteristic curve and was compared with the panel of radiologists.

 Results
 On the external test set (100 scans from 100 patients), the sensitivity of the AI system for detecting benign nodules, primary lung cancers, and metastases is respectively 94.3% (82/87, 95% CI: 88.1-98.8%), 96.9% (31/32, 95% CI: 91.7-100%), and 92.0% (104/113, 95% CI: 88.5-95.5%) at a clinically acceptable operating point of 1 false positive per scan (FP/s). These sensitivities are comparable to or higher than the radiologists, albeit with a slightly higher FP/s (average difference of 0.6).
Conclusions
 The AI system reliably detects benign and malignant pulmonary nodules in clinically indicated CT scans and can potentially assist radiologists in this setting.},
 citation-count = {0},
 file = {Hend23b.pdf:pdf\\Hend23b.pdf:PDF},
 journal = {Communications Medicine},
 pages = {156},
 optnote = {DIAG, RADIOLOGY},
 pmid = {37891360},
 year = {2023},
}

@inproceedings{Heri18,
@@ -8886,6 +11238,9 @@ @inproceedings{Heri18
 file = {Heri18.pdf:pdf\\Heri18.pdf:PDF},
 optnote = {DIAG, RADIOLOGY},
 year = {2019},
+ ss_id = {1216c70fa9e3eaa3ff5f7755e1d04147caed4818},
+ all_ss_ids = {['1216c70fa9e3eaa3ff5f7755e1d04147caed4818']},
+ gscites = {40},
}

@inproceedings{Heri19,
@@ -8902,7 +11257,9 @@ @inproceedings{Heri19
 file = {Heri19.pdf:pdf\\Heri19.pdf:PDF},
 optnote = {DIAG, RADIOLOGY},
 gsid = {17321305290346990836},
- gscites = {5},
+ gscites = {50},
+ ss_id = {aefbf6afa8d75939106323e38e06c7b8b85ce954},
+ all_ss_ids = {['aefbf6afa8d75939106323e38e06c7b8b85ce954']},
}

@article{Heri19a,
@@ -8916,6 +11273,9 @@ @article{Heri19a
 optnote = {DIAG, RADIOLOGY},
 pmid = {31538274},
 month = {9},
+ ss_id = {e50f0d6b5c1b710f13e9ebe2b3e2448518c8e26d},
+ all_ss_ids = {['e50f0d6b5c1b710f13e9ebe2b3e2448518c8e26d']},
+ gscites = {16},
}

@inproceedings{Heri19b,
@@ -8932,6 +11292,9 @@ @inproceedings{Heri19b
 file = {Heri19b.pdf:pdf\\Heri19b.pdf:PDF},
 optnote = {DIAG, RADIOLOGY},
 month = {3},
+ ss_id = {8dff3f1ad2cb2f512ea8c8ac7523699b4f9e82c5},
+ all_ss_ids = {['8dff3f1ad2cb2f512ea8c8ac7523699b4f9e82c5']},
+ gscites = {19},
}

@inproceedings{Heri20,
@@ -8944,17 +11307,9 @@ @inproceedings{Heri20
 file = {Heri20.pdf:pdf\\Heri20.pdf:PDF},
 optnote = {DIAG},
 year = {2020},
-}
-
-@article{Heri21d,
- author = {Alessa Hering and Lasse Hansen and Tony C. W. Mok and Albert C. S. Chung and Hanna Siebert and Stephanie Hager and Annkristin Lange and Sven Kuckertz and Stefan Heldmann and Wei Shao and Sulaiman Vesal and Mirabela Rusu and Geoffrey Sonn and Theo Estienne and Maria Vakalopoulou and Luyi Han and Yunzhi Huang and Pew-Thian Yap and Mikael Brudfors and Yael Balbastre and Samuel Joutard and Marc Modat and Gal Lifshitz and Dan Raviv and Jinxin Lv and Qiang Li and Vincent Jaouen and Dimitris Visvikis and Constance Fourcade and Mathieu Rubeaux and Wentao Pan and Zhe Xu and Bailiang Jian and Francesca De Benetti and Marek Wodzinski and Niklas Gunnarsson and Jens Sjolund and Daniel Grzech and Huaqi Qiu and Zeju Li and Alexander Thorley and Jinming Duan and Christoph Grossbrohmer and Andrew Hoopes and Ingerid Reinertsen and Yiming Xiao and Bennett Landman and Yuankai Huo and Keelin Murphy and Nikolas Lessmann and Bram van Ginneken and Adrian V. Dalca and Mattias P. Heinrich},
- title = {Learn2Reg: comprehensive multi-task medical image registration challenge, dataset and evaluation in the era of deep learning},
- abstract = {Image registration is a fundamental medical image analysis task, and a wide variety of approaches have been proposed. However, only a few studies have comprehensively compared medical image registration approaches on a wide range of clinically relevant tasks, in part because of the lack of availability of such diverse data. This limits the development of registration methods, the adoption of research advances into practice, and a fair benchmark across competing approaches. The Learn2Reg challenge addresses these limitations by providing a multi-task medical image registration benchmark for comprehensive characterisation of deformable registration algorithms.
A continuous evaluation will be possible at \url{https://learn2reg.grand-challenge.org}. - Learn2Reg covers a wide range of anatomies (brain, abdomen, and thorax), modalities (ultrasound, CT, MR), availability of annotations, as well as intra- and inter-patient registration evaluation. We established an easily accessible framework for training and validation of 3D registration methods, which enabled the compilation of results of over 65 individual method submissions from more than 20 unique teams. We used a complementary set of metrics, including robustness, accuracy, plausibility, and runtime, enabling unique insight into the current state-of-the-art of medical image registration. This paper describes datasets, tasks, evaluation methods and results of the challenge, and the results of further analysis of transferability to new datasets, the importance of label supervision, and resulting bias.}, - file = {Heri21d.pdf:pdf\\Heri21d.pdf:PDF}, - journal = {arXiv preprint arXiv:2112.04489}, - optnote = {DIAG, RADIOLOGY}, - year = {2021}, + ss_id = {1138a76e62c45bfa0a1daddf7ac45f826c2d5222}, + all_ss_ids = {['1138a76e62c45bfa0a1daddf7ac45f826c2d5222']}, + gscites = {0}, } @article{Heri21, @@ -8970,6 +11325,9 @@ @article{Heri21 optnote = {DIAG, RADIOLOGY}, algorithm = {https://grand-challenge.org/algorithms/deep-learning-based-ct-lung-registration/}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/236792}, + ss_id = {3c649c514443775e25e096cff35d58e393659ac4}, + all_ss_ids = {['3c649c514443775e25e096cff35d58e393659ac4', 'ee17bc918583166ad08ff307be5f77cb130486ea']}, + gscites = {38}, } @inproceedings{Heri21a, @@ -8980,29 +11338,61 @@ @inproceedings{Heri21a url = {https://openreview.net/forum?id=hzbuHGhU02Z}, optnote = {DIAG, RADIOLOGY}, abstract = {In follow-up CT examinations of cancer patients, therapy success is evaluated by estimating the change in tumor size. This process is time-consuming and error-prone. We present a pipeline that automates the segmentation and measurement of matching lesions, given a point annotation in the baseline lesion. First, a region around the point annotation is extracted, in which a deep-learning-based segmentation of the lesion is performed. Afterward, a registration algorithm finds the corresponding image region in the follow-up scan and the convolutional neural network segments lesions inside this region. In the final step, the corresponding lesion is selected. We evaluate our pipeline on clinical follow-up data comprising 125 soft-tissue lesions from 43 patients with metastatic melanoma. Our pipeline succeeded for 96% of the baseline and 80% of the follow-up lesions, showing that we have laid the foundation for an efficient quantitative follow-up assessment in clinical routine.}, + ss_id = {3a0446285a7db63672c263d701e7a5c16b5db113}, + all_ss_ids = {['3a0446285a7db63672c263d701e7a5c16b5db113']}, + gscites = {2}, } -@phdthesis{Heri22, - author = {Alessa Hering}, - title = {Deep-Learning-Based Image Registration and Tumor Follow-Up Analysis}, - url = {https://repository.ubn.ru.nl/handle/2066/273914}, - abstract = {This thesis is focused on the development of deep-learning based image registration approaches and on efficient tumor follow-up analysis. Chapter 2 describes a method for a memory-efficient weakly-supervised deep-learning model for multi-modal image registration. The method combines three 2D networks into a 2.5D registration network. Chapter 3 presents a multilevel approach for deep learning-based image registration. 
Chapter 4 describes a method that incorporates multiple anatomical constraints as anatomical priors into the registration network applied to CT lung registration. Chapter 5 presents the results of the Learn2Reg challenge and compares several conventional and deep-learning-based registration methods. Chapter 6 describes a pipeline that automates the segmentation and measurement of matching lesions, given a point annotation in the baseline lesion. The pipeline is based on a registration approach to locate corresponding image regions and a convolutional neural network to segment the lesion in the follow-up image. Chapter 7 presents the reader study, which investigates whether the assessment time for follow-up lesion segmentations is reduced by AI-assisted workflow while maintaining the same quality of segmentations.}, - file = {Heri22.pdf:pdf/Heri22.pdf:PDF}, - optnote = {DIAG}, - school = {Radboud University}, - copromotor = {N. Lessmann and S. Heldmann}, +@article{Heri21d, + author = {Alessa Hering and Lasse Hansen and Tony C. W. Mok and Albert C. S. Chung and Hanna Siebert and Stephanie Hager and Annkristin Lange and Sven Kuckertz and Stefan Heldmann and Wei Shao and Sulaiman Vesal and Mirabela Rusu and Geoffrey Sonn and Theo Estienne and Maria Vakalopoulou and Luyi Han and Yunzhi Huang and Pew-Thian Yap and Mikael Brudfors and Yael Balbastre and Samuel Joutard and Marc Modat and Gal Lifshitz and Dan Raviv and Jinxin Lv and Qiang Li and Vincent Jaouen and Dimitris Visvikis and Constance Fourcade and Mathieu Rubeaux and Wentao Pan and Zhe Xu and Bailiang Jian and Francesca De Benetti and Marek Wodzinski and Niklas Gunnarsson and Jens Sjolund and Daniel Grzech and Huaqi Qiu and Zeju Li and Alexander Thorley and Jinming Duan and Christoph Grossbrohmer and Andrew Hoopes and Ingerid Reinertsen and Yiming Xiao and Bennett Landman and Yuankai Huo and Keelin Murphy and Nikolas Lessmann and Bram van Ginneken and Adrian V. Dalca and Mattias P. Heinrich}, + title = {Learn2Reg: comprehensive multi-task medical image registration challenge, dataset and evaluation in the era of deep learning}, + abstract = {Image registration is a fundamental medical image analysis task, and a wide variety of approaches have been proposed. However, only a few studies have comprehensively compared medical image registration approaches on a wide range of clinically relevant tasks, in part because of the lack of availability of such diverse data. This limits the development of registration methods, the adoption of research advances into practice, and a fair benchmark across competing approaches. The Learn2Reg challenge addresses these limitations by providing a multi-task medical image registration benchmark for comprehensive characterisation of deformable registration algorithms. A continuous evaluation will be possible at \url{https://learn2reg.grand-challenge.org}. + Learn2Reg covers a wide range of anatomies (brain, abdomen, and thorax), modalities (ultrasound, CT, MR), availability of annotations, as well as intra- and inter-patient registration evaluation. We established an easily accessible framework for training and validation of 3D registration methods, which enabled the compilation of results of over 65 individual method submissions from more than 20 unique teams. We used a complementary set of metrics, including robustness, accuracy, plausibility, and runtime, enabling unique insight into the current state-of-the-art of medical image registration. 
This paper describes datasets, tasks, evaluation methods and results of the challenge, and the results of further analysis of transferability to new datasets, the importance of label supervision, and resulting bias.}, + file = {Heri21d.pdf:pdf\\Heri21d.pdf:PDF}, + journal = {arXiv preprint arXiv:2112.04489}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, + ss_id = {2e09fa7387659a79f41d809ce40d32cc8c847bb7}, + all_ss_ids = {['2e09fa7387659a79f41d809ce40d32cc8c847bb7']}, + gscites = {72}, +} + +@phdthesis{Heri22, + author = {Alessa Hering}, + title = {Deep-Learning-Based Image Registration and Tumor Follow-Up Analysis}, + url = {https://repository.ubn.ru.nl/handle/2066/273914}, + abstract = {This thesis is focused on the development of deep-learning based image registration approaches and on efficient tumor follow-up analysis. Chapter 2 describes a method for a memory-efficient weakly-supervised deep-learning model for multi-modal image registration. The method combines three 2D networks into a 2.5D registration network. Chapter 3 presents a multilevel approach for deep learning-based image registration. Chapter 4 describes a method that incorporates multiple anatomical constraints as anatomical priors into the registration network applied to CT lung registration. Chapter 5 presents the results of the Learn2Reg challenge and compares several conventional and deep-learning-based registration methods. Chapter 6 describes a pipeline that automates the segmentation and measurement of matching lesions, given a point annotation in the baseline lesion. The pipeline is based on a registration approach to locate corresponding image regions and a convolutional neural network to segment the lesion in the follow-up image. Chapter 7 presents the reader study, which investigates whether the assessment time for follow-up lesion segmentations is reduced by AI-assisted workflow while maintaining the same quality of segmentations.}, + file = {Heri22.pdf:pdf/Heri22.pdf:PDF}, + optnote = {DIAG}, + school = {Radboud University}, + copromotor = {N. Lessmann and S. Heldmann}, promotor = {B. van Ginneken, H.K. Hahn}, year = {2022}, journal = {PhD thesis}, } +@book{Heri22a, + author = {Hering, Alessa and Lange, Annkristin and Heldmann, Stefan and H\"{a}ger, Stephanie and Kuckertz, Sven}, + title = {~~Fraunhofer MEVIS Image Registration Solutions for the Learn2Reg 2021 Challenge}, + doi = {10.1007/978-3-030-97281-3_21}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-030-97281-3_21}, + file = {Heri22a.pdf:pdf\\Heri22a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Biomedical Image Registration, Domain Generalisation and Out-of-Distribution Analysis}, + automatic = {yes}, + citation-count = {0}, + pages = {147-152}, +} + @conference{Herm16, author = {Hermsen, Meyke and van der Laak, Jeroen}, title = {Highly multiplexed immunofluorescence using spectral imaging}, booktitle = {DPA's Pathology Visions Conference 2016, San Diego, CA, US}, year = {2016}, abstract = {Research into cancer biomarkers often comprises testing of a number of potentially relevant markers in tissue sections. Use of single antibody immunohistochemistry, however, limits the amount of information available from the analysis. One single component can be visualized at a time, prohibiting the simultaneous assessment of multiple markers. Immunofluorescence is a widely used alternative, enabling two or three simultaneous markers. 
In, for instance, the study of tumor infiltrating lymphocytes one may wish to use multiplex immunophenotyping to identify the relevant subtypes of T-cells. This may require more than three markers in a single section. Also, because of availability, it is not always possible to compile panels of markers raised in different species to prevent cross-reactivity.
 This workshop will focus on highly multiplexed imaging, allowing up to 7 markers in a single section. The method comprises a staining procedure consisting of consecutive steps of IHC staining with tyramide signal amplification and microwave stripping of the antibody. Every antibody is labeled with a different fluorescent dye. Subsequent imaging is performed using a fully automated multispectral imaging setup. This approach enables the use of antibodies raised in the same species (e.g. mouse MAbs) and is capable of eliminating the effect of autofluorescence. The workshop will consist of two parts. First the specific staining techniques will be treated. In the second part we will focus on the imaging and analysis options. There will be ample opportunities for questions and discussion.},
 file = {Herm16.pdf:pdf\\Herm16.pdf:PDF},
 optnote = {DIAG},
}

@article{Herm19,
@@ -9038,19 +11428,21 @@ @article{Herm19
 doi = {10.1681/ASN.2019020144},
 url = {https://jasn.asnjournals.org/content/30/10/1968},
 abstract = {Background: The development of deep neural networks is facilitating more advanced digital analysis of histopathologic images. We trained a convolutional neural network for multiclass segmentation of digitized kidney tissue sections stained with periodic acid-Schiff (PAS). Methods: We trained the network using multiclass annotations from 40 whole-slide images of stained
 kidney transplant biopsies and applied it to four independent data sets. We assessed multiclass segmentation performance by calculating Dice coefficients for ten tissue classes on ten transplant biopsies from the Radboud University Medical Center in Nijmegen, The Netherlands, and on ten transplant biopsies from an external center for validation. We also fully segmented 15 nephrectomy samples and calculated the
 network's glomerular detection rates and compared network-based measures with visually scored histologic components (Banff classification) in 82 kidney transplant biopsies.
 Results: The weighted mean Dice coefficients of all classes were 0.80 and 0.84 in ten kidney transplant biopsies from the Radboud center and the external center, respectively. The best segmented class was
 "glomeruli" in both data sets (Dice coefficients, 0.95 and 0.94, respectively), followed by "tubuli combined" and "interstitium." The network detected 92.7% of all glomeruli in nephrectomy samples, with
The network detected 92.7% of all glomeruli in nephrectomy samples, with - 10.4% false positives. In whole transplant biopsies, the mean intraclass correlation coefficient for glomerular counting performed by pathologists versus the network was 0.94. We found significant correlations - between visually scored histologic components and network-based measures. Conclusions: This study presents the first convolutional neural network formulticlass segmentation of PASstained - nephrectomy samples and transplant biopsies. Our network may have utility for quantitative studies involving kidney histopathology across centers and provide opportunities for deep learning applications in routine diagnostics.}, + kidney transplant biopsies and applied it to four independent data sets. We assessed multiclass segmentation performance by calculating Dice coefficients for ten tissue classes on ten transplant biopsies from the Radboud University Medical Center in Nijmegen, The Netherlands, and on ten transplant biopsies from an external center for validation. We also fully segmented 15 nephrectomy samples and calculated the + network's glomerular detection rates and compared network-based measures with visually scored histologic components (Banff classification) in 82 kidney transplant biopsies. + Results: The weighted mean Dice coefficients of all classes were 0.80 and 0.84 in ten kidney transplant biopsies from the Radboud center and the external center, respectively. The best segmented class was + "glomeruli? in both data sets (Dice coefficients, 0.95 and 0.94, respectively), followed by "tubuli combined? and "interstitium.? The network detected 92.7% of all glomeruli in nephrectomy samples, with + 10.4% false positives. In whole transplant biopsies, the mean intraclass correlation coefficient for glomerular counting performed by pathologists versus the network was 0.94. We found significant correlations + between visually scored histologic components and network-based measures. Conclusions: This study presents the first convolutional neural network formulticlass segmentation of PASstained + nephrectomy samples and transplant biopsies. Our network may have utility for quantitative studies involving kidney histopathology across centers and provide opportunities for deep learning applications in routine diagnostics.}, file = {Herm19.pdf:pdf\\Herm19.pdf:PDF}, optnote = {DIAG}, pmid = {31488607}, month = {9}, gsid = {7739327770842655007}, - gscites = {25}, + gscites = {193}, + ss_id = {fa59a6c087ec8b88c3828a95de5e0662cd2eee7e}, + all_ss_ids = {['fa59a6c087ec8b88c3828a95de5e0662cd2eee7e']}, } @conference{Herm19a, @@ -9070,6 +11462,9 @@ @article{Herm20 file = {Herm20.pdf:pdf\\Herm20.pdf:PDF}, optnote = {DIAG, INPRESS}, pmid = {32995871}, + ss_id = {3f9c978ea44205703f337574f5e07eb9d7ecb523}, + all_ss_ids = {['3f9c978ea44205703f337574f5e07eb9d7ecb523']}, + gscites = {5}, } @article{Herm21, @@ -9087,6 +11482,8 @@ @article{Herm21 pmid = {34006891}, year = {2021}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/238916}, + all_ss_ids = {['3881ae992914cd50a14782104170cc5dd5d9ae7e', 'a4d4e42f1accfd77f378549c776b7c850afbcda5']}, + gscites = {25}, } @article{Herm22, @@ -9099,11 +11496,14 @@ @article{Herm22 pages = {1418-1432}, doi = {https://doi.org/10.1016/j.ajpath.2022.06.009}, abstract = {In kidney transplant biopsies, both inflammation and chronic changes are important features that predict long-term graft survival. 
Quantitative scoring of these features is important for transplant diagnostics and kidney research. However, visual scoring is poorly reproducible and labor-intensive. The goal of this study was to investigate the potential of convolutional neural networks (CNNs) to quantify inflammation and chronic features in kidney transplant biopsies.
 A structure segmentation CNN and a lymphocyte detection CNN were applied on 125 whole-slide image pairs of PAS- and CD3-stained slides. The CNN results were used to quantify healthy and sclerotic glomeruli, interstitial fibrosis, tubular atrophy, and inflammation both within non-atrophic and atrophic tubuli, and in areas of interstitial fibrosis. The computed tissue features showed high correlations with Banff lesion scores of five pathologists. Analyses on a small subset showed that a higher CD3+ cell density within scarred regions and a higher CD3+ cell count inside atrophic tubuli correlated moderately with long-term change of estimated glomerular filtration rate.
 The presented CNNs are valid tools to yield objective quantitative information on glomeruli number, fibrotic tissue, and inflammation within scarred and non-scarred kidney parenchyma in a reproducible fashion. CNNs have the potential to improve kidney transplant diagnostics and will benefit the community as a novel method to generate surrogate endpoints for large-scale clinical studies.},
 file = {Herm22.pdf:pdf\\Herm22.pdf:PDF},
 optnote = {DIAG},
 pmid = {35843265},
+ ss_id = {1d7cb6f85cf6478da6fef4d5630f02aa3a053f8c},
+ all_ss_ids = {['1d7cb6f85cf6478da6fef4d5630f02aa3a053f8c']},
+ gscites = {14},
}

@inproceedings{Heuv15,
@@ -9120,7 +11520,9 @@ @inproceedings{Heuv15
 optnote = {DIAG, RADIOLOGY},
 month = {3},
 gsid = {17467758196509053337},
- gscites = {5},
+ gscites = {8},
+ ss_id = {1fd0e5d0355bb0798803da2ff5e743d1039819bb},
+ all_ss_ids = {['1fd0e5d0355bb0798803da2ff5e743d1039819bb']},
}

@article{Heuv16,
@@ -9132,17 +11534,19 @@ @article{Heuv16
 pages = {241 - 251},
 doi = {10.1016/j.nicl.2016.07.002},
 abstract = {In this paper a Computer Aided Detection (CAD) system is presented to automatically detect Cerebral Microbleeds (CMBs) in patients with Traumatic Brain Injury (TBI). It is believed that the presence of CMBs has clinical prognostic value in TBI patients. To study the contribution of CMBs in patient outcome, accurate detection of CMBs is required. Manual detection of CMBs in TBI patients is a time consuming task that is prone to errors, because CMBs are easily overlooked and are difficult to distinguish from blood vessels.
Manual detection of CMBs in TBI patients is a time consuming task that is prone to errors, because CMBs are easily overlooked and are difficult to distinguish from blood vessels. - - This study included 33 TBI patients. Because of the laborious nature of manually annotating CMBs, only one trained expert manually annotated the CMBs in all 33 patients. A subset of ten TBI patients was annotated by six experts. Our CAD system makes use of both Susceptibility Weighted Imaging (SWI) and T1 weighted magnetic resonance images to detect CMBs. After pre-processing these images, a two-step approach was used for automated detection of CMBs. In the first step, each voxel was characterized by twelve features based on the dark and spherical nature of CMBs and a random forest classifier was used to identify CMB candidate locations. In the second step, segmentations were made from each identified candidate location. Subsequently an object-based classifier was used to remove false positive detections of the voxel classifier, by considering seven object-based features that discriminate between spherical objects (CMBs) and elongated objects (blood vessels). A guided user interface was designed for fast evaluation of the CAD system result. During this process, an expert checked each CMB detected by the CAD system. - - A Fleiss' kappa value of only 0.24 showed that the inter-observer variability for the TBI patients in this study was very large. An expert using the guided user interface reached an average sensitivity of 93%, which was significantly higher (p = 0.03) than the average sensitivity of 77% (sd 12.4%) that the six experts manually detected. Furthermore, with the use of this CAD system the reading time was substantially reduced from one hour to 13 minutes per patient, because the CAD system only detects on average 25.9 false positives per TBI patient, resulting in 0.29 false positives per definite CMB finding.}, + + This study included 33 TBI patients. Because of the laborious nature of manually annotating CMBs, only one trained expert manually annotated the CMBs in all 33 patients. A subset of ten TBI patients was annotated by six experts. Our CAD system makes use of both Susceptibility Weighted Imaging (SWI) and T1 weighted magnetic resonance images to detect CMBs. After pre-processing these images, a two-step approach was used for automated detection of CMBs. In the first step, each voxel was characterized by twelve features based on the dark and spherical nature of CMBs and a random forest classifier was used to identify CMB candidate locations. In the second step, segmentations were made from each identified candidate location. Subsequently an object-based classifier was used to remove false positive detections of the voxel classifier, by considering seven object-based features that discriminate between spherical objects (CMBs) and elongated objects (blood vessels). A guided user interface was designed for fast evaluation of the CAD system result. During this process, an expert checked each CMB detected by the CAD system. + + A Fleiss' kappa value of only 0.24 showed that the inter-observer variability for the TBI patients in this study was very large. An expert using the guided user interface reached an average sensitivity of 93%, which was significantly higher (p = 0.03) than the average sensitivity of 77% (sd 12.4%) that the six experts manually detected. 
Furthermore, with the use of this CAD system the reading time was substantially reduced from one hour to 13 minutes per patient, because the CAD system only detects on average 25.9 false positives per TBI patient, resulting in 0.29 false positives per definite CMB finding.},
 file = {Heuv16.pdf:pdf\\Heuv16.pdf:PDF},
 optnote = {DIAG, RADIOLOGY},
 pmid = {27489772},
 publisher = {Elsevier},
 month = {2},
 gsid = {7154632919206928793},
- gscites = {37},
+ gscites = {61},
+ ss_id = {490b2120bffd758919d924ee1b062789aed16ef3},
+ all_ss_ids = {['490b2120bffd758919d924ee1b062789aed16ef3']},
}

@inproceedings{Heuv17,
@@ -9155,14 +11559,16 @@ @inproceedings{Heuv17
 pages = {101390V},
 doi = {10.1117/12.2253671},
 abstract = {Worldwide, 99% of all maternal deaths occur in low-resource countries. Ultrasound imaging can be used to detect maternal risk factors, but requires a well-trained sonographer to obtain the biometric parameters of the fetus. One of the most important biometric parameters is the fetal Head Circumference (HC). The HC can be used to estimate the Gestational Age (GA) and assess the growth of the fetus. In this paper we propose a method to estimate the fetal HC with the use of the Obstetric Sweep Protocol (OSP). With the OSP the abdomen of pregnant women is imaged with the use of sweeps. These sweeps can be taught to somebody without any prior knowledge of ultrasound within a day.
 Both the OSP and the standard two-dimensional ultrasound image for HC assessment were acquired by an experienced gynecologist from fifty pregnant women in St. Luke's Hospital in Wolisso, Ethiopia. The reference HC from the standard two-dimensional ultrasound image was compared to both the manually measured HC and the automatically measured HC from the OSP data.
 The median difference between the estimated GA from the manually measured HC using the OSP and the reference standard was -1.1 days (Median Absolute Deviation (MAD) 7.7 days). The median difference between the estimated GA from the automatically measured HC using the OSP and the reference standard was -6.2 days (MAD 8.6 days).
+ Therefore, it can be concluded that it is possible to estimate the fetal GA with simple obstetric sweeps with a deviation of only one week.}, file = {Heuv17.pdf:pdf\\Heuv17.pdf:PDF}, optnote = {DIAG, MUSIC, RADIOLOGY}, month = {3}, gsid = {11830984244326787685}, - gscites = {4}, + gscites = {5}, + ss_id = {f8d5915101494c63b044909b93b821caa688f220}, + all_ss_ids = {['f8d5915101494c63b044909b93b821caa688f220']}, } @article{Heuv17a, @@ -9195,7 +11601,9 @@ @inproceedings{Heuv17b file = {Heuv17b.pdf:pdf\\Heuv17b.pdf:PDF}, optnote = {DIAG, MUSIC, RADIOLOGY}, gsid = {948935549850201486}, - gscites = {1}, + gscites = {3}, + ss_id = {aa1ddbeb489b3e103f3dd681ae20ac0014af2209}, + all_ss_ids = {['aa1ddbeb489b3e103f3dd681ae20ac0014af2209']}, } @conference{Heuv17d, @@ -9222,8 +11630,10 @@ @article{Heuv18 pmid = {30093339}, month = {11}, gsid = {3596078569620781703}, - gscites = {2}, + gscites = {11}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/196891}, + ss_id = {80d1a72e3c8bf0e3457492b1f27a05ad6f5dd9a8}, + all_ss_ids = {['80d1a72e3c8bf0e3457492b1f27a05ad6f5dd9a8']}, } @article{Heuv18a, @@ -9241,7 +11651,9 @@ @article{Heuv18a pmid = {30138319}, publisher = {Public Library of Science}, gsid = {11316581470777681571}, - gscites = {26}, + gscites = {137}, + ss_id = {1427f469d6fb172d200d6f25837cbeaddad425e5}, + all_ss_ids = {['1427f469d6fb172d200d6f25837cbeaddad425e5']}, } @phdthesis{Heuv18b, @@ -9263,31 +11675,31 @@ @conference{Heuv19 booktitle = DBME, year = {2019}, abstract = {Worldwide, 99% of all maternal deaths occur in developing countries. Ultrasound can be used - to detect maternal risk factors, but this technique is rarely used in developing countries - because it is too expensive, and it requires a trained sonographer to acquire and interpret the - ultrasound images. In this work we use a low-cost ultrasound device which was combined - with the obstetric sweep protocol (OSP) and deep learning algorithms to automatically detect - maternal risk factors. The OSP can be taught to any health care worker without prior - knowledge of ultrasound within one day, so there is no need for a trained sonographer. - The OSP was acquired from 318 pregnant women using the low-cost MicrUs (Telemed - Ultrasound Medical Systems, Milan, Italy) in Ethiopia. Two deep learning networks and two - random forest classifiers were trained to automatically detect twin pregnancies, estimate - gestational age (GA) and determine fetal presentation. The first deep learning network - performs a frame classification, which was used to automatically separate the six sweeps of - the OSP and automatically detect the fetal head and torso. The second deep learning network - was trained to measure the fetal head circumference (HC) using all frames in which the first - deep learning system detected the fetal head. The HC was used to determine the GA. Two - random forest classifiers were trained to detect twin pregnancies and determine fetal - presentation using the frame classification of the first deep learning network. - The developed algorithm can automatically estimate the GA with an interquartile range of - 15.2 days, correctly detected 61% of all twins with a specificity of 99%, and correctly detect - all 31 breech presentations and 215 of the 216 cephalic presentations. The developed - algorithm can be computed in less than two seconds, making real-time application feasible. - The presented system is able to determine three maternal risk factors using the OSP. 
The OSP
- can be acquired without the need of a trained sonographer, which makes widespread obstetric
- ultrasound affordable and fast to implement in resource-limited settings. This makes is
- possible to refer pregnant women in time to a hospital to receive treatment when risk factors
- are detected.},
+ to detect maternal risk factors, but this technique is rarely used in developing countries
+ because it is too expensive, and it requires a trained sonographer to acquire and interpret the
+ ultrasound images. In this work we use a low-cost ultrasound device which was combined
+ with the obstetric sweep protocol (OSP) and deep learning algorithms to automatically detect
+ maternal risk factors. The OSP can be taught to any health care worker without prior
+ knowledge of ultrasound within one day, so there is no need for a trained sonographer.
+ The OSP was acquired from 318 pregnant women using the low-cost MicrUs (Telemed
+ Ultrasound Medical Systems, Milan, Italy) in Ethiopia. Two deep learning networks and two
+ random forest classifiers were trained to automatically detect twin pregnancies, estimate
+ gestational age (GA) and determine fetal presentation. The first deep learning network
+ performs a frame classification, which was used to automatically separate the six sweeps of
+ the OSP and automatically detect the fetal head and torso. The second deep learning network
+ was trained to measure the fetal head circumference (HC) using all frames in which the first
+ deep learning system detected the fetal head. The HC was used to determine the GA. Two
+ random forest classifiers were trained to detect twin pregnancies and determine fetal
+ presentation using the frame classification of the first deep learning network.
+ The developed algorithm can automatically estimate the GA with an interquartile range of
+ 15.2 days, correctly detect 61% of all twins with a specificity of 99%, and correctly detect
+ all 31 breech presentations and 215 of the 216 cephalic presentations. The developed
+ algorithm can be computed in less than two seconds, making real-time application feasible.
+ The presented system is able to determine three maternal risk factors using the OSP. The OSP
+ can be acquired without the need of a trained sonographer, which makes widespread obstetric
+ ultrasound affordable and fast to implement in resource-limited settings. This makes it
+ possible to refer pregnant women in time to a hospital to receive treatment when risk factors
+ are detected.},
  file = {Heuv19.pdf:pdf\\Heuv19.pdf:PDF},
  optnote = {DIAG, MUSIC, RADIOLOGY},
}
@@ -9307,8 +11719,10 @@ @article{Heuv19a
  pmid = {30573305},
  month = {3},
  gsid = {7628412435066869852},
- gscites = {15},
+ gscites = {66},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/201329},
+ ss_id = {f05cc42c38bae2cfbd8a283fc97a8bdede43dabe},
+ all_ss_ids = {['f05cc42c38bae2cfbd8a283fc97a8bdede43dabe']},
}

@inproceedings{Heuv19b,
@@ -9320,6 +11734,9 @@ @inproceedings{Heuv19b
  abstract = {In this study, we combine a standardized acquisition protocol with image analysis algorithms to investigate if it is possible to automatically detect maternal risk factors without a trained sonographer. The standardized acquisition protocol can be taught to any health care worker within two hours. This protocol was acquired from 280 pregnant women at St. Luke's Catholic Hospital, Wolisso, Ethiopia. A VGG-like network was used to perform a frame classification for each frame within the acquired ultrasound data.
This frame classification was used to automatically determine the number of fetuses and the fetal presentation. A U-net was trained to measure the fetal head circumference in all frames in which the VGG-like network detected a fetal head. This head circumference was used to estimate the gestational age. The results show that it is possible to automatically determine gestational age and fetal presentation, and that there is potential to detect twin pregnancies, using the standardized acquisition protocol.},
  file = {Heuv19b.pdf:pdf\\Heuv19b.pdf:PDF;:png/publications/Heuv19b.png:PNG},
  optnote = {DIAG, MUSIC, RADIOLOGY},
+ ss_id = {631e2d528d8ae1e1817f73503eadfec39b35a428},
+ all_ss_ids = {['631e2d528d8ae1e1817f73503eadfec39b35a428']},
+ gscites = {4},
}

@article{Hoek09,
@@ -9334,6 +11751,9 @@ @article{Hoek09
  optnote = {DIAG, MAGIC, RADIOLOGY},
  pmid = {20003566},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/79762},
+ ss_id = {dd8e6561ed2730e2c4cc6e6204fde7d60a2930f7},
+ all_ss_ids = {['dd8e6561ed2730e2c4cc6e6204fde7d60a2930f7']},
+ gscites = {8},
}

@article{Hoek11a,
@@ -9370,6 +11790,8 @@ @article{Hoes11
  gsid = {10827837198402292096},
  gscites = {26},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/97657},
+ ss_id = {8cce5c02b19e5f77784efa75cc6aac29c507a602},
+ all_ss_ids = {['8cce5c02b19e5f77784efa75cc6aac29c507a602']},
}

@article{Hoes12,
@@ -9387,8 +11809,10 @@ @article{Hoes12
  pmid = {22323577},
  month = {2},
  gsid = {1084747010435036452},
- gscites = {49},
+ gscites = {69},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/110461},
+ ss_id = {f2b3202d9635c6ec6114a21814eb02638ac82a05},
+ all_ss_ids = {['f2b3202d9635c6ec6114a21814eb02638ac82a05']},
}

@article{Hoes12a,
@@ -9406,6 +11830,9 @@ @article{Hoes12a
  year = {2012},
  month = {12},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/108582},
+ ss_id = {9b54fea05c39e8c1db7bcf55a970a1cc598a3398},
+ all_ss_ids = {['9b54fea05c39e8c1db7bcf55a970a1cc598a3398']},
+ gscites = {22},
}

@article{Hoes13,
@@ -9422,6 +11849,9 @@ @article{Hoes13
  number = {6},
  pmid = {23785411},
  month = {6},
+ ss_id = {c8656b4c7a205284545931f3c11b92f7219c5ffa},
+ all_ss_ids = {['c8656b4c7a205284545931f3c11b92f7219c5ffa']},
+ gscites = {17},
}

@article{Hoff16,
@@ -9440,7 +11870,9 @@ @article{Hoff16
  optnote = {DIAG, RADIOLOGY},
  pmid = {27116550},
  gsid = {18147613682259186463},
- gscites = {9},
+ gscites = {16},
+ ss_id = {f8e3b007a829e53c9a9f7527d26d495c8463dd7f},
+ all_ss_ids = {['f8e3b007a829e53c9a9f7527d26d495c8463dd7f', '37e383517c34818ad049af0aa763ad5906e9f51a']},
}

@inproceedings{Hoge10,
@@ -9457,7 +11889,9 @@ @inproceedings{Hoge10
  optnote = {DIAG, RADIOLOGY, TB},
  month = {3},
  gsid = {3846607945750329588},
- gscites = {12},
+ gscites = {14},
+ ss_id = {9b206a6f4f5b4f3c98394e8109e7a04cf730d9d4},
+ all_ss_ids = {['9b206a6f4f5b4f3c98394e8109e7a04cf730d9d4']},
}

@inproceedings{Hoge10a,
@@ -9474,7 +11908,9 @@ @inproceedings{Hoge10a
  optnote = {DIAG, RADIOLOGY, TB},
  pmid = {20879456},
  gsid = {938566649715148496},
- gscites = {71},
+ gscites = {84},
+ ss_id = {08e4dedc42f4673f3af944da663d5f50ada17176},
+ all_ss_ids = {['08e4dedc42f4673f3af944da663d5f50ada17176']},
}

@conference{Hoge10b,
@@ -9512,8 +11948,10 @@ @article{Hoge12
  pmid = {22998970},
  month = {12},
  gsid = {15700805333399506982},
- gscites = {45},
+ gscites = {59},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/110554},
+ ss_id = {561164998d2d1b53d5709c2ddb84f4c306a82624},
+ all_ss_ids = {['561164998d2d1b53d5709c2ddb84f4c306a82624']},
}

@article{Hoge13,
@@ -9531,8 +11969,10 @@
@article{Hoge13 pmid = {23822438}, month = {6}, gsid = {1997428185924918867}, - gscites = {6}, + gscites = {8}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/118444}, + ss_id = {a87ed4355c439f449612db464993e47922629779}, + all_ss_ids = {['a87ed4355c439f449612db464993e47922629779']}, } @article{Hoge13a, @@ -9550,8 +11990,10 @@ @article{Hoge13a pmid = {23880041}, month = {11}, gsid = {8722134071273679653}, - gscites = {17}, + gscites = {29}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/126283}, + ss_id = {3740f221395f8283dfc90cf82449321c96034d73}, + all_ss_ids = {['3740f221395f8283dfc90cf82449321c96034d73']}, } @phdthesis{Hoge13b, @@ -9566,6 +12008,8 @@ @phdthesis{Hoge13b promotor = {B. van Ginneken}, school = {Radboud University, Nijmegen}, journal = {PhD thesis}, + all_ss_ids = {['7a172048b86dc29f63206822728e6dd730086ff3']}, + gscites = {10}, } @article{Hoge15, @@ -9583,7 +12027,9 @@ @article{Hoge15 pmid = {25706581}, month = {12}, gsid = {9749688157397588773}, - gscites = {51}, + gscites = {73}, + ss_id = {bcdc96b82387f57a2cfda77682add942d2fe798c}, + all_ss_ids = {['bcdc96b82387f57a2cfda77682add942d2fe798c']}, } @article{Hoge17, @@ -9601,8 +12047,10 @@ @article{Hoge17 optnote = {DIAG, RADIOLOGY}, pmid = {28134985}, gsid = {11690052651496044399}, - gscites = {4}, + gscites = {5}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/175031}, + ss_id = {9ec13cc1fde3f48b96db330a11c523621f2ef6de}, + all_ss_ids = {['9ec13cc1fde3f48b96db330a11c523621f2ef6de']}, } @inproceedings{Holl14, @@ -9618,6 +12066,8 @@ @inproceedings{Holl14 optnote = {DIAG}, gsid = {5470278989230824793}, gscites = {7}, + ss_id = {56a1fe85a51f377f8985f38d5798b7037f90febb}, + all_ss_ids = {['56a1fe85a51f377f8985f38d5798b7037f90febb']}, } @conference{Holl15, @@ -9664,15 +12114,17 @@ @inproceedings{Holl16 series = SPIE, doi = {10.1117/12.2216810}, abstract = {The sensitivity of mammograms is low for women with dense breasts, since cancers may be masked by dense tissue. In this study, we investigated methods to identify women with density patterns associated with a high masking risk. Risk measures are derived from volumetric breast density maps. - We used the last negative screening mammograms of 93 women who subsequently presented with an interval cancer (IC), and, as controls, 930 randomly selected normal screening exams from women without cancer. Volumetric breast density maps were computed from the mammograms, which provide the dense tissue thickness at each location. These were used to compute absolute and percentage glandular tissue volume. - We modeled the masking risk for each pixel location using the absolute and percentage dense tissue thickness and we investigated the effect of taking the cancer location probability distribution (CLPD) into account. - For each method, we selected cases with the highest masking measure (by thresholding) and computed the fraction of ICs as a function of the fraction of controls selected. The latter can be interpreted as the negative supplemental screening rate (NSSR). - Between the models, when incorporating CLPD, no significant differences were found. In general, the methods performed better when CLPD was included. At higher NSSRs some of the investigated masking measures had a significantly higher performance than volumetric breast density. 
These measures may therefore serve as an alternative to identify women with a high risk for a masked cancer.}, + We used the last negative screening mammograms of 93 women who subsequently presented with an interval cancer (IC), and, as controls, 930 randomly selected normal screening exams from women without cancer. Volumetric breast density maps were computed from the mammograms, which provide the dense tissue thickness at each location. These were used to compute absolute and percentage glandular tissue volume. + We modeled the masking risk for each pixel location using the absolute and percentage dense tissue thickness and we investigated the effect of taking the cancer location probability distribution (CLPD) into account. + For each method, we selected cases with the highest masking measure (by thresholding) and computed the fraction of ICs as a function of the fraction of controls selected. The latter can be interpreted as the negative supplemental screening rate (NSSR). + Between the models, when incorporating CLPD, no significant differences were found. In general, the methods performed better when CLPD was included. At higher NSSRs some of the investigated masking measures had a significantly higher performance than volumetric breast density. These measures may therefore serve as an alternative to identify women with a high risk for a masked cancer.}, file = {:pdf/Holl16.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {4492352441379685768}, gscites = {4}, + ss_id = {0dc755ded5e6a44e9ce9ac47bc69aa5573b458f0}, + all_ss_ids = {['0dc755ded5e6a44e9ce9ac47bc69aa5573b458f0']}, } @inproceedings{Holl16a, @@ -9686,12 +12138,14 @@ @inproceedings{Holl16a pages = {183-189}, doi = {10.1007/978-3-319-41546-8_24}, abstract = {During mammographic acquisition, the breast is compressed between the breast support plate and the compression paddle to improve image quality and reduce dose, among other reasons. The applied force, which is measured by the imaging device, varies substantially, due to local guidelines, positioning, and breast size. Force measurements may not be very relevant though, because the amount of compression will be related to pressure rather than force. With modern image analysis techniques, the contact surface of the breast under compression can be determined and pressure can be computed retrospectively. In this study, we investigate if there is a relation between pressure applied to the breast during compression and screening performance. - In a series of 113,464 screening exams from the Dutch breast cancer screening program we computed the compression pressure applied in the MLO projections of the right and left breasts. The exams were binned into five groups of increasing applied pressure, in such a way that each group contains 20% of the exams. Thresholds were 7.68, 9.18, 10.71 and 12.81 kPa. Screening performance measures were determined for each group. Differences across the groups were investigated with a Pearson's Chi Square test. It was found that PPV and the cancer detection rate vary significantly within the five groups (p = 0.001 and p = 0.011 respectively).The PPV was 25.4, 31.2, 32.7, 25.8 and 22.0 for the five groups with increasing pressure. The recall rate, false positive rate and specificity were not statistically significant from the expectation (p-values: 0.858, 0.088 and 0.094 respectively). 
Even though differences are not significant, there is a trend that the groups with a moderate pressure have a better performance compared to the first and last category.
- The results suggest that high pressure reduces detectability of breast cancer. The best screening results were found in the groups with a moderate pressure.},
+ In a series of 113,464 screening exams from the Dutch breast cancer screening program we computed the compression pressure applied in the MLO projections of the right and left breasts. The exams were binned into five groups of increasing applied pressure, in such a way that each group contains 20% of the exams. Thresholds were 7.68, 9.18, 10.71 and 12.81 kPa. Screening performance measures were determined for each group. Differences across the groups were investigated with a Pearson's Chi Square test. It was found that PPV and the cancer detection rate vary significantly within the five groups (p = 0.001 and p = 0.011 respectively). The PPV was 25.4, 31.2, 32.7, 25.8 and 22.0 for the five groups with increasing pressure. The recall rate, false positive rate and specificity were not statistically significant from the expectation (p-values: 0.858, 0.088 and 0.094 respectively). Even though differences are not significant, there is a trend that the groups with a moderate pressure have a better performance compared to the first and last category.
+ The results suggest that high pressure reduces detectability of breast cancer. The best screening results were found in the groups with a moderate pressure.},
  file = {:pdf/Holl16a.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  gsid = {10535534135064321671},
- gscites = {14},
+ gscites = {17},
+ ss_id = {0b86365ed2e292ae5da868946983b31f7a9dc391},
+ all_ss_ids = {['0b86365ed2e292ae5da868946983b31f7a9dc391']},
}

@article{Holl16b,
@@ -9709,7 +12163,9 @@ @article{Holl16b
  optnote = {DIAG, RADIOLOGY},
  pmid = {27420382},
  gsid = {8184360643827561041},
- gscites = {14},
+ gscites = {20},
+ ss_id = {e034856f4f0949b6ebbe7d4161e253719fdcb399},
+ all_ss_ids = {['862ef80662ea6cd8e646df642abc3fd343263191', 'e034856f4f0949b6ebbe7d4161e253719fdcb399']},
}

@article{Holl17,
@@ -9723,15 +12179,17 @@ @article{Holl17
  pages = {541--548},
  doi = {10.1007/s10549-017-4137-4},
  abstract = {Purpose: Fibroglandular tissue may mask breast cancers, thereby reducing the sensitivity of mammography. Here we investigate methods for identification of women at high risk of a masked tumor, who could benefit from additional imaging.
- Methods: The last negative screening mammograms of 111 women with interval cancer (IC) within 12 months after the examination and 1110 selected normal screening exams from women without cancer were used. From the mammograms volumetric breast density maps were computed, which provide the dense tissue thickness for each pixel location. With these maps, three measurements were derived: 1) Percent dense volume (PDV), 2) Percent area where dense tissue thickness exceeds 1cm (PDA), 3) Dense Tissue Masking Model (DTMM). Breast density was scored by a breast radiologist using BI-RADS. Women with heterogeneously and extremely dense breasts were considered at high masking risk. For each masking measure, mammograms were divided into a high and low risk category, such that the same proportion of the controls is at high masking risk as with BI-RADS.
- Results: Of the women with IC, 66.1%, 71.9%, 69.2% and 63.0% were categorized to be at high masking risk with PDV, PDA, DTMM and BI-RADS respectively, against 38.5% of the controls.
The proportion of IC at high masking risk is statistically significantly different between BI-RADS and PDA (p-value 0.022). Differences between BI-RADS and PDV, or BI-RADS and DTMM, are not statistically significant. - Conclusion: Measures based on density maps, and in particular PDA, are promising tools to identify women at high risk for a masked cancer.}, + Methods: The last negative screening mammograms of 111 women with interval cancer (IC) within 12 months after the examination and 1110 selected normal screening exams from women without cancer were used. From the mammograms volumetric breast density maps were computed, which provide the dense tissue thickness for each pixel location. With these maps, three measurements were derived: 1) Percent dense volume (PDV), 2) Percent area where dense tissue thickness exceeds 1cm (PDA), 3) Dense Tissue Masking Model (DTMM). Breast density was scored by a breast radiologist using BI-RADS. Women with heterogeneously and extremely dense breasts were considered at high masking risk. For each masking measure, mammograms were divided into a high and low risk category, such that the same proportion of the controls is at high masking risk as with BI-RADS. + Results: Of the women with IC, 66.1%, 71.9%, 69.2% and 63.0% were categorized to be at high masking risk with PDV, PDA, DTMM and BI-RADS respectively, against 38.5% of the controls. The proportion of IC at high masking risk is statistically significantly different between BI-RADS and PDA (p-value 0.022). Differences between BI-RADS and PDV, or BI-RADS and DTMM, are not statistically significant. + Conclusion: Measures based on density maps, and in particular PDA, are promising tools to identify women at high risk for a masked cancer.}, file = {:pdf/Holl17.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {28161786}, publisher = {Springer Nature}, gsid = {14965501142326111206}, - gscites = {21}, + gscites = {32}, + ss_id = {907cc7a4cdfde916a2295b9e459564f6d51e1d24}, + all_ss_ids = {['907cc7a4cdfde916a2295b9e459564f6d51e1d24']}, } @article{Holl17a, @@ -9745,14 +12203,15 @@ @article{Holl17a pages = {3779--3797}, doi = {10.1088/1361-6560/aa628f}, abstract = {Fibroglandular tissue volume and percent density can be estimated in unprocessed mammograms using a physics-based method, which relies on an internal reference value representing the projection of fat only. However, pixels representing fat only may not be present in dense breasts, causing an underestimation of density measurements. In this work, we investigate alternative approaches for obtaining a tissue reference value to improve density estimations, particularly in dense breasts. - Two of three investigated reference values (F1, F2) are percentiles of the pixel value distribution in the breast interior (the contact area of breast and compression paddle). F1 is determined in a small breast interior, which minimizes the risk that peripheral pixels are included in the measurement at the cost of increasing the chance that no proper reference can be found. F2 is obtained using a larger breast interior. The new approach which is developed for very dense breasts does not require the presence of a fatty tissue region. As reference region we select the densest region in the mammogram and assume that this represents a projection of entirely dense tissue embedded between the subcutaneous fatty tissue layers. By measuring the thickness of the fat layers a reference (F3) can be computed. 
To obtain accurate breast density estimates irrespective of breast composition we investigated a combination of the results of the three reference values. We collected 202 pairs of MRI's and digital mammograms from 119 women. We compared the percent dense volume estimates based on both modalities and calculated Pearson's correlation coefficients.
+ With the references F1-F3 we found respectively a correlation of R=0.80, R=0.89 and R=0.74. Best results were obtained with the combination of the density estimations (R=0.90).
+ Results show that better volumetric density estimates can be obtained with the hybrid method, in particular for dense breasts, when algorithms are combined to obtain a fatty tissue reference value depending on breast composition.},
  file = {:pdf/Holl17a.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  pmid = {28230532},
  gsid = {849413026229250543},
  gscites = {6},
+ all_ss_ids = {['e844b6b027c94468de8a607497f95a3771b7d48b']},
}

@phdthesis{Holl17b,
@@ -9780,15 +12239,17 @@ @article{Holl17c
  doi = {10.1186/s13058-017-0917-3},
  url = {https://doi.org/10.1186/s13058-017-0917-3},
  abstract = {Background In mammography, breast compression is applied to reduce the thickness of the breast. While it is widely accepted that firm breast compression is needed to ensure acceptable image quality, guidelines remain vague about how much compression should be applied during mammogram acquisition. A quantitative parameter indicating the desirable amount of compression is not available. Consequently, little is known about the relationship between the amount of breast compression and breast cancer detectability. The purpose of this study is to determine the effect of breast compression pressure in mammography on breast cancer screening outcomes.
- Methods We used digital image analysis methods to determine breast volume, percent dense volume, and pressure from 132,776 examinations of 57,179 women participating in the Dutch population-based biennial breast cancer screening program. Pressure was estimated by dividing the compression force by the area of the contact surface between breast and compression paddle. The data was subdivided into quintiles of pressure and the number of screen-detected cancers, interval cancers, false positives, and true negatives were determined for each group. Generalized estimating equations were used to account for correlation between examinations of the same woman and for the effect of breast density and volume when estimating sensitivity, specificity, and other performance measures. Sensitivity was computed using interval cancers occurring between two screening rounds and using interval cancers within 12 months after screening. Pair-wise testing for significant differences was performed. - Results Percent dense volume increased with increasing pressure, while breast volume decreased. Sensitivity in quintiles with increasing pressure was 82.0%, 77.1%, 79.8%, 71.1%, and 70.8%. Sensitivity based on interval cancers within 12 months was significantly lower in the highest pressure quintile compared to the third (84.3% vs 93.9%, p=0.034). Specificity was lower in the lowest pressure quintile (98.0%) compared to the second, third, and fourth group (98.5%, p<0.005). Specificity of the fifth quintile was 98.4%. - Conclusion Results suggest that if too much pressure is applied during mammography this may reduce sensitivity. In contrast, if pressure is low this may decrease specificity.}, + Methods We used digital image analysis methods to determine breast volume, percent dense volume, and pressure from 132,776 examinations of 57,179 women participating in the Dutch population-based biennial breast cancer screening program. Pressure was estimated by dividing the compression force by the area of the contact surface between breast and compression paddle. The data was subdivided into quintiles of pressure and the number of screen-detected cancers, interval cancers, false positives, and true negatives were determined for each group. Generalized estimating equations were used to account for correlation between examinations of the same woman and for the effect of breast density and volume when estimating sensitivity, specificity, and other performance measures. Sensitivity was computed using interval cancers occurring between two screening rounds and using interval cancers within 12 months after screening. Pair-wise testing for significant differences was performed. + Results Percent dense volume increased with increasing pressure, while breast volume decreased. Sensitivity in quintiles with increasing pressure was 82.0%, 77.1%, 79.8%, 71.1%, and 70.8%. Sensitivity based on interval cancers within 12 months was significantly lower in the highest pressure quintile compared to the third (84.3% vs 93.9%, p=0.034). Specificity was lower in the lowest pressure quintile (98.0%) compared to the second, third, and fourth group (98.5%, p<0.005). Specificity of the fifth quintile was 98.4%. + Conclusion Results suggest that if too much pressure is applied during mammography this may reduce sensitivity. 
In contrast, if pressure is low this may decrease specificity.}, file = {:pdf/Holl17c.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {29183348}, month = {11}, gsid = {15897061577657853702}, - gscites = {19}, + gscites = {36}, + ss_id = {817c6ab7fde2a45210b6b80c4713b9147b3eba19}, + all_ss_ids = {['817c6ab7fde2a45210b6b80c4713b9147b3eba19']}, } @article{Hols15, @@ -9807,7 +12268,9 @@ @article{Hols15 optnote = {DIAG, RADIOLOGY}, pmid = {26446068}, gsid = {12137067856305542323}, - gscites = {53}, + gscites = {54}, + ss_id = {5d3d4d3b4197a9f121cd1a287dc9a80c5dfbb1ad}, + all_ss_ids = {['5d3d4d3b4197a9f121cd1a287dc9a80c5dfbb1ad']}, } @article{Hols18, @@ -9823,7 +12286,9 @@ @article{Hols18 optnote = {DIAG, RADIOLOGY}, pmid = {29270357}, gsid = {1964612976773745344}, - gscites = {20}, + gscites = {65}, + ss_id = {4458f33f0650bba1f12dc16956608386c46b76b0}, + all_ss_ids = {['4458f33f0650bba1f12dc16956608386c46b76b0']}, } @conference{Hoop08, @@ -9857,7 +12322,9 @@ @article{Hoop09 pmid = {19018537}, month = {11}, gsid = {17148475176430192127}, - gscites = {135}, + gscites = {156}, + ss_id = {6845a171a50763bf6409c2729dc4d368860e0baf}, + all_ss_ids = {['6845a171a50763bf6409c2729dc4d368860e0baf']}, } @conference{Hoop09a, @@ -9953,8 +12420,10 @@ @article{Hoop12 pmid = {22929331}, month = {11}, gsid = {18085872374519145921}, - gscites = {117}, + gscites = {151}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/109230}, + ss_id = {846de900573aebb1a0f2b36dfd7d080cad255552}, + all_ss_ids = {['846de900573aebb1a0f2b36dfd7d080cad255552']}, } @article{Hore13a, @@ -10069,7 +12538,9 @@ @inproceedings{Hoss19 file = {:pdf/Hoss19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {11874902790936487952}, - gscites = {1}, + gscites = {12}, + ss_id = {d298ada2cac3052301a355fb39cf5fe6ae0fe41f}, + all_ss_ids = {['d298ada2cac3052301a355fb39cf5fe6ae0fe41f']}, } @article{Hoss21, @@ -10080,25 +12551,41 @@ @article{Hoss21 doi = {10.1007/s00330-021-08320-y}, url = {https://doi.org/10.1007/s00330-021-08320-y}, abstract = {Objectives - To assess Prostate Imaging Reporting and Data System (PI-RADS)-trained deep learning (DL) algorithm performance and to investigate the effect of data size and prior knowledge on the detection of clinically significant prostate cancer (csPCa) in biopsy-naive men with a suspicion of PCa. - - Methods - Multi-institution data included 2734 consecutive biopsy-naive men with elevated PSA levels (>= 3 ng/mL) that underwent multi-parametric MRI (mpMRI). mpMRI exams were prospectively reported using PI-RADS v2 by expert radiologists. A DL framework was designed and trained on center 1 data (n = 1952) to predict PI-RADS >= 4 (n = 1092) lesions from bi-parametric MRI (bpMRI). Experiments included varying the number of cases and the use of automatic zonal segmentation as a DL prior. Independent center 2 cases (n = 296) that included pathology outcome (systematic and MRI targeted biopsy) were used to compute performance for radiologists and DL. The performance of detecting PI-RADS 4-5 and Gleason > 6 lesions was assessed on 782 unseen cases (486 center 1, 296 center 2) using free-response ROC (FROC) and ROC analysis. - - Results - The DL sensitivity for detecting PI-RADS >= 4 lesions was 87% (193/223, 95% CI: 82-91) at an average of 1 false positive (FP) per patient, and an AUC of 0.88 (95% CI: 0.84-0.91). The DL sensitivity for the detection of Gleason > 6 lesions was 85% (79/93, 95% CI: 77-83) @ 1 FP compared to 91% (85/93, 95% CI: 84-96) @ 0.3 FP for a consensus panel of expert radiologists. 
Data size and prior zonal knowledge significantly affected performance (4%, p<0.05).
+
+ Conclusion
+ PI-RADS-trained DL can accurately detect and localize Gleason > 6 lesions. DL could reach expert performance using substantially more than 2000 training cases, and DL zonal segmentation.},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/249485},
+ ss_id = {21face92913ea6919840f59cd3cd5e84e70ebc7d},
+ all_ss_ids = {['21face92913ea6919840f59cd3cd5e84e70ebc7d']},
+ gscites = {43},
+}
+
+@article{Hoss23,
+ author = {Hosseinzadeh, Matin and Saha, Anindo and Bosma, Joeran and Huisman, Henkjan},
+ title = {Uncertainty-Aware Semi-Supervised Learning for Prostate MRI Zonal Segmentation},
+ doi = {10.48550/ARXIV.2305.05984},
+ year = {2023},
+ abstract = {Quality of deep convolutional neural network predictions strongly depends on the size of the training dataset and the quality of the annotations. Creating annotations, especially for 3D medical image segmentation, is time-consuming and requires expert knowledge. We propose a novel semi-supervised learning (SSL) approach that requires only a relatively small number of annotations while being able to use the remaining unlabeled data to improve model performance. Our method uses a pseudo-labeling technique that employs recent deep learning uncertainty estimation models. By using the estimated uncertainty, we were able to rank pseudo-labels and automatically select the best pseudo-annotations generated by the supervised model. We applied this to prostate zonal segmentation in T2-weighted MRI scans.
Our proposed model outperformed the semi-supervised model in experiments with the ProstateX dataset and an external test set by leveraging only a subset of unlabeled data rather than the full collection of 4953 cases. The segmentation dice similarity coefficient in the transition zone and peripheral zone increased from 0.835 and 0.727 to 0.852 and 0.751, respectively, for the fully supervised model and the uncertainty-aware semi-supervised learning model (USSL). Our USSL model demonstrates the potential to allow deep learning models to be trained on large datasets without requiring full annotation. Our code is available at https://github.com/DIAGNijmegen/prostateMR-USSL.},
  url = {https://arxiv.org/abs/2305.05984},
  file = {Hoss23.pdf:pdf\\Hoss23.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  journal = {arXiv:2305.05984},
  automatic = {yes},
}

@mastersthesis{Hout20,
  author = {Thijs van den Hout},
  title = {Automatic muscle and fat segmentation in 3D abdominal CT images for body composition assessment},
  abstract = {Body composition is an informative biomarker in the treatment of cancer. In particular, low muscle mass has been associated with higher chemotherapy toxicity, shorter time to tumor progression, poorer surgical outcomes, impaired functional status, and shorter survival. However, because CT-based body composition assessment requires outlining the different tissues in the image, which is time-consuming, its practical value is currently limited. To form an estimate of body composition, different tissues are often segmented manually in a single 2D slice from the abdomen.
  For use in both routine care and in research studies, automatic segmentation of the different tissue types in the abdomen is desirable.
  This study focuses on the development and testing of an automatic approach to segment muscle and fat tissue in the entire abdomen. The four classes of interest are skeletal muscle (SM), inter-muscular adipose tissue (IMAT), visceral adipose tissue (VAT), and subcutaneous adipose tissue (SAT). A deep neural network is trained on two-dimensional CT slices at the level of the third lumbar vertebra. Three experiments were carried out with the goal of improving the network with information from other, unannotated data sources. Active learning methods were applied to sample additional data to annotate and include in the training of the model.
The proposed algorithm combines two models to segment muscle and fat in the entire abdomen and achieves state-of-the-art results. Dice scores of 0.91, 0.84, 0.97, and 0.97 were attained for SM, IMAT, VAT, and SAT, respectively, averaged over five locations throughout the abdomen.},
  file = {Hout20.pdf:pdf/Hout20.pdf:PDF},
  optnote = {DIAG},
  school = {Radboud University Medical Center},
@@ -10106,6 +12593,21 @@ @mastersthesis{Hout20
  journal = {Master thesis},
}

@book{Hovi16,
  author = {Hovinga, Marieke and Sprengers, Ralf and Kauczor, Hans-Ulrich and Schaefer-Prokop, Cornelia},
  title = {CT Imaging of Interstitial Lung Diseases},
  doi = {10.1007/978-3-319-30355-0_7},
  year = {2016},
  abstract = {Abstract unavailable},
  url = {http://dx.doi.org/10.1007/978-3-319-30355-0_7},
  file = {Hovi16.pdf:pdf\\Hovi16.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  journal = {Multidetector-Row CT of the Thorax},
  automatic = {yes},
  citation-count = {2},
  pages = {105-130},
}

@article{Hu10,
  author = {Y. Hu and R. van den Boom and T. Carter and Z. Taylor and D. Hawkes and H. U. Ahmed and M. Emberton and C. Allen and D. Barratt},
  title = {A comparison of the accuracy of statistical models of prostate motion trained using data from biomechanical simulations},
@@ -10124,42 +12626,53 @@ @article{Hu10

@article{Hu19,
  author = {Hu, Shi and Worrall, Daniel and Knegt, Stefan and Veeling, Bas and Huisman, Henkjan and Welling, Max},
- title = {Supervised uncertainty quantification for segmentation with multiple annotations},
- journal = {arXiv:1907.01949},
- year = {2019},
- abstract = {The accurate estimation of predictive uncertainty carries importance in medical scenarios such as lung node segmentation. Unfortunately, most existing works on predictive uncertainty do not return calibrated uncertainty estimates, which could be used in practice. In this work we exploit multi-grader annotation variability as a source of 'groundtruth' aleatoric uncertainty, which can be treated as a target in a supervised learning problem. We combine this groundtruth uncertainty with a Probabilistic U-Net and test on the LIDC-IDRI lung nodule CT dataset and MICCAI2012 prostate MRI dataset. We find that we are able to improve predictive uncertainty estimates. We also find that we can improve sample accuracy and sample diversity.},
- optnote = {DIAG},
+ title = {Automated deep-learning system in the assessment of MRI-visible prostate cancer: comparison of advanced zoomed diffusion-weighted imaging and conventional technique.},
+ journal = {Cancer imaging : the official publication of the International Cancer Imaging Society},
+ year = {2023},
+ abstract = {Deep-learning-based computer-aided diagnosis (DL-CAD) systems using MRI for prostate cancer (PCa) detection have demonstrated good performance. Nevertheless, DL-CAD systems are vulnerable to high heterogeneities in DWI, which can interfere with DL-CAD assessments and impair performance. This study aims to compare PCa detection of DL-CAD between zoomed-field-of-view echo-planar DWI (z-DWI) and full-field-of-view DWI (f-DWI) and find the risk factors affecting DL-CAD diagnostic efficiency. This retrospective study enrolled 354 consecutive participants who underwent MRI including T2WI, f-DWI, and z-DWI because of clinically suspected PCa. A DL-CAD was used to compare the performance of f-DWI and z-DWI both on a patient level and lesion level.
We used the area under the curve (AUC) of receiver operating characteristics analysis and alternative free-response receiver operating characteristics analysis to compare the performances of DL-CAD using f-DWI and z-DWI. The risk factors affecting the DL-CAD were analyzed using logistic regression analyses. P values less than 0.05 were considered statistically significant. DL-CAD with z-DWI had a significantly better overall accuracy than that with f-DWI both on patient level and lesion level (AUC: 0.89 vs. 0.86; AUC: 0.86 vs. 0.76; P < .001). The contrast-to-noise ratio (CNR) of lesions in DWI was an independent risk factor of false positives (odds ratio [OR] = 1.12; P < .001). Rectal susceptibility artifacts, lesion diameter, and apparent diffusion coefficients (ADC) were independent risk factors of both false positives (OR = 5.46; OR = 1.12; OR = 0.998; all P < .001) and false negatives (OR = 3.31; OR = 0.82; OR = 1.007; all P <= .03) of DL-CAD. Z-DWI has potential to improve the detection performance of a prostate MRI based DL-CAD. ChiCTR, NO. ChiCTR2100041834. Registered 7 January 2021.},
  optnote = {DIAG, RADIOLOGY},
  month = {7},
  gsid = {4596090620314967344},
  gscites = {12},
+ doi = {10.1186/s40644-023-00527-0},
+ issue = {1},
+ pages = {6},
+ volume = {23},
+ pmid = {36647150},
+ file = {Hu23.pdf:pdf\\Hu23.pdf:PDF},
+ ss_id = {b5c3309a499f0e8a3207f6b25b34ead5b0cbae9e},
+ all_ss_ids = {['b5c3309a499f0e8a3207f6b25b34ead5b0cbae9e']},
}

@article{Hu23,
  author          = {Hu, Lei and Fu, Caixia and Song, Xinyang and Grimm, Robert and von Busch, Heinrich and Benkert, Thomas and Kamen, Ali and Lou, Bin and Huisman, Henkjan and Tong, Angela and Penzkofer, Tobias and Choi, Moon Hyung and Shabunin, Ivan and Winkel, David and Xing, Pengyi and Szolar, Dieter and Coakley, Fergus and Shea, Steven and Szurowska, Edyta and Guo, Jing-Yi and Li, Liang and Li, Yue-Hua and Zhao, Jun-Gong},
  year = {2023},
  journal    = {Cancer imaging : the official publication of the International Cancer Imaging Society},
  title           = {Automated deep-learning system in the assessment of MRI-visible prostate cancer: comparison of advanced zoomed diffusion-weighted imaging and conventional technique.},
  doi             = {10.1186/s40644-023-00527-0},
  issn            = {1470-7330},
  issue           = {1},
  pages           = {6},
  pubstate        = {epublish},
  volume          = {23},
  abstract        = {Deep-learning-based computer-aided diagnosis (DL-CAD) systems using MRI for prostate cancer (PCa) detection have demonstrated good performance. Nevertheless, DL-CAD systems are vulnerable to high heterogeneities in DWI, which can interfere with DL-CAD assessments and impair performance. This study aims to compare PCa detection of DL-CAD between zoomed-field-of-view echo-planar DWI (z-DWI) and full-field-of-view DWI (f-DWI) and find the risk factors affecting DL-CAD diagnostic efficiency. This retrospective study enrolled 354 consecutive participants who underwent MRI including T2WI, f-DWI, and z-DWI because of clinically suspected PCa. A DL-CAD was used to compare the performance of f-DWI and z-DWI both on a patient level and lesion level. We used the area under the curve (AUC) of receiver operating characteristics analysis and alternative free-response receiver operating characteristics analysis to compare the performances of DL-CAD using f-DWI and z-DWI. The risk factors affecting the DL-CAD were analyzed using logistic regression analyses.
P values less than 0.05 were considered statistically significant. DL-CAD with z-DWI had a significantly better overall accuracy than that with f-DWI both on patient level and lesion level (AUC: 0.89 vs. 0.86; AUC: 0.86 vs. 0.76; P < .001). The contrast-to-noise ratio (CNR) of lesions in DWI was an independent risk factor of false positives (odds ratio [OR] = 1.12; P < .001). Rectal susceptibility artifacts, lesion diameter, and apparent diffusion coefficients (ADC) were independent risk factors of both false positives (OR = 5.46; OR = 1.12; OR = 0.998; all P < .001) and false negatives (OR = 3.31; OR = 0.82; OR = 1.007; all P ≤ .03) of DL-CAD. Z-DWI has potential to improve the detection performance of a prostate MRI based DL-CAD. ChiCTR, NO. ChiCTR2100041834. Registered 7 January 2021.},
  citation-subset = {IM},
  completed       = {2023-01-18},
  country         = {England},
  issn-linking    = {1470-7330},
  keywords        = {Male; Humans; Retrospective Studies; Deep Learning; Reproducibility of Results; Prostatic Neoplasms, diagnostic imaging, pathology; Magnetic Resonance Imaging, methods; Diffusion Magnetic Resonance Imaging, methods; Artifact; Deep learning; Diffusion magnetic resonance imaging; Prostatic neoplasms; Risk factor},
  nlm-id          = {101172931},
  owner           = {NLM},
  pii             = {6},
  pmc             = {PMC9843860},
  pmid            = {36647150},
  pubmodel        = {Electronic},
  revised         = {2023-01-20},
  optnote = {DIAG, RADIOLOGY},
  file = {Hu23.pdf:pdf\\Hu23.pdf:PDF},
}

@book{Hu19a,
  author = {Hu, Shi and Worrall, Daniel and Knegt, Stefan and Veeling, Bas and Huisman, Henkjan and Welling, Max},
  title = {Supervised Uncertainty Quantification for Segmentation with Multiple Annotations},
  doi = {10.1007/978-3-030-32245-8_16},
  year = {2019},
  abstract = {Abstract unavailable},
  url = {http://dx.doi.org/10.1007/978-3-030-32245-8_16},
  file = {Hu19a.pdf:pdf\\Hu19a.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  journal = {Lecture Notes in Computer Science},
  automatic = {yes},
  citation-count = {33},
  pages = {137-145},
}

@article{Hude20,
  author = {Hudecek, Jan and Voorwerk, Leonie and van Seijen, Maartje and Nederlof, Iris and de Maaker, Michiel and van den Berg, Jose and van de Vijver, Koen K. and Sikorska, Karolina and Adams, Sylvia and Demaria, Sandra and Viale, Giuseppe and Nielsen, Torsten O. and Badve, Sunil S. and Michiels, Stefan and Symmans, William Fraser and Sotiriou, Christos and Rimm, David L. and Hewitt, Stephen M. and Denkert, Carsten and Loibl, Sibylle and Loi, Sherene and Bartlett, John M. S. and Pruneri, Giancarlo and Dillon, Deborah A. and Cheang, Maggie C. U. and Tutt, Andrew and Hall, Jacqueline A. and Kos, Zuzana and Salgado, Roberto and Kok, Marleen and Horlings, Hugo M. and Group, International Immuno-Oncology Biomarker Working},
  title = {Application of a risk-management framework for integration of stromal tumor-infiltrating lymphocytes in clinical trials.},
  doi = {10.1038/s41523-020-0155-1},
  pages = {15},
  volume = {6},
  abstract = {Stromal tumor-infiltrating lymphocytes (sTILs) are a potential predictive biomarker for immunotherapy response in metastatic triple-negative breast cancer (TNBC). To incorporate sTILs into clinical trials and diagnostics, reliable assessment is essential. In this review, we propose a new concept, namely the implementation of a risk-management framework that enables the use of sTILs as a stratification factor in clinical trials.
We present the design of a biomarker risk-mitigation workflow that can be applied to any biomarker incorporation in clinical trials. We demonstrate the implementation of this concept using sTILs as an integral biomarker in a single-center phase II immunotherapy trial for metastatic TNBC (TONIC trial, NCT02499367), using this workflow to mitigate risks of suboptimal inclusion of sTILs in this specific trial. In this review, we demonstrate that a web-based scoring platform can mitigate potential risk factors when including sTILs in clinical trials, and we argue that this framework can be applied for any future biomarker-driven clinical trial setting.},
  file = {Hude20.pdf:pdf\\Hude20.pdf:PDF},
  journal = {NPJ breast cancer},
  optnote = {DIAG, RADIOLOGY},
  pmid = {32436923},
  year = {2020},
  all_ss_ids = {['c52daf1cb971120c4083116cfc213acbaac6faaf', 'cde27a74addf80ca2b3385a32966d51d35e0b2ff']},
  gscites = {18},
}

@article{Huis01,
@@ -10177,6 +12690,7 @@ @article{Huis01
  gsid = {17600801553193803424},
  gscites = {131},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/122355},
+ all_ss_ids = {['4fff96d3fa57f3a194b03f5524b9f45fc67dcb00']},
}

@article{Huis05,
@@ -10194,8 +12708,10 @@ @article{Huis05
  pmid = {15983070},
  month = {7},
  gsid = {1438085584005109691},
- gscites = {69},
+ gscites = {73},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/47372},
+ ss_id = {f4695e725707a1c6f725d6243d0f02bce4e5090a},
+ all_ss_ids = {['f4695e725707a1c6f725d6243d0f02bce4e5090a']},
}

@inproceedings{Huis07,
@@ -10213,6 +12729,8 @@ @inproceedings{Huis07
  gsid = {9371668300991606553},
  gscites = {6},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/53276},
+ ss_id = {cce569e05666a3817a93c128c705733c5c42df5d},
+ all_ss_ids = {['cce569e05666a3817a93c128c705733c5c42df5d']},
}

@inproceedings{Huis10,
@@ -10223,6 +12741,9 @@ @inproceedings{Huis10
  abstract = {One in 10 men will be diagnosed with prostate cancer during their life. PSA screening in combination with MR is likely to save lives at low biopsy and overtreatment rates. Computer Aided Diagnosis for prostate MR will become mandatory in a high volume screening application. This paper presents an overview including our recent work in this area.
It includes screening MR setup, quantitative imaging features, prostate segmentation, and pattern recognition.}, file = {:pdf/Huis10.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {85137dca01c20c973abc9ed419b67ab335822bf1}, + all_ss_ids = {['85137dca01c20c973abc9ed419b67ab335822bf1']}, + gscites = {2}, } @conference{Huis10a, @@ -10249,6 +12770,9 @@ @article{Huis19 publisher = {Elsevier}, month = {9}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/209081}, + ss_id = {37e39166873ad1f981a845eda627f3d161f105e1}, + all_ss_ids = {['37e39166873ad1f981a845eda627f3d161f105e1']}, + gscites = {0}, } @article{Huis21, @@ -10264,6 +12788,9 @@ @article{Huis21 year = {2001}, file = {:pdf/Huis21.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {4fff96d3fa57f3a194b03f5524b9f45fc67dcb00}, + all_ss_ids = {['4fff96d3fa57f3a194b03f5524b9f45fc67dcb00']}, + gscites = {118}, } @article{Huis96, @@ -10283,6 +12810,8 @@ @article{Huis96 gsid = {6709853578080453326}, gscites = {58}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/122252}, + ss_id = {8224f913e000d00d30ddc6e5fcf7486adc9e31c3}, + all_ss_ids = {['8224f913e000d00d30ddc6e5fcf7486adc9e31c3']}, } @article{Huis98, @@ -10302,6 +12831,8 @@ @article{Huis98 gsid = {973090722216732761}, gscites = {26}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/122231}, + ss_id = {93c05396532b476b41337b34a14b5d044f5057ef}, + all_ss_ids = {['93c05396532b476b41337b34a14b5d044f5057ef']}, } @article{Huis98a, @@ -10321,6 +12852,8 @@ @article{Huis98a gsid = {10946179538840967233}, gscites = {26}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/122220}, + ss_id = {ead58bdc63387bba332e8d57480558be85f263d7}, + all_ss_ids = {['ead58bdc63387bba332e8d57480558be85f263d7']}, } @article{Huis98b, @@ -10339,6 +12872,8 @@ @article{Huis98b month = {5}, gsid = {5765466617692113319}, gscites = {13}, + ss_id = {056af49e670a8c5385d7c39977acd8d49de9fff0}, + all_ss_ids = {['056af49e670a8c5385d7c39977acd8d49de9fff0']}, } @phdthesis{Huis98c, @@ -10370,7 +12905,9 @@ @inproceedings{Hump17 optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {11043862804825340240}, - gscites = {10}, + gscites = {16}, + ss_id = {b704867c644d95b8ddc980b19b3805181b7a28ab}, + all_ss_ids = {['b704867c644d95b8ddc980b19b3805181b7a28ab']}, } @article{Hump18, @@ -10389,7 +12926,9 @@ @article{Hump18 pmid = {29512516}, month = {4}, gsid = {16888189080982033735}, - gscites = {11}, + gscites = {30}, + ss_id = {0c7b80f04984d9cea99337c21cee93a69dae27dd}, + all_ss_ids = {['0c7b80f04984d9cea99337c21cee93a69dae27dd']}, } @article{Hump20, @@ -10408,7 +12947,22 @@ @article{Hump20 optnote = {DIAG, RADIOLOGY}, pmid = {33937830}, gsid = {16221938647525067795}, - gscites = {1}, + gscites = {21}, + ss_id = {4ad04c0dce1e8a12c49bae26252f476383ec3273}, + all_ss_ids = {['4ad04c0dce1e8a12c49bae26252f476383ec3273']}, +} + +@article{Hump23, + author = {Humpire-Mamani, Gabriel Efrain and Jacobs, Colin and Prokop, Mathias and van Ginneken, Bram and Lessmann, Nikolas}, + title = {Transfer learning from a sparsely annotated dataset of 3D medical images}, + doi = {10.48550/ARXIV.2311.05032}, + url = {https://arxiv.org/abs/2311.05032}, + abstract = {Transfer learning leverages pre-trained model features from a large dataset to save time and resources when training new models for various tasks, potentially enhancing performance. Due to the lack of large datasets in the medical imaging domain, transfer learning from one medical imaging model to other medical imaging models has not been widely explored. 
This study explores the use of transfer learning to improve the performance of deep convolutional neural networks for organ segmentation in medical imaging. A base segmentation model (3D U-Net) was trained on a large and sparsely annotated dataset; its weights were used for transfer learning on four new down-stream segmentation tasks for which a fully annotated dataset was available. We analyzed the training set size's influence to simulate scarce data. The results showed that transfer learning from the base model was beneficial when small datasets were available, providing significant performance improvements; where fine-tuning the base model is more beneficial than updating all the network weights with vanilla transfer learning. Transfer learning with fine-tuning increased the performance by up to 0.129 (+28\%) Dice score than experiments trained from scratch, and on average 23 experiments increased the performance by 0.029 Dice score in the new segmentation tasks. The study also showed that cross-modality transfer learning using CT scans was beneficial. The findings of this study demonstrate the potential of transfer learning to improve the efficiency of annotation and increase the accessibility of accurate organ segmentation in medical imaging, ultimately leading to improved patient care. We made the network definition and weights publicly available to benefit other users and researchers.}, + automatic = {yes}, + file = {Hump23.pdf:pdf\\Hump23.pdf:PDF}, + journal = {arXiv:2311.05032}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, } @inproceedings{Huo11, @@ -10461,6 +13015,8 @@ @inproceedings{Hups08 month = {3}, gsid = {15388016742450396396}, gscites = {4}, + ss_id = {9d77d77caa82df8186ac09d9f44131eab2f3b985}, + all_ss_ids = {['9d77d77caa82df8186ac09d9f44131eab2f3b985']}, } @inproceedings{Hups09, @@ -10478,6 +13034,8 @@ @inproceedings{Hups09 month = {2}, gsid = {15617708437218037687}, gscites = {8}, + ss_id = {33d9ad8ffba587147b72dbf8e166f30d5170ee99}, + all_ss_ids = {['33d9ad8ffba587147b72dbf8e166f30d5170ee99']}, } @article{Hups09a, @@ -10514,6 +13072,8 @@ @article{Hups10 month = {4}, gsid = {10192890197026575770}, gscites = {28}, + ss_id = {907b1d90f89dd803460133cc5f005797514ded42}, + all_ss_ids = {['907b1d90f89dd803460133cc5f005797514ded42']}, } @phdthesis{Hups12a, @@ -10543,7 +13103,9 @@ @article{Hups13 pmid = {22772149}, month = {7}, gsid = {12703997379279415258}, - gscites = {17}, + gscites = {21}, + ss_id = {68703321fc18fde3cd4a568b328280f69b594af8}, + all_ss_ids = {['68703321fc18fde3cd4a568b328280f69b594af8']}, } @article{Hups13a, @@ -10560,7 +13122,9 @@ @article{Hups13a pmid = {23091171}, month = {1}, gsid = {8185845335393970951}, - gscites = {45}, + gscites = {57}, + ss_id = {2bde6fd417a50bf9f52eda4fa9a1dc7a3a2a5853}, + all_ss_ids = {['2bde6fd417a50bf9f52eda4fa9a1dc7a3a2a5853']}, } @article{Igle09, @@ -10579,6 +13143,8 @@ @article{Igle09 month = {11}, gsid = {14708859348258176979}, gscites = {33}, + ss_id = {b5efadc7ed85042a5af820dfc430993a08ba00af}, + all_ss_ids = {['b5efadc7ed85042a5af820dfc430993a08ba00af']}, } @article{Isgu03, @@ -10607,6 +13173,8 @@ @inproceedings{Isgu03a optnote = {DIAG, RADIOLOGY}, gsid = {6017208270322190053}, gscites = {4}, + ss_id = {e818125c17499f8c23accc7ed09b9c7bed102724}, + all_ss_ids = {['e818125c17499f8c23accc7ed09b9c7bed102724']}, } @article{Isgu04, @@ -10623,7 +13191,9 @@ @article{Isgu04 pmid = {15035514}, month = {3}, gsid = {2474107927891219113}, - gscites = {46}, + gscites = {56}, + ss_id = {a996f17647b43c0af7b6d024b35cb990abdaf10c}, + 
all_ss_ids = {['edcb88076df807a341f31128b3c94748a4df8003', 'a996f17647b43c0af7b6d024b35cb990abdaf10c']}, } @inproceedings{Isgu04a, @@ -10637,6 +13207,8 @@ @inproceedings{Isgu04a optnote = {DIAG, RADIOLOGY}, gsid = {10038102016891366329}, gscites = {10}, + ss_id = {588dd6048ff9804005f9e5f83b587fa3ee011f9f}, + all_ss_ids = {['588dd6048ff9804005f9e5f83b587fa3ee011f9f']}, } @inproceedings{Isgu05, @@ -10650,6 +13222,8 @@ @inproceedings{Isgu05 optnote = {DIAG, RADIOLOGY}, gsid = {3137964482447567295}, gscites = {6}, + ss_id = {bbc76f4964bdb6db523ab9e9b6052755718f2f3e}, + all_ss_ids = {['bbc76f4964bdb6db523ab9e9b6052755718f2f3e']}, } @conference{Isgu05a, @@ -10675,7 +13249,9 @@ @article{Isgu07 pmid = {17500476}, month = {3}, gsid = {4877154343244870689}, - gscites = {89}, + gscites = {95}, + ss_id = {b717ba083290575d9176a4620c3abe46a949cbeb}, + all_ss_ids = {['b717ba083290575d9176a4620c3abe46a949cbeb']}, } @phdthesis{Isgu07a, @@ -10718,8 +13294,10 @@ @article{Isgu09 pmid = {19131298}, month = {7}, gsid = {13277270236947780741}, - gscites = {390}, + gscites = {413}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/79693}, + ss_id = {af1ac5542db7aab4911c3f70b080564c83d4d0cc}, + all_ss_ids = {['af1ac5542db7aab4911c3f70b080564c83d4d0cc']}, } @conference{Isgu09a, @@ -10754,8 +13332,10 @@ @article{Isgu10 pmid = {20229881}, month = {1}, gsid = {13271497965691699016}, - gscites = {33}, + gscites = {36}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/88423}, + ss_id = {1ae278d37a894cb1d50a70c79b2db493f476631e}, + all_ss_ids = {['1ae278d37a894cb1d50a70c79b2db493f476631e']}, } @inproceedings{Isgu10a, @@ -10772,7 +13352,9 @@ @inproceedings{Isgu10a optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {3227380864420599141}, - gscites = {3}, + gscites = {4}, + ss_id = {b3d2443be3028a33c52bcaeb46d7bef2addf0e5e}, + all_ss_ids = {['b3d2443be3028a33c52bcaeb46d7bef2addf0e5e']}, } @conference{Isgu11, @@ -10798,8 +13380,10 @@ @article{Isgu12 pmid = {22961297}, month = {12}, gsid = {12501517643677936280}, - gscites = {83}, + gscites = {114}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/110344}, + ss_id = {5597d386256263f67338a1b41a90ac204aa5ad38}, + all_ss_ids = {['5597d386256263f67338a1b41a90ac204aa5ad38']}, } @article{Jaco10, @@ -10817,8 +13401,10 @@ @article{Jaco10 pmid = {19875116}, month = {4}, gsid = {10232555956815609662}, - gscites = {116}, + gscites = {123}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/89727}, + ss_id = {e8c5a5edd60ed560edaa87fe335cabf1702e14c4}, + all_ss_ids = {['e8c5a5edd60ed560edaa87fe335cabf1702e14c4']}, } @article{Jaco10a, @@ -10836,8 +13422,10 @@ @article{Jaco10a pmid = {20410410}, month = {5}, gsid = {7723446783558799227}, - gscites = {41}, + gscites = {52}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/87789}, + ss_id = {0081e840aeaaa405c804371a3768db33d86bab0e}, + all_ss_ids = {['0081e840aeaaa405c804371a3768db33d86bab0e']}, } @inproceedings{Jaco10d, @@ -10854,6 +13442,9 @@ @inproceedings{Jaco10d optnote = {DIAG, RADIOLOGY}, number = {1}, month = {3}, + ss_id = {28200b417aacd207af7ff52bb7ede1fa7ee8b73a}, + all_ss_ids = {['28200b417aacd207af7ff52bb7ede1fa7ee8b73a']}, + gscites = {2}, } @inproceedings{Jaco10e, @@ -10870,6 +13461,9 @@ @inproceedings{Jaco10e optnote = {DIAG, RADIOLOGY}, number = {1}, month = {3}, + ss_id = {b7652a6a140d0de2324064d21ce7165f8ce44271}, + all_ss_ids = {['b7652a6a140d0de2324064d21ce7165f8ce44271']}, + gscites = {1}, } @inproceedings{Jaco10f, @@ -10886,6 +13480,9 @@ @inproceedings{Jaco10f optnote = {DIAG, RADIOLOGY}, 
number = {1}, month = {3}, + ss_id = {3b553792b01dc3235d0d5d19de5441c2a0570ec5}, + all_ss_ids = {['3b553792b01dc3235d0d5d19de5441c2a0570ec5']}, + gscites = {1}, } @inproceedings{Jaco11, @@ -10904,6 +13501,8 @@ @inproceedings{Jaco11 gsid = {8841871000202912452}, gscites = {40}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/96752}, + ss_id = {2b13cb88382d521bab7fe58fc4220ecec7648815}, + all_ss_ids = {['2b13cb88382d521bab7fe58fc4220ecec7648815']}, } @conference{Jaco11b, @@ -10926,6 +13525,8 @@ @inproceedings{Jaco11c optnote = {DIAG, RADIOLOGY, noduleDetectionCT, subsolidNoduleDetection}, gsid = {17175959006383118735}, gscites = {2}, + ss_id = {66625349b11735b78c23c1253d6e4b21add938ac}, + all_ss_ids = {['66625349b11735b78c23c1253d6e4b21add938ac']}, } @conference{Jaco12, @@ -10954,6 +13555,8 @@ @article{Jaco12a gsid = {10217888044027227279}, gscites = {101}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/110713}, + ss_id = {0566f2dbb3c0d1e814b2067f6a14664bdcbb9881}, + all_ss_ids = {['0566f2dbb3c0d1e814b2067f6a14664bdcbb9881']}, } @conference{Jaco12b, @@ -10971,7 +13574,7 @@ @conference{Jaco12c booktitle = RSNA, year = {2012}, abstract = {{BACKGROUND} Today, lung cancer is the most common and most deadly cancer in men and women worldwide. The recent positive results of the National Lung Screening Trial (NLST) [1] have provided clear scientific evidence that screening with low dose chest CT reduces lung cancer mortality. The National Comprehensive Cancer Network has updated their recommendations for screening and now strongly recommend the use of low-dose CT screening for individuals at high risk for lung cancer [2]. At least one health insurance company has started to cover the cost of lung screening. In its current form, however, large scale introduction of CT lung screening would put an enormous burden on radiologists. Building upon our clinical and technical experience in reading, image analysis and data processing for large screening trials in Europe (over 30,000 CT scans from 10,000 participants) and a careful review of the existing commercially available lung workstations, we have developed a new dedicated chest reading workstation with a number of innovations that allows for an optimized high throughput workflow to report on low dose chest CT scans. The application is currently available as a research prototype and is in use at five sites. It is used in clinical research and includes automated detection, linking, volumetry, interval change analysis, and estimation of malignancy for each nodule finding. A structured report for each patient is produced with follow-up recommendations according to several guidelines, including the upcoming revised Fleischner Society guidelines for the management of pulmonary nodules. {METHODOLOGY/APPLICATION} The workstation that will be demonstrated includes a number of innovations and enhancements compared to currently commercially available software. - Each scan is preprocessed and scan quality is automatically assessed. Scans with low quality, artifacts or underlying interstitial lung disease are automatically flagged. - Each scan is elastically registered to all prior scans of the same patient. Findings in prior scans are propagated and linked to findings in the current scan. All scans and processing results are preloaded in the background to ensure rapid reading. - Highly effective computerized detection (CAD) of solid nodules [3] and sub-solid nodules [4] is integrated. 
- High throughput reading with CAD as a first reader is supported. Users can accept/reject at a setting of on average 10 to 15 candidate lesions per scan and thus report much quicker than traditional thin sliding MIP viewing of the entire volume (also supported). - Each nodule is automatically characterized as solid, part-solid, or non-solid and nodules with benign characteristics are automatically flagged. Detected benign characteristics include calcifications and peri-fissural opacities (lymph nodes). - Volumetry, volume growth rate, mass and mass growth rate are automatically computed with advanced segmentation algorithms [5] that have been extended to handle sub-solid lesions and segment the solid core of part-solid nodules. If necessary, the user can interactively adjust segmentations and compare side by side with the corresponding finding in all available prior scans to detect and correct segmentation inconsistencies. - Findings are summarized in a structured report in HTML and PDF format that is stored in the database and can be sent to requesting physicians. Follow-up recommendation according to various screening algorithms and guidelines of leading Societies are included. {DEMONSTRATION STRATEGY} The exhibit will be accompanied by an informational poster that will highlight the key differences between the proposed workflow and current clinical practice. The poster will also explain algorithmic concepts that underlie the automated analysis. Attendees will be able to gain hands-on experience with the workstation and read cases and use nodule detection, automated and interactive volumetry, see the results of classification of different nodule types and produce structured reports with follow-up recommendations, all within - minutes.}, + minutes.}, optnote = {DIAG, RADIOLOGY}, } @@ -11016,8 +13619,10 @@ @article{Jaco14 pmid = {24434166}, month = {2}, gsid = {13872923651436646410}, - gscites = {164}, + gscites = {233}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/136826}, + ss_id = {704081db7542b73bbdcefe9ae7e7dc19fe4316c0}, + all_ss_ids = {['704081db7542b73bbdcefe9ae7e7dc19fe4316c0']}, } @inproceedings{Jaco14a, @@ -11035,6 +13640,8 @@ @inproceedings{Jaco14a month = {3}, gsid = {2867289361417330505}, gscites = {2}, + ss_id = {93b9abde7345b389113dd9656f6d7b9a43f261bb}, + all_ss_ids = {['93b9abde7345b389113dd9656f6d7b9a43f261bb']}, } @article{Jaco15, @@ -11052,7 +13659,9 @@ @article{Jaco15 pmid = {25478740}, month = {3}, gsid = {17914761474692895903}, - gscites = {40}, + gscites = {47}, + ss_id = {1727e0a37cb9a85f73d0ca4f64616e9ab5044c99}, + all_ss_ids = {['1727e0a37cb9a85f73d0ca4f64616e9ab5044c99']}, } @article{Jaco15a, @@ -11070,7 +13679,9 @@ @article{Jaco15a pmid = {26443601}, month = {10}, gsid = {16131269046526513617}, - gscites = {54}, + gscites = {102}, + ss_id = {67ae0419e41a69c4796b9e18bf6f59814784a933}, + all_ss_ids = {['67ae0419e41a69c4796b9e18bf6f59814784a933']}, } @phdthesis{Jaco15b, @@ -11085,6 +13696,8 @@ @phdthesis{Jaco15b promotor = {B. van Ginneken and C. M. 
Schaefer-Prokop}, school = {Radboud University, Nijmegen}, journal = {PhD thesis}, + all_ss_ids = {['1dddfd64c4a40269d63014b21ed3ed436f38b98b']}, + gscites = {1}, } @article{Jaco19, @@ -11103,8 +13716,10 @@ @article{Jaco19 pmid = {31249401}, month = {6}, gsid = {317052302478593376}, - gscites = {6}, + gscites = {26}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/209067}, + ss_id = {ec14da74fcbd65e013855cea561c2142c68f72fb}, + all_ss_ids = {['ec14da74fcbd65e013855cea561c2142c68f72fb']}, } @conference{Jaco19a, @@ -11138,6 +13753,9 @@ @article{Jaco21 abstract = {Purpose To compare the inter- and intraobserver agreement and reading times achieved when assigning Lung Imaging Reporting and Data System (Lung-RADS) categories to baseline and follow-up lung cancer screening studies by using a dedicated CT lung screening viewer with integrated nodule detection and volumetric support with those achieved by using a standard picture archiving and communication system (PACS)-like viewer. Materials and Methods Data were obtained from the National Lung Screening Trial (NLST). By using data recorded by NLST radiologists, scans were assigned to Lung-RADS categories. For each Lung-RADS category (1 or 2, 3, 4A, and 4B), 40 CT scans (20 baseline scans and 20 follow-up scans) were randomly selected for 160 participants (median age, 61 years; interquartile range, 58-66 years; 61 women) in total. Seven blinded observers independently read all CT scans twice in a randomized order with a 2-week washout period: once by using the standard PACS-like viewer and once by using the dedicated viewer. Observers were asked to assign a Lung-RADS category to each scan and indicate the risk-dominant nodule. Inter- and intraobserver agreement was analyzed by using Fleiss k values and Cohen weighted k values, respectively. Reading times were compared by using a Wilcoxon signed rank test. Results The interobserver agreement was moderate for the standard viewer and substantial for the dedicated viewer, with Fleiss k values of 0.58 (95\% CI: 0.55, 0.60) and 0.66 (95\% CI: 0.64, 0.68), respectively. The intraobserver agreement was substantial, with a mean Cohen weighted k value of 0.67. The median reading time was significantly reduced from 160 seconds with the standard viewer to 86 seconds with the dedicated viewer (P < .001). Conclusion Lung-RADS interobserver agreement increased from moderate to substantial when using the dedicated CT lung screening viewer.
The median reading time was substantially reduced when scans were read by using the dedicated CT lung screening viewer.}, optnote = {DIAG, RADIOLOGY}, pmid = {34559005}, + ss_id = {35af94bb3f5654ef60446d79e88621043fb7b543}, + all_ss_ids = {['35af94bb3f5654ef60446d79e88621043fb7b543']}, + gscites = {7}, } @article{Jaco21a, @@ -11154,6 +13772,9 @@ @article{Jaco21a optnote = {DIAG, RADIOLOGY}, pmid = {34870218}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/245169}, + ss_id = {e91661654480905a880033a9f2266343736ea7bd}, + all_ss_ids = {['e91661654480905a880033a9f2266343736ea7bd']}, + gscites = {23}, } @article{Jaco23, @@ -11165,6 +13786,9 @@ @article{Jaco23 file = {Jaco23.pdf:pdf\\Jaco23.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {37540316}, + ss_id = {574577389a89314f2159ca9580264d0c7544257c}, + all_ss_ids = {['574577389a89314f2159ca9580264d0c7544257c']}, + gscites = {0}, } @article{Jara15, @@ -11184,6 +13808,24 @@ @article{Jara15 pmid = {25719895}, } +@article{Jark22, + author = {Jarkman, Sofia and Karlberg, Micael and Poceviciute, Milda and Boden, Anna and Bandi, Peter and Litjens, Geert and Lundstrom, Claes and Treanor, Darren and van der Laak, Jeroen}, + year = {2022}, + month = {11}, + journal = CANCERS, + title = {Generalization of Deep Learning in Digital Pathology: Experience in Breast Cancer Metastasis Detection.}, + doi = {10.3390/cancers14215424}, + issue = {21}, + volume = {14}, + abstract = {Poor generalizability is a major barrier to clinical implementation of artificial intelligence in digital pathology. The aim of this study was to test the generalizability of a pretrained deep learning model to a new diagnostic setting and to a small change in surgical indication. A deep learning model for breast cancer metastases detection in sentinel lymph nodes, trained on CAMELYON multicenter data, was used as a base model, and achieved an AUC of 0.969 (95% CI 0.926-0.998) and FROC of 0.838 (95% CI 0.757-0.913) on CAMELYON16 test data. On local sentinel node data, the base model performance dropped to AUC 0.929 (95% CI 0.800-0.998) and FROC 0.744 (95% CI 0.566-0.912). On data with a change in surgical indication (axillary dissections) the base model performance indicated an even larger drop with a FROC of 0.503 (95%CI 0.201-0.911). The model was retrained with addition of local data, resulting in about a 4% increase for both AUC and FROC for sentinel nodes, and an increase of 11% in AUC and 49% in FROC for axillary nodes. Pathologist qualitative evaluation of the retrained model's output showed no missed positive slides. False positives, false negatives and one previously undetected micro-metastasis were observed. 
The study highlights the generalization challenge even when using a multicenter trained model, and that a small change in indication can considerably impact the model's performance.}, + file = {:pdf/Jark22.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {36358842}, + ss_id = {c90825e6da28cd71598d38dacf09cd1023ad1a74}, + all_ss_ids = {['c90825e6da28cd71598d38dacf09cd1023ad1a74']}, + gscites = {4}, +} + @conference{Jiao21, author = {Yiping Jiao and Mart Rijthoven and Junhong Li and Katrien Grunberg and Shumin Fei and Francesco Ciompi}, booktitle = {European Congress on Digital Pathology (ECDP)}, @@ -11191,6 +13833,53 @@ @conference{Jiao21 title = {Automatic Lung Cancer Segmentation in Histopathology Whole-Slide Images with Deep Learning}, } +@article{Jiao23, + author = {Jiao, Yiping and van der Laak, Jeroen and Albarqouni, Shadi and Li, Zhang and Tan, Tao and Bhalerao, Abhir and Ma, Jiabo and Sun, Jiamei and Pocock, Johnathan and Pluim, Josien P. W. and Koohbanani, Navid Alemi and Bashir, Raja Muhammad Saad and Raza, Shan E Ahmed and Liu, Sibo and Graham, Simon and Wetstein, Suzanne and Khurram, Syed Ali and Watson, Thomas and Rajpoot, Nasir and Veta, Mitko and Ciompi, Francesco}, + title = {LYSTO: The Lymphocyte Assessment Hackathon and Benchmark Dataset}, + doi = {10.48550/ARXIV.2301.06304}, + year = {2023}, + abstract = {We introduce LYSTO, the Lymphocyte Assessment Hackathon, which was held in conjunction with the MICCAI 2019 Conference in Shenzhen (China). The competition required participants to automatically assess the number of lymphocytes, in particular T-cells, in histopathological images of colon, breast, and prostate cancer stained with CD3 and CD8 immunohistochemistry. Differently from other challenges setup in medical image analysis, LYSTO participants were solely given a few hours to address this problem. In this paper, we describe the goal and the multi-phase organization of the hackathon; we describe the proposed methods and the on-site results. Additionally, we present post-competition results where we show how the presented methods perform on an independent set of lung cancer slides, which was not part of the initial competition, as well as a comparison on lymphocyte assessment between presented methods and a panel of pathologists. We show that some of the participants were capable to achieve pathologist-level performance at lymphocyte assessment. After the hackathon, LYSTO was left as a lightweight plug-and-play benchmark dataset on grand-challenge website, together with an automatic evaluation platform. LYSTO has supported a number of research in lymphocyte assessment in oncology. LYSTO will be a long-lasting educational challenge for deep learning and digital pathology, it is available at https://lysto.grand-challenge.org/.}, + url = {https://arxiv.org/abs/2301.06304}, + file = {Jiao23.pdf:pdf\\Jiao23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2301.06304}, + automatic = {yes}, +} + +@article{Johk21, + author = {Johkoh, Takeshi and Lee, Kyung Soo and Nishino, Mizuki and Travis, William D. and Ryu, Jay H. and Lee, Ho Yun and Ryerson, Christopher J. and Franquet, Tom\'{a}s and Bankier, Alexander A. and Brown, Kevin K. and Goo, Jin Mo and Kauczor, Hans-Ulrich and Lynch, David A. and Nicholson, Andrew G. and Richeldi, Luca and Schaefer-Prokop, Cornelia M. and Verschakelen, Johny and Raoof, Suhail and Rubin, Geoffrey D.
and Powell, Charles and Inoue, Yoshikazu and Hatabu, Hiroto}, + title = {Chest CT Diagnosis and Clinical Management of Drug-Related Pneumonitis in Patients Receiving Molecular Targeting Agents and Immune Checkpoint Inhibitors}, + doi = {10.1016/j.chest.2020.11.027}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.chest.2020.11.027}, + file = {Johk21.pdf:pdf\\Johk21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Chest}, + automatic = {yes}, + citation-count = {45}, + pages = {1107-1125}, + volume = {159}, + pmid = {33434111}, +} + @article{Jong10, author = {P. A. de Jong and J. A. Achterberg and O. A. M. Kessels and B. van Ginneken and L. Hogeweg and F. J. Beek and S. W. J. Terheggen-Lagro}, title = {Modified {C}hrispin-{N}orman chest radiography score for cystic fibrosis: observer agreement and correlation with lung function}, @@ -11205,7 +13894,9 @@ @article{Jong10 pmid = {20924586}, month = {10}, gsid = {14856487864024079881}, - gscites = {12}, + gscites = {13}, + ss_id = {0ee868fc1c16664777ec71bbe38c9ad026e2525a}, + all_ss_ids = {['0ee868fc1c16664777ec71bbe38c9ad026e2525a']}, } @article{Jong14, @@ -11240,13 +13931,29 @@ @article{Jong14a optnote = {DIAG}, } -@Conference{Jong22, - author = {de Jong, Steffen and Alves, Nat\'{a}lia and Schuurmans, Megan and Hermans, John and Huisman, Henkjan}, +@conference{Jong22, + author = {de Jong, Steffen and Alves, Nat\'{a}lia and Schuurmans, Megan and Hermans, John and Huisman, Henkjan}, booktitle = RSNA, - title = {Deep Learning for Automatic Contrast Enhancement Phase Detection on Abdominal Computed Tomography}, - abstract = {Purpose: Develop and validate a convolutional neural network (CNN) for automatic classification of intravenous contrast enhancement (CE) phase on abdominal computed tomography (CT) scans. Materials and Methods: This retrospective study included 2000 abdominal CE-CT scans from our centre, which had information about the CE phase on the DICOM header. In addition to the baseline non-contrast scan (non-CE), three contrast phases were considered at different time intervals after contrast agent administration: the arterial phase (AP) at 40-50 seconds, the portal-venous phase (PVP) at 60-70 seconds and the delayed phase (DP) at 150-240 seconds. The distribution of CE phases on the training/validation dataset was 25.3% non-CE, 25.3% AP, 27.7% PVP and 21.7% DP. Half of the scans were used for model training/validation and a half for independent model testing.
A 3-dimensional Inception CNN was trained to perform this multiclass classification task using stratified 4-fold cross-validation for 100 epochs. The model predictions from the 4 folds were ensembled to generate a final likelihood score for each contrast phase in the independent test set. The phase with the highest score was then chosen as the final model output. Results: The overall accuracy for the multiclass CNN was 92.4%, with only 76 out of the 1000 scans in the independent test set being incorrectly classified. The individual accuracies for each phase were 97.6% (244-250) for non-CE, 95.6% (239-50) for AP, 85.6% (214-250) for PVP and 90.8% (227-250) for DP. The most frequent mistake was in differentiating between the portal venous and delayed phases. This can be explained by the reduced amount of contrast agent in the abdomen and the high patient-specific and varied appearance of these two phases within the same protocol. Increasing the size and variety of the training data may improve performance for these two phases. Conclusion: Deep learning can accurately determine intravenous CE phase on multi-phase CE abdominal CT scans. Clinical Relevance Statement: CE information is critical to diagnosis but often lacks from the image metadata. DL can accurately classify CE phases, important for AI automation and the curation of high-quality training datasets.}, - optnote = {DIAG, RADIOLOGY}, - year = {2022}, + title = {Deep Learning for Automatic Contrast Enhancement Phase Detection on Abdominal Computed Tomography}, + abstract = {Purpose: Develop and validate a convolutional neural network (CNN) for automatic classification of intravenous contrast enhancement (CE) phase on abdominal computed tomography (CT) scans. Materials and Methods: This retrospective study included 2000 abdominal CE-CT scans from our centre, which had information about the CE phase on the DICOM header. In addition to the baseline non-contrast scan (non-CE), three contrast phases were considered at different time intervals after contrast agent administration: the arterial phase (AP) at 40-50 seconds, the portal-venous phase (PVP) at 60-70 seconds and the delayed phase (DP) at 150-240 seconds. The distribution of CE phases on the training/validation dataset was 25.3% non-CE, 25.3% AP, 27.7% PVP and 21.7% DP. Half of the scans were used for model training/validation and a half for independent model testing. A 3-dimensional Inception CNN was trained to perform this multiclass classification task using stratified 4-fold cross-validation for 100 epochs. The model predictions from the 4 folds were ensembled to generate a final likelihood score for each contrast phase in the independent test set. The phase with the highest score was then chosen as the final model output. Results: The overall accuracy for the multiclass CNN was 92.4%, with only 76 out of the 1000 scans in the independent test set being incorrectly classified. The individual accuracies for each phase were 97.6% (244-250) for non-CE, 95.6% (239-50) for AP, 85.6% (214-250) for PVP and 90.8% (227-250) for DP. The most frequent mistake was in differentiating between the portal venous and delayed phases. This can be explained by the reduced amount of contrast agent in the abdomen and the high patient-specific and varied appearance of these two phases within the same protocol. Increasing the size and variety of the training data may improve performance for these two phases. 
Conclusion: Deep learning can accurately determine intravenous CE phase on multi-phase CE abdominal CT scans. Clinical Relevance Statement: CE information is critical to diagnosis but often lacks from the image metadata. DL can accurately classify CE phases, important for AI automation and the curation of high-quality training datasets.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@article{K.17, + author = {K., Bhavya and K., Anjali and P., Remya}, + title = {Localization of Optic Disc and Blood Vessel Segmentation: A Survey}, + doi = {10.5120/ijca2017913091}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.5120/IJCA2017913091}, + file = {K.17.pdf:pdf\\K.17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {International Journal of Computer Applications}, + automatic = {yes}, + citation-count = {0}, + pages = {32-36}, + volume = {160}, } @inproceedings{Kabu09, @@ -11263,6 +13970,8 @@ optnote = {DIAG, RADIOLOGY}, gsid = {7155069862442251787}, gscites = {82}, + ss_id = {fb4ba9e98bc756d05ae3d26c035386efea55abe6}, + all_ss_ids = {['fb4ba9e98bc756d05ae3d26c035386efea55abe6']}, } @article{Kale94, @@ -11295,7 +14004,9 @@ @article{Kall08 pmid = {19001703}, month = {11}, gsid = {1413812771889032046}, - gscites = {20}, + gscites = {21}, + ss_id = {88ba982955fdc0ae3283fe0b6bebf48dcc828a74}, + all_ss_ids = {['88ba982955fdc0ae3283fe0b6bebf48dcc828a74']}, } @inproceedings{Kall08a, @@ -11313,6 +14024,8 @@ month = {3}, gsid = {8546414876869380852}, gscites = {1}, + ss_id = {32fbe24d39ceed1d26bbc023bbcffeb96a1a784e}, + all_ss_ids = {['32fbe24d39ceed1d26bbc023bbcffeb96a1a784e']}, } @inproceedings{Kall08b, @@ -11328,6 +14041,8 @@ optnote = {DIAG, RADIOLOGY}, gsid = {7726322658574425848}, gscites = {4}, + ss_id = {21cc4473c838a1e2a8e3edeef62dab825da689bd}, + all_ss_ids = {['21cc4473c838a1e2a8e3edeef62dab825da689bd']}, } @inproceedings{Kall09, @@ -11345,6 +14060,8 @@ month = {2}, gsid = {7746887343139581131}, gscites = {2}, + ss_id = {6bae8c8b793d36dbdb318470792f9b1708206801}, + all_ss_ids = {['6bae8c8b793d36dbdb318470792f9b1708206801']}, } @inproceedings{Kall10, @@ -11359,7 +14076,9 @@ file = {Kall10.pdf:pdf/Kall10.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {1932168475005237622}, - gscites = {4}, + gscites = {5}, + ss_id = {4da603618ab0093c1eb4f79bd10741cdad0a1184}, + all_ss_ids = {['4da603618ab0093c1eb4f79bd10741cdad0a1184']}, } @inproceedings{Kall11, @@ -11376,6 +14095,9 @@ optnote = {DIAG, RADIOLOGY}, number = {1}, month = {3}, + ss_id = {eee3029e205dda52db4cb876ba59aa538a48ea73}, + all_ss_ids = {['eee3029e205dda52db4cb876ba59aa538a48ea73']}, + gscites = {0}, } @article{Kall11a, @@ -11394,6 +14116,8 @@ month = {4}, gsid = {5777414477034271057}, gscites = {54}, + ss_id = {d5145a7cb6447a37cfa8eaf072f282e16e1e1750}, + all_ss_ids = {['d5145a7cb6447a37cfa8eaf072f282e16e1e1750']}, } @article{Kall12, @@ -11412,6 +14136,8 @@ month = {1}, gsid = {9781542785093686333}, gscites = {20}, + ss_id = {d7a2d54955eb4d75b6226e469f3136050225a7b2}, + all_ss_ids = {['d7a2d54955eb4d75b6226e469f3136050225a7b2']}, } @article{Kall12a, @@ -11430,6 +14156,8 @@ month = {7}, gsid = {3311103280421548503}, gscites = {32}, + ss_id = {53f8f940e1c62525b80fcb5e787aa688a93f18cc}, + all_ss_ids = {['53f8f940e1c62525b80fcb5e787aa688a93f18cc']}, } @conference{Kall12b, @@ -11498,7 +14226,9 @@ @article{Kall16 pmid
= {26915120}, month = {5}, gsid = {14584771914130279184}, - gscites = {249}, + gscites = {353}, + ss_id = {47c18899d0d264ff72ca2376ed8bb2d821f13a98}, + all_ss_ids = {['47c18899d0d264ff72ca2376ed8bb2d821f13a98']}, } @inproceedings{Kall16a, @@ -11513,6 +14243,9 @@ @inproceedings{Kall16a abstract = {Breast cancer risk assessment is becoming increasingly important in clinical practice. It has been suggested that features that characterize mammographic texture are more predictive for breast cancer than breast density. Yet, strong correlation between both types of features is an issue in many studies. In this work we investigate a method to generate texture features and / or scores that are independent of breast density. The method is especially useful in settings where features are learned from the data itself. We evaluate our method on a case control set comprising 394 cancers, and 1182 healthy controls. We show that the learned density independent texture features are significantly associated with breast cancer risk. As such it may aid in exploring breast characteristics that are predictive of breast cancer irrespective of breast density. Furthermore it offers opportunities to enhance personalized breast cancer screening beyond breast density.}, file = {:pdf/Kall16a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {41ff64a90222b4bf103f42c5030ad5adb498a840}, + all_ss_ids = {['41ff64a90222b4bf103f42c5030ad5adb498a840']}, + gscites = {0}, } @conference{Kall16b, @@ -11521,11 +14254,25 @@ @conference{Kall16b booktitle = RSNA, year = {2016}, abstract = {PURPOSE In personalized breast cancer screening stratification is commonly based on breast density. It has been suggested though, that breast density is a too coarse descriptor for breast cancer risk. Several authors have developed texture features that are potentially more predictive of breast cancer. Yet, in several studies, strong correlation between both types of features is an issue. In this work we investigate a method to generate deep learning texture features that are independent of breast density. - METHOD AND MATERIALS From the Dutch breast cancer screening program we collected 394 cancers and 1182 age matched healthy controls. To obtain mammograms without signs of cancerous tissue, we took the contralateral mammograms. For each image breast density was computed using automated software. Texture features were automatically learned from the data by means of techniques that are commonly used in deep learning. In the initial matching, breast density was on average higher in the cases than in the controls, as breast density is associated with breast cancer risk. Texture features and scores learned on this set (Td) are determined to be correlated to density. In order to obtain density independent features and scores (Ti) we balanced breast density over the cases and the controls by performing a rematching based on breast density. Non-matching cases and controls were excluded during training; in the testing phase all images were scored. We trained and tested Td and Ti to separate between cancers and controls with 5-fold cross-validation. We compared the performance of Td and Ti in terms of predictive power. - RESULTS Spearman's rank correlation between density and Td was 0.81 (0.79-0.83). The density adjusted odds ratios for breast cancer were 1.15 (0.81-1.65), 1.40 (0.98-2.00), and 1.39 (0.92-2.09) for quartile 2-4 respectively, relative to quartile 1. For Ti the correlation with density was 0.00 (-0.06 - 0.05). 
The odds ratios were 1.15 (0.82-1.62), 1.33 (0.96-1.86), and 1.45 (1.05-2.01). The AUC for separating cancers from controls was 0.539 (0.506-0.572). - CONCLUSION We developed a method for generating density independent texture features and scores. The obtained texture scores were significantly associated with breast cancer risk. - CLINICAL RELEVANCE/APPLICATION The obtained density independent texture features may enhance breast cancer risk models beyond breast density, and as such offer opportunities to further optimize personalized breast cancer screening.}, + METHOD AND MATERIALS From the Dutch breast cancer screening program we collected 394 cancers and 1182 age matched healthy controls. To obtain mammograms without signs of cancerous tissue, we took the contralateral mammograms. For each image breast density was computed using automated software. Texture features were automatically learned from the data by means of techniques that are commonly used in deep learning. In the initial matching, breast density was on average higher in the cases than in the controls, as breast density is associated with breast cancer risk. Texture features and scores learned on this set (Td) are determined to be correlated to density. In order to obtain density independent features and scores (Ti) we balanced breast density over the cases and the controls by performing a rematching based on breast density. Non-matching cases and controls were excluded during training; in the testing phase all images were scored. We trained and tested Td and Ti to separate between cancers and controls with 5-fold cross-validation. We compared the performance of Td and Ti in terms of predictive power. + RESULTS Spearman's rank correlation between density and Td was 0.81 (0.79-0.83). The density adjusted odds ratios for breast cancer were 1.15 (0.81-1.65), 1.40 (0.98-2.00), and 1.39 (0.92-2.09) for quartile 2-4 respectively, relative to quartile 1. For Ti the correlation with density was 0.00 (-0.06 - 0.05). The odds ratios were 1.15 (0.82-1.62), 1.33 (0.96-1.86), and 1.45 (1.05-2.01). The AUC for separating cancers from controls was 0.539 (0.506-0.572). + CONCLUSION We developed a method for generating density independent texture features and scores. The obtained texture scores were significantly associated with breast cancer risk. + CLINICAL RELEVANCE/APPLICATION The obtained density independent texture features may enhance breast cancer risk models beyond breast density, and as such offer opportunities to further optimize personalized breast cancer screening.}, + optnote = {DIAG, RADIOLOGY}, +} +@inproceedings{Kall19, + author = {Kallenberg, Michiel and Vanegas Camargo, Doiriel and Birhanu, Mahlet and Gubern-M\'{e}rida, Albert and Karssemeijer, Nico}, + title = {A deep learning method for volumetric breast density estimation from processed full field digital mammograms}, + doi = {10.1117/12.2512818}, + year = {2019}, + abstract = {Breast density is an important factor in breast cancer screening. Methods exist to measure the volume of dense breast tissue from 2D mammograms. However, these methods can only be applied to raw mammograms. Breast density classification methods that have been developed for processed mammograms are commonly based on radiologist Breast Imaging and Reporting Data System (BI-RADS) annotations. Unfortunately, such labels are subjective and may introduce personal bias and inter-reader discrepancy.
In order to avoid such limitations, this paper presents a method for estimation of percent dense tissue volume (PDV) from processed full field digital mammograms (FFDM) using a deep learning approach. A convolutional neural network (CNN) was implemented to carry out a regression task of estimating PDV using density measurement on raw FFDM as a ground truth. The dataset used for training, validation, and testing (Set A) includes over 2000 clinical cases from 3 different vendors. Our results show a high correlation of the predicted PDV to raw measurements, with a Spearman's correlation coefficient of r=0.925. The CNN was also tested on an independent set of 97 clinical cases (Set B) for which PDV measurements from FFDM and MRI were available. CNN predictions on Set B showed a high correlation with both raw FFDM and MRI data (r=0.897 and r=0.903, respectively). Set B had radiologist annotated BI-RADS labels, which agreed with the estimated values to a high degree, showing the ability of our CNN to make a distinction between different BI-RADS categories comparable to methods applied to raw mammograms.}, + url = {http://dx.doi.org/10.1117/12.2512818}, + file = {Kall19.pdf:pdf\\Kall19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + journal = {Medical Imaging 2019: Computer-Aided Diagnosis}, + automatic = {yes}, + citation-count = {0}, } @mastersthesis{Kamp18, @@ -11538,6 +14285,54 @@ @mastersthesis{Kamp18 journal = {Master thesis}, } +@article{Kamp22, + author = {van der Kamp, Ananda and Waterlander, Tomas J. and de Bel, Thomas and van der Laak, Jeroen and van den Heuvel-Eibrink, Marry M. and Mavinkurve-Groothuis, Annelies M. C. and de Krijger, Ronald R.}, + title = {Artificial Intelligence in Pediatric Pathology: The Extinction of a Medical Profession or the Key to a Bright Future?}, + doi = {10.1177/10935266211059809}, + year = {2022}, + abstract = { Artificial Intelligence (AI) has become of increasing interest over the past decade. While digital image analysis (DIA) is already being used in radiology, it is still in its infancy in pathology. One of the reasons is that large-scale digitization of glass slides has only recently become available. With the advent of digital slide scanners, that digitize glass slides into whole slide images, many labs are now in a transition phase towards digital pathology. However, only few departments worldwide are currently fully digital. Digital pathology provides the ability to annotate large datasets and train computers to develop and validate robust algorithms, similar to radiology. In this opinionated overview, we will give a brief introduction into AI in pathology, discuss the potential positive and negative implications and speculate about the future role of AI in the field of pediatric pathology. }, + url = {http://dx.doi.org/10.1177/10935266211059809}, + file = {Kamp22.pdf:pdf\\Kamp22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Pediatric and Developmental Pathology}, + automatic = {yes}, + citation-count = {3}, + pages = {380-387}, + volume = {25}, + pmid = {35238696}, +} + +@article{Kamp23, + author = {van der Kamp, Ananda and de Bel, Thomas and van Alst, Ludo and Rutgers, Jikke and van den Heuvel-Eibrink, Marry M. and Mavinkurve-Groothuis, Annelies M. C.
and van der Laak, Jeroen and de Krijger, Ronald R.}, + title = {Automated Deep Learning-Based Classification of Wilms Tumor Histopathology}, + doi = {10.3390/cancers15092656}, + year = {2023}, + abstract = {(1) Background: Histopathological assessment of Wilms tumors (WT) is crucial for risk group classification to guide postoperative stratification in chemotherapy pre-treated WT cases. However, due to the heterogeneous nature of the tumor, significant interobserver variation between pathologists in WT diagnosis has been observed, potentially leading to misclassification and suboptimal treatment. We investigated whether artificial intelligence (AI) can contribute to accurate and reproducible histopathological assessment of WT through recognition of individual histopathological tumor components. (2) Methods: We assessed the performance of a deep learning-based AI system in quantifying WT components in hematoxylin and eosin-stained slides by calculating the S{\o}rensen-Dice coefficient for fifteen predefined renal tissue components, including six tumor-related components. We trained the AI system using multiclass annotations from 72 whole-slide images of patients diagnosed with WT. (3) Results: The overall Dice coefficient for all fifteen tissue components was 0.85 and for the six tumor-related components was 0.79. Tumor segmentation worked best to reliably identify necrosis (Dice coefficient 0.98) and blastema (Dice coefficient 0.82). (4) Conclusions: Accurate histopathological classification of WT may be feasible using a digital pathology-based AI system in a national cohort of WT patients.}, + url = {http://dx.doi.org/10.3390/cancers15092656}, + file = {Kamp23.pdf:pdf\\Kamp23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Cancers}, + automatic = {yes}, + citation-count = {0}, + pages = {2656}, + volume = {15}, + pmid = {37174121}, +} + +@inproceedings{Kand16, + author = {Kandemir, Melih and Haussmann, Manuel and Diego, Ferran and Rajamani, Kumar and van der Laak, Jeroen and Hamprecht, Fred}, + title = {Variational Weakly Supervised Gaussian Processes}, + doi = {10.5244/c.30.71}, + year = {2016}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.5244/C.30.71}, + file = {Kand16.pdf:pdf\\Kand16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Proceedings of the British Machine Vision Conference 2016}, + automatic = {yes}, + citation-count = {3}, +} + @conference{Kare11, author = {Gopal Karemore and Sami Brandt and Nico Karssemeijer and Mads Nielsen}, title = {Discovery of Mammogram Regions That Show Early Changes Due to the Development of Breast Cancer: A Preliminary Work}, @@ -11563,6 +14358,8 @@ @article{Kare14 month = {10}, gsid = {11356061311270828475}, gscites = {5}, + ss_id = {e19cdbf6fdb3c4b3e983841d91f06ac3c63fb59a}, + all_ss_ids = {['e19cdbf6fdb3c4b3e983841d91f06ac3c63fb59a']}, } @article{Kars03, @@ -11596,7 +14393,9 @@ @inproceedings{Kars04 optnote = {DIAG, RADIOLOGY}, month = {5}, gsid = {14634595103328879627}, - gscites = {11}, + gscites = {13}, + ss_id = {9ed02f28fca58adb93cb13413cfa33a623dae44d}, + all_ss_ids = {['9ed02f28fca58adb93cb13413cfa33a623dae44d']}, } @inproceedings{Kars05, @@ -11614,6 +14413,8 @@ pmid = {17354701}, gsid = {17011624032764085649}, gscites = {3}, + ss_id = {48b2c69dd2ce2556fd02fdee659f8db428fe95e2}, + all_ss_ids = {['48b2c69dd2ce2556fd02fdee659f8db428fe95e2']}, } @article{Kars06, @@ -11630,7 +14431,9 @@ pmid = {17209117}, month = {12}, gsid = {4534465409697415448}, - gscites = {30}, +
gscites = {34}, + ss_id = {e87c9bd2c96075f92822b5ffe858db76c329fb09}, + all_ss_ids = {['e87c9bd2c96075f92822b5ffe858db76c329fb09']}, } @inproceedings{Kars08, @@ -11646,6 +14449,8 @@ @inproceedings{Kars08 optnote = {DIAG, RADIOLOGY}, gsid = {9233122123317016556}, gscites = {6}, + ss_id = {e40c27c42582851293a3f05817019972b24eb06e}, + all_ss_ids = {['e40c27c42582851293a3f05817019972b24eb06e']}, } @inproceedings{Kars08a, @@ -11697,6 +14502,8 @@ @inproceedings{Kars10 optnote = {DIAG, RADIOLOGY}, gsid = {4540362409635407015}, gscites = {3}, + ss_id = {8da87939da5e8a3ab8bffa875cfcdee748bc2280}, + all_ss_ids = {['8da87939da5e8a3ab8bffa875cfcdee748bc2280']}, } @book{Kars10b, @@ -11751,10 +14558,10 @@ @conference{Kars16 booktitle = RSNA, year = {2016}, abstract = {PURPOSE While firm breast compression is generally thought to be required for high quality mammograms the relationship between the amount of compression and screening performance has not been studied systematically. The aim of this study is to determine breast cancer screening outcomes in relation to the compression pressure applied during mammography. - METHOD AND MATERIALS A consecutive series of 111,870 digital screening examinations performed in 53,684 women between July 2003 and December 2011 was collected from a screening centre operating within a nationwide breast cancer screening program. A total of 662 screen-detected cancers were included in this series, while 280 interval cancers corresponding to the selected exams were identified by linkage to the Dutch Cancer Registry. Using a research version of Volpara Density software (Volpara Solutions, Wellington, NZ) breast volume (V), dense tissue volume (VD), and volumetric density grade (VDG), were estimated for each exam, while compression pressure was estimated for medio-lateral oblique (MLO) view by dividing the compression force by the area of contact surface between the breast and the compression paddle. We calculated frequencies of recalls, screen-detected cancers, and interval cancers stratified by compression pressure in five groups and derived program sensitivity, specificity, and positive predictive value (PPV). In addition, for each group we computed mean values of V, VD, and VDG. For statistical analysis Pearson's Chi-squared test was used. - RESULTS Screening outcomes were different in the five compression pressure groups (p=0.004). Program sensitivity decreased with increasing pressure (77.0%, 69.7%, 74.5%, 63.2%, 66.7%) (p=0.02), specificity was similar, and PPV was highest in the midrange of pressure (28.5%, 31.0%, 34.2%, 26.7%, 25.7%) (p=0.03). Cutoff points for pressure dividing the data in groups of 20% were 7.7, 9.2, 10.7, 12.8 kPa. V and VD both decreased with increasing pressure. Mean VDG moderately increased (1.75, 2.0, 2.2, 2.4, 2.8). - CONCLUSION Results suggest that if too much pressure is applied during mammography this may increase interval cancer rates and decrease PPV. - CLINICAL RELEVANCE/APPLICATION Controlling pressure during mammography is important to decrease the discomfort experienced by women, but it may also be required to optimize screening outcomes.}, + METHOD AND MATERIALS A consecutive series of 111,870 digital screening examinations performed in 53,684 women between July 2003 and December 2011 was collected from a screening centre operating within a nationwide breast cancer screening program. 
A total of 662 screen-detected cancers were included in this series, while 280 interval cancers corresponding to the selected exams were identified by linkage to the Dutch Cancer Registry. Using a research version of Volpara Density software (Volpara Solutions, Wellington, NZ) breast volume (V), dense tissue volume (VD), and volumetric density grade (VDG), were estimated for each exam, while compression pressure was estimated for medio-lateral oblique (MLO) view by dividing the compression force by the area of contact surface between the breast and the compression paddle. We calculated frequencies of recalls, screen-detected cancers, and interval cancers stratified by compression pressure in five groups and derived program sensitivity, specificity, and positive predictive value (PPV). In addition, for each group we computed mean values of V, VD, and VDG. For statistical analysis Pearson's Chi-squared test was used. + RESULTS Screening outcomes were different in the five compression pressure groups (p=0.004). Program sensitivity decreased with increasing pressure (77.0%, 69.7%, 74.5%, 63.2%, 66.7%) (p=0.02), specificity was similar, and PPV was highest in the midrange of pressure (28.5%, 31.0%, 34.2%, 26.7%, 25.7%) (p=0.03). Cutoff points for pressure dividing the data in groups of 20% were 7.7, 9.2, 10.7, 12.8 kPa. V and VD both decreased with increasing pressure. Mean VDG moderately increased (1.75, 2.0, 2.2, 2.4, 2.8). + CONCLUSION Results suggest that if too much pressure is applied during mammography this may increase interval cancer rates and decrease PPV. + CLINICAL RELEVANCE/APPLICATION Controlling pressure during mammography is important to decrease the discomfort experienced by women, but it may also be required to optimize screening outcomes.}, optnote = {DIAG, RADIOLOGY}, } @@ -11823,6 +14630,8 @@ @article{Kars90 month = {3}, gsid = {16194894447799709940}, gscites = {26}, + ss_id = {0ea655e5dd94e51e3658e006318844d2524927da}, + all_ss_ids = {['0ea655e5dd94e51e3658e006318844d2524927da']}, } @article{Kars90b, @@ -11841,6 +14650,8 @@ @article{Kars90b month = {1}, gsid = {12251457361547574270}, gscites = {25}, + ss_id = {84bc3ee567e0b5e9d9b3998898d700878376ed78}, + all_ss_ids = {['84bc3ee567e0b5e9d9b3998898d700878376ed78']}, } @inproceedings{Kars91, @@ -11859,6 +14670,8 @@ @inproceedings{Kars91 month = {6}, gsid = {17755276782765678459}, gscites = {28}, + ss_id = {cc026f046a6629ef7be86172cb75cbd1410fa183}, + all_ss_ids = {['cc026f046a6629ef7be86172cb75cbd1410fa183']}, } @article{Kars92, @@ -11894,6 +14707,8 @@ @article{Kars93 month = {5}, gsid = {1209704105351459911}, gscites = {121}, + ss_id = {bfcdd7d4a73cc53ef4e9185b6f1fea39a26533ce}, + all_ss_ids = {['bfcdd7d4a73cc53ef4e9185b6f1fea39a26533ce']}, } @inproceedings{Kars93a, @@ -11908,7 +14723,9 @@ @inproceedings{Kars93a optnote = {DIAG, RADIOLOGY}, month = {7}, gsid = {16235929636618064279}, - gscites = {9}, + gscites = {12}, + ss_id = {eb0f63cfc6f5acf355aacb0d973a06304c43d5cd}, + all_ss_ids = {['eb0f63cfc6f5acf355aacb0d973a06304c43d5cd']}, } @inproceedings{Kars93b, @@ -11926,6 +14743,8 @@ @inproceedings{Kars93b month = {7}, gsid = {11337681894043990168}, gscites = {51}, + ss_id = {06f1f4bef8e1b2d90068fc9d4feb542fabbe836e}, + all_ss_ids = {['06f1f4bef8e1b2d90068fc9d4feb542fabbe836e']}, } @article{Kars93c, @@ -11941,6 +14760,8 @@ @article{Kars93c month = {12}, gsid = {16803864324185696158}, gscites = {203}, + ss_id = {f4305a7996f618faff4ec968ef883d2769f7fac6}, + all_ss_ids = {['f4305a7996f618faff4ec968ef883d2769f7fac6']}, } 
@article{Kars96a, @@ -11957,6 +14778,8 @@ @article{Kars96a pmid = {18215942}, gsid = {15292609448935173520}, gscites = {373}, + ss_id = {d4f072532690ae1433a2eaf7c7f682ee8df294a7}, + all_ss_ids = {['d4f072532690ae1433a2eaf7c7f682ee8df294a7']}, } @article{Kars96b, @@ -11970,6 +14793,8 @@ @article{Kars96b optnote = {DIAG, RADIOLOGY}, gsid = {11701657857381569600}, gscites = {91}, + ss_id = {4ca79902a77995baa090ff5c6c9a56673dcc48e5}, + all_ss_ids = {['4ca79902a77995baa090ff5c6c9a56673dcc48e5']}, } @article{Kars97, @@ -12005,6 +14830,8 @@ @article{Kars98 month = {2}, gsid = {5420073140584548660}, gscites = {337}, + ss_id = {1ac5f398c1eeae7189800c003035dc7248298f93}, + all_ss_ids = {['1ac5f398c1eeae7189800c003035dc7248298f93']}, } @inproceedings{Kars99, @@ -12021,6 +14848,8 @@ @inproceedings{Kars99 optnote = {DIAG, RADIOLOGY}, gsid = {4460532311555312320}, gscites = {20}, + ss_id = {9ceb82b0ab095f63e41811004ae78929c62cf326}, + all_ss_ids = {['9ceb82b0ab095f63e41811004ae78929c62cf326']}, } @article{Kars99a, @@ -12036,6 +14865,8 @@ @article{Kars99a pmid = {10578420}, gsid = {1871882345363874725}, gscites = {12}, + ss_id = {344e4a1363dad274ac88f6887d509e84340f1e8c}, + all_ss_ids = {['344e4a1363dad274ac88f6887d509e84340f1e8c']}, } @article{Kart21a, @@ -12053,6 +14884,28 @@ @article{Kart21a pmid = {34393083}, year = {2021}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/238871}, + ss_id = {014718afc12c16c0a80091c389bc661933484793}, + all_ss_ids = {['014718afc12c16c0a80091c389bc661933484793']}, + gscites = {13}, +} + +@article{Kate23, + author = {Katende, Bulemba and Bresser, Moniek and Kamele, Mashaete and Chere, Lebohang and Tlahali, Mosa and Erhardt, Rahel Milena and Muhairwe, Josephine and Ayakaka, Irene and Glass, Tracy R. and Ruhwald, Morten and van Ginneken, Bram and Murphy, Keelin and de Vos, Margaretha and Amstutz, Alain and Mareka, Mathabo and Mooko, Sekhele Matabo and Reither, Klaus and Gonz\'{a}lez Fern\'{a}ndez, Lucia}, + title = {Impact of a multi-disease integrated screening and diagnostic model for COVID-19, TB, and HIV in Lesotho}, + doi = {10.1371/journal.pgph.0001488}, + year = {2023}, + abstract = {The surge of the COVID-19 pandemic challenged health services globally, and in Lesotho, the HIV and tuberculosis (TB) services were similarly affected. Integrated, multi-disease diagnostic services were proposed solutions to mitigate these disruptions. We describe and evaluate the effect of an integrated, hospital-based COVID-19, TB and HIV screening and diagnostic model in two rural districts in Lesotho, during the period between December 2020 and August 2022. Adults, hospital staff, and children above 5 years attending two hospitals were pre-screened for COVID-19 and TB symptoms. After a positive pre-screening, participants were offered to enroll in a service model that included clinical evaluation, chest radiography, SARS-CoV-2, TB, and HIV testing. Participants diagnosed with COVID-19, TB, or HIV were contacted after 28 days to evaluate their health status and linkage to HIV and/or TB care services. Of the 179160 participants pre-screened, 6623(3.7%) pre-screened positive, and 4371(66%) were enrolled in this service model. Of the total 458 diagnoses, only 17 happened in children. One positive rapid antigen test for SARS-CoV-2 was found per 11 participants enrolled, one Xpert-positive TB case was diagnosed per 85 people enrolled, and 1 new HIV diagnosis was done per 182 people enrolled. 
Of the 321(82.9%) participants contacted after 28 days of diagnosis, 304(94.7%) reported to be healthy. Of the individuals that were newly diagnosed with HIV or TB, 18/24(75.0%) and 46/51(90.1%) started treatment within 28 days of the diagnosis. This screening and diagnostic model successfully maintained same-day, integrated COVID-19, TB, and HIV testing services, despite frequent disruptions caused by the surge of COVID-19 waves, healthcare seeking patterns, and the volatile context (social measures, travel restrictions, population lockdowns). There were positive effects in avoiding diagnostic delays and ensuring linkage to services, however, diagnostic yields for adults and children were low. To inform future preparedness plans, research will need to identify essential health interventions and how to optimize them along each phase of the emergency response.}, + url = {http://dx.doi.org/10.1371/journal.pgph.0001488}, + file = {Kate23.pdf:pdf\\Kate23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {PLOS Global Public Health}, + citation-count = {0}, + automatic = {yes}, + pages = {e0001488}, + volume = {3}, + all_ss_ids = {['ce40309e3a7d5319ea2337c2d44f75bca3761e78']}, + pmid = {37531333}, + gscites = {2}, } @article{Kauc20, @@ -12064,22 +14917,25 @@ @article{Kauc20 pages = {3277-3294}, doi = {10.1007/s00330-020-06727-7}, abstract = {In Europe, lung cancer ranks third among the most common cancers, remaining the biggest killer. Since the publication of the first European Society of Radiology and European Respiratory Society joint white paper on lung cancer screening (LCS) in 2015, many new findings have been published and discussions have increased considerably. Thus, this updated expert opinion represents a narrative, non-systematic review of the evidence from LCS trials and description of the current practice of LCS as well as aspects that have not received adequate attention until now. Reaching out to the potential participants (persons at high risk), optimal communication and shared decision-making will be key starting points. Furthermore, standards for infrastructure, pathways and quality assurance are pivotal, including promoting tobacco cessation, benefits and harms, overdiagnosis, quality, minimum radiation exposure, definition of management of positive screen results and incidental findings linked to respective actions as well as cost-effectiveness. This requires a multidisciplinary team with experts from pulmonology and radiology as well as thoracic oncologists, thoracic surgeons, pathologists, family doctors, patient representatives and others. The ESR and ERS agree that Europe's health systems need to adapt to allow citizens to benefit from organised pathways, rather than unsupervised initiatives, to allow early diagnosis of lung cancer and reduce the mortality rate. Now is the time to set up and conduct demonstration programmes focusing, among other points, on methodology, standardisation, tobacco cessation, education on healthy lifestyle, cost-effectiveness and a central registry. - - Key Points - - * Pulmonologists and radiologists both have key roles in the set up of multidisciplinary LCS teams with experts from many other fields. - - * Pulmonologists identify people eligible for LCS, reach out to family doctors, share the decision-making process and promote tobacco cessation. - - * Radiologists ensure appropriate image quality, minimum dose and a standardised reading/reporting algorithm, together with a clear definition of a "positive screen".
- - * Strict algorithms define the exact management of screen-detected nodules and incidental findings. - - * For LCS to be (cost-)effective, it has to target a population defined by risk prediction models.}, + + Key Points + + * Pulmonologists and radiologists both have key roles in the set up of multidisciplinary LCS teams with experts from many other fields. + + * Pulmonologists identify people eligible for LCS, reach out to family doctors, share the decision-making process and promote tobacco cessation. + + * Radiologists ensure appropriate image quality, minimum dose and a standardised reading/reporting algorithm, together with a clear definition of a "positive screen". + + * Strict algorithms define the exact management of screen-detected nodules and incidental findings. + + * For LCS to be (cost-)effective, it has to target a population defined by risk prediction models.}, file = {Kauc20.pdf:pdf\\Kauc20.pdf:PDF}, optnote = {DIAG, INPRESS, RADIOLOGY}, pmid = {32052170}, month = {2}, + ss_id = {2cbfeefc3025a11beced3681bd0b358d44d4c0a8}, + all_ss_ids = {['93d384fea8e9acd340445c227ed226e1642bed49', '2cbfeefc3025a11beced3681bd0b358d44d4c0a8']}, + gscites = {118}, } @article{Kauc20a, @@ -12097,7 +14953,9 @@ @article{Kauc20a year = {2020}, month = {2}, gsid = {11915976228715108892}, - gscites = {5}, + gscites = {118}, + ss_id = {93d384fea8e9acd340445c227ed226e1642bed49}, + all_ss_ids = {['93d384fea8e9acd340445c227ed226e1642bed49']}, } @article{Kaze18, @@ -12125,13 +14983,83 @@ @article{Kaze20 file = {Kaze20.pdf:pdf\\Kaze20.pdf:PDF}, optnote = {DIAG}, gsid = {2312414813335334255}, - gscites = {55}, + gscites = {267}, + ss_id = {04507020e4e003042150dcc8602505d1bec71c49}, + all_ss_ids = {['04507020e4e003042150dcc8602505d1bec71c49']}, } -@article{Khal19, - author = {Nadieh Khalili and Nikolas Lessmann and Elise Turk and Nathalie H.P. Claessens and Roel de Heus and Tessel Kolk and Max A. Viergever and Manon J.N.L. Benders and Ivana I{\v{s}}gum}, - title = {Automatic brain tissue segmentation in fetal {MRI} using convolutional neural networks}, - journal = MRI, +@article{Kemp21, + author = {van Kempen, Evi J. and Post, Max and Mannil, Manoj and Witkam, Richard L. and ter Laan, Mark and Patel, Ajay and Meijer, Frederick J. A. and Henssen, Dylan}, + title = {~~Performance of machine learning algorithms for glioma segmentation of brain MRI: a systematic literature review and meta-analysis}, + doi = {10.1007/s00330-021-08035-0}, + year = {2021}, + abstract = {Abstract + Objectives + Different machine learning algorithms (MLAs) for automated segmentation of gliomas have been reported in the literature. Automated segmentation of different tumor characteristics can be of added value for the diagnostic work-up and treatment planning. The purpose of this study was to provide an overview and meta-analysis of different MLA methods. + + Methods + A systematic literature review and meta-analysis was performed on the eligible studies describing the segmentation of gliomas. Meta-analysis of the performance was conducted on the reported dice similarity coefficient (DSC) score of both the aggregated results as two subgroups (i.e., high-grade and low-grade gliomas). This study was registered in PROSPERO prior to initiation (CRD42020191033). + + Results + After the literature search (n = 734), 42 studies were included in the systematic literature review. Ten studies were eligible for inclusion in the meta-analysis. Overall, the MLAs from the included studies showed an overall DSC score of 0.84 (95% CI: 0.82-0.86). 
In addition, a DSC score of 0.83 (95% CI: 0.80-0.87) and 0.82 (95% CI: 0.78-0.87) was observed for the automated glioma segmentation of the high-grade and low-grade gliomas, respectively. However, heterogeneity was considerably high between included studies, and publication bias was observed. + + Conclusion + MLAs facilitating automated segmentation of gliomas show good accuracy, which is promising for future implementation in neuroradiology. However, before actual implementation, a few hurdles are yet to be overcome. It is crucial that quality guidelines are followed when reporting on MLAs, which includes validation on an external test set. + + Key Points + * MLAs from the included studies showed an overall DSC score of 0.84 (95% CI: 0.82-0.86), indicating a good performance. + * MLA performance was comparable when comparing the segmentation results of the high-grade gliomas and the low-grade gliomas. + * For future studies using MLAs, it is crucial that quality guidelines are followed when reporting on MLAs, which includes validation on an external test set. + }, + url = {http://dx.doi.org/10.1007/s00330-021-08035-0}, + file = {Kemp21.pdf:pdf\\Kemp21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + automatic = {yes}, + citation-count = {25}, + pages = {9638-9653}, + volume = {31}, + pmid = {34019128}, +} + +@article{Kers21, + author = {Kerschke, Laura and Weigel, Stefanie and Rodriguez-Ruiz, Alejandro and Karssemeijer, Nico and Heindel, Walter}, + title = {~~Using deep learning to assist readers during the arbitration process: a lesion-based retrospective evaluation of breast cancer screening performance}, + doi = {10.1007/s00330-021-08217-w}, + year = {2021}, + abstract = {Abstract + Objectives + To evaluate if artificial intelligence (AI) can discriminate recalled benign from recalled malignant mammographic screening abnormalities to improve screening performance. + + Methods + A total of 2257 full-field digital mammography screening examinations, obtained 2011-2013, of women aged 50-69 years which were recalled for further assessment of 295 malignant out of 305 truly malignant lesions and 2289 benign lesions after independent double-reading with arbitration, were included in this retrospective study. A deep learning AI system was used to obtain a score (0-95) for each recalled lesion, representing the likelihood of breast cancer. The sensitivity on the lesion level and the proportion of women without false-positive ratings (non-FPR) resulting under AI were estimated as a function of the classification cutoff and compared to that of human readers. + + Results + Using a cutoff of 1, AI decreased the proportion of women with false-positives from 89.9 to 62.0%, non-FPR 11.1% vs. 38.0% (difference 26.9%, 95% confidence interval 25.1-28.8%; p < .001), preventing 30.1% of reader-induced false-positive recalls, while reducing sensitivity from 96.7 to 91.1% (5.6%, 3.1-8.0%) as compared to human reading. The positive predictive value of recall (PPV-1) increased from 12.8 to 16.5% (3.7%, 3.5-4.0%). In women with mass-related lesions (n = 900), the non-FPR was 14.2% for humans vs. 36.7% for AI (22.4%, 19.8-25.3%) at a sensitivity of 98.5% vs. 97.1% (1.5%, 0-3.5%). + + Conclusion + The application of AI during consensus conference might especially help readers to reduce false-positive recalls of masses at the expense of a small sensitivity reduction. Prospective studies are needed to further evaluate the screening benefit of AI in practice. 
+ + Key Points + * Integrating the use of artificial intelligence in the arbitration process reduces benign recalls and increases the positive predictive value of recall at the expense of some sensitivity loss. + * Application of the artificial intelligence system to aid the decision to recall a woman seems particularly beneficial for masses, where the system reaches comparable sensitivity to that of the readers, but with considerably reduced false-positives. + * About one-fourth of all recalled malignant lesions are not automatically marked by the system such that their evaluation (AI score) must be retrieved manually by the reader. A thorough reading of screening mammograms by readers to identify suspicious lesions therefore remains mandatory. + }, + url = {http://dx.doi.org/10.1007/s00330-021-08217-w}, + file = {Kers21.pdf:pdf\\Kers21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + automatic = {yes}, + citation-count = {7}, + pages = {842-852}, + volume = {32}, + pmid = {34383147}, +} + +@article{Khal19, + author = {Nadieh Khalili and Nikolas Lessmann and Elise Turk and Nathalie H.P. Claessens and Roel de Heus and Tessel Kolk and Max A. Viergever and Manon J.N.L. Benders and Ivana I{\v{s}}gum}, + title = {Automatic brain tissue segmentation in fetal {MRI} using convolutional neural networks}, + journal = MRI, year = {2019}, volume = {64}, pages = {77--89}, @@ -12140,7 +15068,9 @@ @article{Khal19 optnote = {DIAG}, month = {12}, gsid = {15448749691600249146}, - gscites = {13}, + gscites = {88}, + ss_id = {30dca929aeddfe3e24bfabaaeed66b3882c1fb4e}, + all_ss_ids = {['30dca929aeddfe3e24bfabaaeed66b3882c1fb4e']}, } @conference{Khal19a, @@ -12185,6 +15115,8 @@ @inproceedings{Klik06 optnote = {DIAG, RADIOLOGY}, gsid = {8798156317069698629}, gscites = {11}, + ss_id = {a13b6a3d2e3e5ddaa38f52ccf1eaa259bce43423}, + all_ss_ids = {['a13b6a3d2e3e5ddaa38f52ccf1eaa259bce43423']}, } @article{Kloo14, @@ -12242,6 +15174,9 @@ @article{Kloo20 pmid = {32211511}, optnote = {DIAG, RADIOLOGY}, file = {Kloo20.pdf:pdf\\Kloo20.pdf:PDF}, + ss_id = {6975511e8856bf97caedcd6a8935cde06a2fed10}, + all_ss_ids = {['6975511e8856bf97caedcd6a8935cde06a2fed10']}, + gscites = {6}, } @mastersthesis{Klug20, @@ -12286,8 +15221,10 @@ @article{Kobu16 optnote = {BioMR, DIAG, RADIOLOGY}, pmid = {26418614}, gsid = {7691912987528169028}, - gscites = {26}, + gscites = {30}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/172209}, + ss_id = {4c9abadd8af32f1db60112025a18f8ba33d47269}, + all_ss_ids = {['4c9abadd8af32f1db60112025a18f8ba33d47269']}, } @inproceedings{Kock10, @@ -12305,6 +15242,8 @@ @inproceedings{Kock10 month = {3}, gsid = {472504484418187668}, gscites = {11}, + ss_id = {da91be2f5ccd356f104f7729c45ddd6fbd6693d0}, + all_ss_ids = {['da91be2f5ccd356f104f7729c45ddd6fbd6693d0']}, } @inproceedings{Kock10a, @@ -12318,7 +15257,9 @@ @inproceedings{Kock10a file = {Kock10a.pdf:pdf\\Kock10a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {17284129500278835313}, - gscites = {22}, + gscites = {34}, + ss_id = {861d6ffbd965b28b558b675e5ef6f24670fc541f}, + all_ss_ids = {['861d6ffbd965b28b558b675e5ef6f24670fc541f']}, } @inproceedings{Kock12, @@ -12332,6 +15273,8 @@ @inproceedings{Kock12 optnote = {DIAG, RADIOLOGY}, gsid = {18323115745426701953}, gscites = {3}, + ss_id = {91e9a0a4318c95e05c7b1f490e55f737faceddfb}, + all_ss_ids = {['91e9a0a4318c95e05c7b1f490e55f737faceddfb']}, } @article{Kock14, @@ -12351,6 +15294,8 @@ @article{Kock14 gsid = {6125975055231559958}, gscites = {10}, taverne_url = 
{https://repository.ubn.ru.nl/handle/2066/138328}, + ss_id = {c1f2f36dc770001c2b6cbd287b7984e225eac117}, + all_ss_ids = {['c1f2f36dc770001c2b6cbd287b7984e225eac117']}, } @article{Kock16, @@ -12369,8 +15314,10 @@ @article{Kock16 optnote = {DIAG, RADIOLOGY}, pmid = {27436568}, gsid = {13832411224923143732}, - gscites = {4}, + gscites = {5}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/171478}, + ss_id = {c28aa351b0f9a74539a6d5c92fe4a7325dcbf4f8}, + all_ss_ids = {['c28aa351b0f9a74539a6d5c92fe4a7325dcbf4f8']}, } @article{Kock16a, @@ -12386,6 +15333,9 @@ @article{Kock16a optnote = {DIAG, RADIOLOGY}, publisher = {Frontiers}, month = {12}, + ss_id = {f70ae8307930e0af1370e329b133fc75d72352fd}, + all_ss_ids = {['f70ae8307930e0af1370e329b133fc75d72352fd']}, + gscites = {0}, } @phdthesis{Kock17a, @@ -12394,7 +15344,7 @@ @phdthesis{Kock17a year = {2017}, url = {https://dspace.library.uu.nl/handle/1874/343111}, abstract = {This thesis describes and evaluates an interactive annotation system for chest CT scans. As a first step, the structure of interest is segmented automatically. This structure is then divided into smaller volumes of interest (VOIs) containing one type of texture. These VOIs - are automatically labeled, either by a classifier or using a heuristic approach.}, + are automatically labeled, either by a classifier or using a heuristic approach.}, file = {Kock17a.pdf:pdf/Kock17a.pdf:PDF}, optnote = {DIAG}, promotor = {M. A. Viergever, B. van Ginneken and W. M. Prokop}, @@ -12416,6 +15366,8 @@ @inproceedings{Koek12 optnote = {DIAG, RADIOLOGY}, gsid = {5775248371125334035}, gscites = {1}, + ss_id = {73373e664b599ba63aaa245fcfb00ccd1e4f4757}, + all_ss_ids = {['73373e664b599ba63aaa245fcfb00ccd1e4f4757']}, } @article{Koes18, @@ -12433,8 +15385,10 @@ @article{Koes18 optnote = {DIAG}, pmid = {30092877}, gsid = {5183040389226655264}, - gscites = {5}, + gscites = {20}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/194777}, + ss_id = {6f0c30323e83f89d95b797f49a6c4464e0576e00}, + all_ss_ids = {['6f0c30323e83f89d95b797f49a6c4464e0576e00']}, } @mastersthesis{Kok19, @@ -12480,7 +15434,9 @@ @inproceedings{Kooi16a file = {:pdf/Kooi16a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {5774304779995387763}, - gscites = {18}, + gscites = {43}, + ss_id = {7c1f80dba043b6b0e5369915225f718218a6fd45}, + all_ss_ids = {['7c1f80dba043b6b0e5369915225f718218a6fd45']}, } @article{Kooi17, @@ -12497,8 +15453,10 @@ @article{Kooi17 optnote = {DIAG, RADIOLOGY}, pmid = {27497072}, gsid = {4747474489852859159}, - gscites = {432}, + gscites = {765}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/173029}, + ss_id = {2a863a1ab6df3ca9a55573befcb89e1ed7b7df74}, + all_ss_ids = {['2a863a1ab6df3ca9a55573befcb89e1ed7b7df74']}, } @inproceedings{Kooi17a, @@ -12516,6 +15474,8 @@ @inproceedings{Kooi17a month = {3}, gsid = {18358685464566974613}, gscites = {2}, + ss_id = {03c07dd19884c370e3a49106a27578ffc3697475}, + all_ss_ids = {['03c07dd19884c370e3a49106a27578ffc3697475']}, } @inproceedings{Kooi17b, @@ -12528,20 +15488,22 @@ @inproceedings{Kooi17b pages = {101341J}, doi = {10.1117/12.2254586}, abstract = {When humans identify objects in images, context is an important cue; a cheetah is more likely to be a domestic cat - when a television set is recognised in the background. Similar principles apply to the analysis of medical images. 
- The detection of diseases that manifest unilaterally in symmetrical organs or organ pairs can in part be facilitated - by a search for symmetrical discrepancies in or between the organs in question. During a mammographic exam, - images are recorded of each breast and absence of a certain structure around the same location in the contra- - lateral image will render the area under scrutiny more suspicious and conversely, the presence of similar tissue - less so. In this paper, we present a fusion scheme for a deep Convolutional Neural Network (CNN) architecture - with the goal to optimally capture such asymmetries. The method is applied to the domain of mammography - CAD, but can be relevant to other medical image analysis tasks where symmetry is important such as lung, - prostate or brain images.}, + when a television set is recognised in the background. Similar principles apply to the analysis of medical images. + The detection of diseases that manifest unilaterally in symmetrical organs or organ pairs can in part be facilitated + by a search for symmetrical discrepancies in or between the organs in question. During a mammographic exam, + images are recorded of each breast and absence of a certain structure around the same location in the contra- + lateral image will render the area under scrutiny more suspicious and conversely, the presence of similar tissue + less so. In this paper, we present a fusion scheme for a deep Convolutional Neural Network (CNN) architecture + with the goal to optimally capture such asymmetries. The method is applied to the domain of mammography + CAD, but can be relevant to other medical image analysis tasks where symmetry is important such as lung, + prostate or brain images.}, file = {Kooi17b.pdf:pdf\\Kooi17b.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {7794883843341570734}, gscites = {7}, + ss_id = {5b0eabbe26da0e57856f08cddc34e8c7a62c5cdb}, + all_ss_ids = {['5b0eabbe26da0e57856f08cddc34e8c7a62c5cdb']}, } @article{Kooi17c, @@ -12559,8 +15521,10 @@ @article{Kooi17c optnote = {DIAG, RADIOLOGY}, pmid = {28094850}, gsid = {12478615936975025414}, - gscites = {63}, + gscites = {95}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/173098}, + ss_id = {dcce31a81f66ba0502265cca7dbd9f8477eebf1f}, + all_ss_ids = {['dcce31a81f66ba0502265cca7dbd9f8477eebf1f']}, } @article{Kooi17d, @@ -12573,14 +15537,28 @@ @article{Kooi17d pages = {International Society for Optics and Photonics}, doi = {10.1117/1.JMI.4.4.044501}, abstract = {Neural networks, in particular deep Convolutional Neural Networks (CNN), have recently gone through a renaissance sparked by the introduction of more efficient training procedures and massive amounts of raw annotated data. Barring a handful of modalities, medical images are typically too large to present as input as a whole and models are consequently trained with subsets of images or cases, representing the most crucial bits of information. When inspecting a scene to identify objects, humans take cues from not just the article in question but also the elements in its vicinity: a frisbee is more likely to be a plate in the presence of a fork and knife. Similar principles apply to the analysis of medical images: specialists base their judgment of an abnormality on all available data, harnessing information such as symmetrical differences in or between organs in question and temporal change, if multiple recordings are available. 
\\ - - In this paper we investigate the addition of symmetry and temporal context information to a deep CNN with the purpose of detecting malignant soft tissue lesions in mammography. We employ a simple linear mapping that takes the location of a mass candidate and maps it to either the contra-lateral or prior mammogram and Regions Of Interest (ROI) are extracted around each location. We subsequently explore two different architectures (1) a fusion model employing two datastreams were both ROIs are fed to the network during training and testing and (2) a stage-wise approach where a single ROI CNN is trained on the primary image and subsequently used as feature extractor for both primary and symmetrical or prior ROIs. A 'shallow' Gradient Boosted Tree (GBT) classifier is then trained on the concatenation of these features and used to classify the joint representation. Results shown a significant increase in performance using the first architecture and symmetry information, but only marginal gains in performance using temporal data and the other setting. We feel results are promising and can greatly be improved when more temporal data becomes available.}, + + In this paper we investigate the addition of symmetry and temporal context information to a deep CNN with the purpose of detecting malignant soft tissue lesions in mammography. We employ a simple linear mapping that takes the location of a mass candidate and maps it to either the contra-lateral or prior mammogram and Regions Of Interest (ROI) are extracted around each location. We subsequently explore two different architectures (1) a fusion model employing two datastreams were both ROIs are fed to the network during training and testing and (2) a stage-wise approach where a single ROI CNN is trained on the primary image and subsequently used as feature extractor for both primary and symmetrical or prior ROIs. A 'shallow' Gradient Boosted Tree (GBT) classifier is then trained on the concatenation of these features and used to classify the joint representation. Results shown a significant increase in performance using the first architecture and symmetry information, but only marginal gains in performance using temporal data and the other setting. We feel results are promising and can greatly be improved when more temporal data becomes available.}, file = {Kooi17d.pdf:pdf\\Kooi17d.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {29021992}, month = {10}, gsid = {17173496398007022348}, - gscites = {6}, + gscites = {41}, + all_ss_ids = {['a908e3775fd064b0d249728a7261cfb1efa13b87']}, +} + +@article{Kooi17e, + author = {Kooi, Thijs and Karssemeijer, Nico}, + title = {~~Classifying Symmetrical Differences and Temporal Change in Mammography Using Deep Neural Networks}, + doi = {10.48550/ARXIV.1703.07715}, + year = {2017}, + abstract = {We investigate the addition of symmetry and temporal context information to a deep Convolutional Neural Network (CNN) with the purpose of detecting malignant soft tissue lesions in mammography. We employ a simple linear mapping that takes the location of a mass candidate and maps it to either the contra-lateral or prior mammogram and Regions Of Interest (ROI) are extracted around each location. 
We subsequently explore two different architectures (1) a fusion model employing two datastreams were both ROIs are fed to the network during training and testing and (2) a stage-wise approach where a single ROI CNN is trained on the primary image and subsequently used as feature extractor for both primary and symmetrical or prior ROIs. A 'shallow' Gradient Boosted Tree (GBT) classifier is then trained on the concatenation of these features and used to classify the joint representation. Results shown a significant increase in performance using the first architecture and symmetry information, but only marginal gains in performance using temporal data and the other setting. We feel results are promising and can greatly be improved when more temporal data becomes available.}, url = {https://arxiv.org/abs/1703.07715}, file = {Kooi17e.pdf:pdf\\Kooi17e.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, journal = {arXiv:1703.07715}, automatic = {yes}, } @phdthesis{Kooi18, @@ -12632,7 +15610,7 @@ @conference{Koop08a booktitle = ISMRM, year = {2008}, abstract = {Introduction: MR venography based upon susceptibility weighted imaging (SWI) can depict the venous vascular tree with excellent detail by using minimum intensity projections (mIP) over several slices [1]. While this visualisation works quite nicely, this approach does not classify veins, which can be necessary, e.g. to remove veins from high resolution functional MRI scans. To solve this it has been proposed to use filters known from angiography [2]. In this study we assessed the performance of two such filters: the Utrecht vesselness filter and Vessel Enhancing Diffusion (VED) in regions of high and low SNR. As higher field strengths are beneficial for SWI venography the VED filter was used on SWI data acquired at 7T and compared with 3T data. Methods: All experiments were performed on whole body MRI scanners (Siemens, Erlangen, Germany) at field strengths of 3T and 7T. SWI images were acquired using a first-order flow-compensated 3D gradient echo FLASH sequence with the following parameters: FA 15°, BW 120 Hz/pixel, acceleration factor 2 (GRAPPA). At 3T we used an echo time of 28 ms with a TR of 35 ms, at 7T these were 15 ms and 22 ms respectively. Whole brain sagittal acquisitions were performed on the same subject at both fields using a matrix size of 352x448x144 and a resolution of 0.57x0.57x1.25 mm3. Acquisition times were 15 and 10 minutes at 3T and 7T respectively. To test the filters in regions of high and low SNR a dataset was acquired using an eight channel occipital surface coil array [3]. In order to increase SNR an average over 4 repetitions was used. Matrix size was 144x192x40 using an isotropic resolution of 0.75 mm. Acquisition time was 2 minutes per volume. All datasets were intensity-inhomogeneity corrected and veins were segmented using the Utrecht vesselness filter and VED which are described in detail in [4] and [5] respectively. In short, the Utrecht filter uses second order image information to distinguish dark tube-like structures (veins) from their surroundings and noise. The VED filter uses an iterative approach by alternately applying the Utrecht filter and a diffusion process to eliminate noise. However the shape/direction of the diffusion is dependent on the vessel likeliness found by the previous iteration of the Utrecht filter. For voxels that do not belong to the venous tree, the diffusion is isotropic, for venous voxels diffusion is applied in the direction of the vessel. 
This diffusion process improves the starting conditions for the next Utrecht filter step. In this study 5 iterations were used. In addition to the automated filters, manual segmentation was performed on the 0.75 mm resolution dataset to be compared with the VED result. Results: Fig. 1a shows a mIP over 11 slices of the dataset acquired using the occipital coil. Fig. 1b shows the maximum intensity projection (MIP) of the output of the Utrecht filter, fig. 1c shows the MIP of the VED result. In the bottom part of the images, where SNR is relatively high, both filters perform very well. In the top part, where SNR is reduced due to the inhomogeneous coil profile, the VED method is better able to contrast veins with respect to the noisy background than the Utrecht filter. Fig. 2 shows the manual segmentation (a) and the VED result (b) overlaid on the mIP of the data. Apart from the very top part of the image (where SNR was extremely low) both results are in excellent agreement. Conclusion: Two automated vein segmentation filters have been applied to SWI-venography data. In regions of high SNR the Utrecht vesselness filter and the vessel enhancing diffusion method both performed very well. In regions of low SNR the VED method outperformed the Utrecht filter and the result was in excellent agreement with manual segmentation results. The VED filter was applied to whole brain 3T and 7T data where - due to the huge amount of visible veins - manual segmentation can no longer be considered a possibility. Although the results at 3T were slightly better than at 7T due to increased SNR inhomogeneity at the latter - field strength, 7T allows for a much shorter acquisition time and therefore shows good promise for future SWI-venography.}, + field strength, 7T allows for a much shorter acquisition time and therefore shows good promise for future SWI-venography.}, file = {Koop08a.pdf:pdf\\Koop08a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, } @@ -12670,6 +15648,7 @@ @article{Kort15 month = {10}, gsid = {6642129317660450449}, gscites = {75}, + all_ss_ids = {['349ca29f588b9c785085da7147a4b58df032a8bf', '7c83ece3400fa10da80bd363c15dd96eded51c9f', '88b86ad75e10c7dac7b70edbd05ed494d699ffb0']}, } @article{Kort16, @@ -12686,6 +15665,26 @@ @article{Kort16 file = {Kort16.pdf:pdf\\Kort16.pdf:PDF}, optnote = {DIAG, MUSIC, RADIOLOGY}, pmid = {27249826}, + ss_id = {2fbd158c39a795d294b80591f85305b95b2cff58}, + all_ss_ids = {['2fbd158c39a795d294b80591f85305b95b2cff58']}, + gscites = {39}, } +@article{Kort22, + author = {De Kort, Elizabeth A. and Buil, Jochem B. and Schalekamp, Steven and Schaefer-Prokop, Cornelia and Verweij, Paul E. and Schaap, Nicolaas P. M. and Blijlevens, Nicole M. A. and Van der Velden, Walter J. F. M.}, + title = {~~Invasive Fungal Disease in Patients with Myeloid Malignancies: A Retrospective Cohort Study of a Diagnostic-Driven Care Pathway Withholding Mould-Active Prophylaxis}, + doi = {10.3390/jof8090925}, + year = {2022}, + abstract = {Objectives: Patients receiving remission induction therapy for acute myeloid leukaemia (AML) are at high risk of developing invasive fungal disease (IFD). Newer therapies with targeted antileukemic agents and the emergence of azole resistance pose a challenge to the strategy of primary antifungal prophylaxis. We report the experience of a diagnostic-driven care pathway (DCP) for the management of IFD in these patients, using only culture-directed mould inactive prophylaxis. 
Methods: Retrospectively, we used a single-centre study of consecutive patients receiving intensive chemotherapy for myeloid malignancies between 2014 and 2021. DCP consisted of serial cultures and serum galactomannan (sGM) screening, CT imaging, and bronchoscopy to direct targeted antifungal treatment. IFD was classified according to the 2020 EORTC/MSGERC criteria. Results: A total of 192 patients with myeloid malignancies received 300 courses of intensive chemotherapy. There were 14 cases of invasive yeast infections and 18 of probable/proven invasive mould disease (IMD). The incidence of probable/proven IMD during the first cycle of remission-induction chemotherapy was 4.6% (n = 9). sGM remained negative in all cases of invasive aspergillosis (IA), with positive mycology findings in bronchoalveolar lavage. All-cause mortality was 9.4% (n = 18) 100 days after starting chemotherapy and was comparable between patients with or without IFD. The fungal-related mortality was 1% (n = 2). Conclusion: Diagnostic-driven based management without universal mould active prophylaxis is a feasible strategy in the management of IFD and limits unnecessary antimould treatment during intensive chemotherapy. The poor performance of serial serum galactomannan screening in detecting IA warrants further investigation.}, url = {http://dx.doi.org/10.3390/jof8090925}, file = {Kort22.pdf:pdf\\Kort22.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, journal = {Journal of Fungi}, automatic = {yes}, citation-count = {2}, pages = {925}, volume = {8}, pmid = {36135650}, } @article{Kos20, @@ -12700,9 +15699,10 @@ @article{Kos20 file = {:pdf/Kos2020.pdf:PDF}, optnote = {DIAG}, gsid = {3636221107829510527}, - gscites = {1}, + gscites = {103}, pmid = {32411819}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/220827}, + all_ss_ids = {['a7f96553bc690f6f66697895a98e9377cc0f7562', 'cb774bdd19514d661ab76d0d30d158e4e74a859f', '889cc0565d6e2e844f4db0132c71a612672d2497']}, } @conference{Kosc18, booktitle = RSNA, year = {2018}, abstract = {PURPOSE: Brain extraction methods in {MRI} have so far been exclusively developed for T1- and T2-weighted images. A deep neural network is presented to segment the brain tissue in susceptibility-weighted images ({SWI}) in healthy individuals and patients with traumatic brain injury ({TBI}). - MATERIALS AND METHODS: In total, {MRI} scans from 33 patients with moderate to severe {TBI} and 18 healthy controls were collected. {SWI}s were acquired with 27ms {TR}, 20ms {TE}, 15deg flip angle, and 0.98x0.98x1.00mm3 voxel size on a {3T} Siemens MRI scanner. A small scale 2D-U-Net was implemented (18 convolution layers, max. 256 features per layer) processing a volume in axial direction. The U-Net architecture allowed the model to utilize both local and contextual information. The output probability maps were thresholded and possible outliers were removed by taking the largest connected component. 20 {TBI} patients and 10 controls served as a test set, the remaining patients were used for training. The reference standard were brain masks obtained with {SPM}, a publicly available software package commonly used for brain extractions in {MR} neuroimaging, but not optimized for {SWI} sequences. These annotations were visually inspected. The results of the deep learning method were visually inspected for completeness and overall quality. Dice similarity coefficient ({DCS}) and the modified Hausdorff ({MHD}) distance were reported for the test set. 
- RESULTS: The {DCS} was 0.98+-0.002 per volume at the chosen operating point on the SPM standard and the {MHD} was 0.93+-0.11mm per volume. It took less than 10 seconds to compute the complete 3D brain mask on a modern {GPU}. Overall, our method was capable of learning from a sub-optimal reference standard and extracting the brain in an {SWI} image. It mimicked some of the deficiencies of the {SPM} brain masks, such as occasional failures in the most inferior or superior axial slices, but also mitigated others through generalization over the training set. Holes in the mask caused by contusions or hematomas were less prevalent with the 2D-U-Net than with {SPM}. - CONCLUSION: The 2D-U-Net method provides fast brain extractions in {MR-SWI}.}, + MATERIALS AND METHODS: In total, {MRI} scans from 33 patients with moderate to severe {TBI} and 18 healthy controls were collected. {SWI}s were acquired with 27ms {TR}, 20ms {TE}, 15deg flip angle, and0.98x0.98x1.00mm3 voxel size on a {3T} Siemens MRI scanner. A small scale 2D-U-Net was implemented (18 convolution layers, max. 256 features per layer) processing a volume in axial direction. The U-Net architecture allowed the model to utilize both local and contextual information. The output probability maps were thresholded and possible outliers were removed by taking the largest connected component. 20 {TBI} patients and 10 controls served as a test set, the remaining patients were used for training. The reference standard were brain masks obtained with {SPM}, a publicly available software package commonly used for brain extractions in {MR} neuroimaging, but not optimized for {SWI} sequences. These annotations were visually inspected. The results of the deep learning method were visually inspected for completeness and overall quality. Dice similarity coefficient ({DCS}) and the modified Hausdorff ({MHD}) distance were reported for the test set. + RESULTS: The {DCS} was 0.98+-0.002 per volume at the chosen operating point on the SPM standard and the {MHD} was 0.93+-0.11mm per volume. It took less than 10 seconds to compute the complete 3D brain mask on a modern {GPU}. Overall, our method was capable of learning from a sub-optimal reference standard and extracting the brain in an {SWI} image. It mimicked some of the deficiencies of the {SPM} brain masks, such as occasional failures in the most inferior or superior axial slices, but also mitigated others through generalization over the training set. Holes in the mask caused by contusions or hematomas were less prevalent with the 2D-U-Net than with {SPM}. + CONCLUSION: The 2D-U-Net method provides fast brain extractions in {MR-SWI}.}, file = {Kosc18.pdf:pdf\\Kosc18.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, } @@ -12729,6 +15729,9 @@ @article{Kosc22 abstract = {Cerebral microbleeds (CMBs) are a recognised biomarker of traumatic axonal injury (TAI). Their number and location provide valuable information in the long-term prognosis of patients who sustained a traumatic brain injury (TBI). Accurate detection of CMBs is necessary for both research and clinical applications. CMBs appear as small hypointense lesions on susceptibility-weighted magnetic resonance imaging (SWI). Their size and shape vary markedly in cases of TBI. Manual annotation of CMBs is a difficult, error-prone, and time-consuming task. Several studies addressed the detection of CMBs in other neuropathologies with convolutional neural networks (CNNs). 
In this study, we developed and contrasted a classification (Patch-CNN) and two segmentation (Segmentation-CNN, U-Net) approaches for the detection of CMBs in TBI cases. The models were trained using 45 datasets, and the best models were chosen according to 16 validation sets. Finally, the models were evaluated on 10 TBI and healthy control cases, respectively. Our three models outperform the current status quo in the detection of traumatic CMBs, achieving higher sensitivity at low false positive (FP) counts. Furthermore, using a segmentation approach allows for better precision. The best model, the U-Net, achieves a detection rate of 90\% at FP counts of 17.1 in TBI patients and 3.4 in healthy controls.}, file = {PubMed entry:http\://www.ncbi.nlm.nih.gov/pubmed/35597029:text/html}, pmid = {35597029}, + ss_id = {cefe4c90cb1db768ad407f8ffeb0c51cbc0de0b2}, + all_ss_ids = {['cefe4c90cb1db768ad407f8ffeb0c51cbc0de0b2']}, + gscites = {6}, } @article{Kost16, @@ -12749,6 +15752,20 @@ @article{Kost16 gscites = {55}, } +@inproceedings{Kost16a, + author = {Kost, Henning and Homeyer, Andr\'{e} and Bult, Peter and Balkenhol, Maschenka C. A. and van der Laak, Jeroen A. W. M. and Hahn, Horst K.}, + title = {~~A generic nuclei detection method for histopathological breast images}, + doi = {10.1117/12.2209613}, + year = {2016}, + abstract = {The detection of cell nuclei plays a key role in various histopathological image analysis problems. Considering the high variability of its applications, we propose a novel generic and trainable detection approach. Adaption to specific nuclei detection tasks is done by providing training samples. A trainable deconvolution and classification algorithm is used to generate a probability map indicating the presence of a nucleus. The map is processed by an extended watershed segmentation step to identify the nuclei positions. We have tested our method on data sets with different stains and target nuclear types. We obtained F1-measures between 0.83 and 0.93.}, + url = {http://dx.doi.org/10.1117/12.2209613}, + file = {Kost16a.pdf:pdf\\Kost16a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {SPIE Proceedings}, + automatic = {yes}, + citation-count = {2}, +} + @article{Kox11, author = {Kox, M. and Pompe, J.C. and Peters, E. and Vaneker M., and van der Laak, J.W. and van der Hoeven, J.G. and Scheffer, G.J. and Hoedemaekers, C.W. and Pickkers, P.}, title = {a7 Nicotinic acetylcholine receptor agonist GTS-21 attenuates ventilator-induced tumour necrosis factor-a production and lung injury}, @@ -12787,6 +15804,25 @@ @conference{Kuij10 optnote = {DIAG, RADIOLOGY}, } +@article{Laak00, + author = {van der Laak, J. A. W. M. and Pahlplatz, M. M. and Hanselaar, A. G. and de Wilde, P. C.}, + title = {Hue-saturation-density (HSD) model for stain recognition in digital images from transmitted light microscopy}, + issue = {4}, + pages = {275--284}, + volume = {39}, + abstract = {Transmitted light microscopy is used in pathology to examine stained tissues. Digital image analysis is gaining importance as a means to quantify alterations in tissues. A prerequisite for accurate and reproducible quantification is the possibility to recognise stains in a standardised manner, independently of variations in the staining density. The usefulness of three colour models was studied using data from computer simulations and experimental data from an immuno-doublestained tissue section. Direct use of the three intensities obtained by a colour camera results in the red-green-blue (RGB) model. 
By decoupling the intensity from the RGB data, the hue-saturation-intensity (HSI) model is obtained. However, the major part of the variation in perceived intensities in transmitted light microscopy is caused by variations in staining density. Therefore, the hue-saturation-density (HSD) transform was defined as the RGB to HSI transform, applied to optical density values rather than intensities for the individual RGB channels. In the RGB model, the mixture of chromatic and intensity information hampers standardisation of stain recognition. In the HSI model, mixtures of stains that could be distinguished from other stains in the RGB model could not be separated. The HSD model enabled all possible distinctions in a two-dimensional, standardised data space. In the RGB model, standardised recognition is only possible by using complex and time-consuming algorithms. The HSI model is not suitable for stain recognition in transmitted light microscopy. The newly derived HSD model was found superior to the existing models for this purpose.}, + file = {Laak00.pdf:pdf\\Laak00.pdf:PDF}, + journal = Cytometry, + month = apr, + optnote = {DIAG}, + pmid = {10738280}, + year = {2000}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/144809}, + ss_id = {57e693a3e04b766b142f72df5c6c6767620fb369}, + all_ss_ids = {['57e693a3e04b766b142f72df5c6c6767620fb369']}, + gscites = {105}, +} + @article{Laak19, author = {van der Laak, Jeroen and Ciompi, Francesco and Litjens, Geert}, title = {No pixel-level annotations needed}, @@ -12802,8 +15838,10 @@ @article{Laak19 optnote = {DIAG, RADIOLOGY}, pmid = {31624355}, gsid = {13408433721865759246}, - gscites = {2}, + gscites = {11}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/215679}, + ss_id = {48547eb6d7527cf31f47cd77cfec4af159a23a51}, + all_ss_ids = {['48547eb6d7527cf31f47cd77cfec4af159a23a51']}, } @article{Laak21, @@ -12820,6 +15858,41 @@ @article{Laak21 pmid = {33990804}, year = {2021}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/235736}, + ss_id = {7d873a9c49d3864709aa762f8740edcdbd7369c5}, + all_ss_ids = {['7d873a9c49d3864709aa762f8740edcdbd7369c5']}, + gscites = {311}, +} + +@article{Laak22, + author = {van der Laak, Jeroen A. and Gr\"{u}nberg, Katrien and Frisk, Anna-Lena A. and Moulin, Pierre}, + title = {~~BUILDING AN E.U.-SCALE DIGITAL PATHOLOGY REPOSITORY: THE BIGPICTURE INITIATIVE}, + doi = {10.1016/j.jpi.2022.100026}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.jpi.2022.100026}, + file = {Laak22.pdf:pdf\\Laak22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Pathology Informatics}, + automatic = {yes}, + citation-count = {0}, + pages = {100026}, + volume = {13}, +} + +@article{Laar23, + author = {Laarhuis, Babette I. and Janssen, Marcel J. R. and Simons, Michiel and van Kalmthout, Ludwike W. M. and van der Doelen, Maarten J. and Peters, Steffie M. B. and Westdorp, Harm and van Oort, Inge M. and Litjens, Geert and Gotthardt, Martin and Nagarajah, James and Mehra, Niven and Prive, Bastiaan M.}, + year = {2023}, + month = {4}, + journal = CGC, + title = {Tumoral Ki67 and PSMA Expression in Fresh Pre-PSMA-RLT Biopsies and Its Relation With PSMA-PET Imaging and Outcomes of PSMA-RLT in Patients With mCRPC.}, + doi = {10.1016/j.clgc.2023.04.003}, + abstract = {Prostate specific membrane antigen (PSMA) directed radioligand therapy (RLT) is a novel therapy for metastatic castration-resistant prostate cancer (mCRPC) patients. 
However, it is still poorly understood why approximately 40% of the patients does not respond to PSMA-RLT. The aims of this study were to evaluate the pretreatment PSMA expression on immunohistochemistry (IHC) and PSMA uptake on PET/CT imaging in mCRPC patients who underwent PSMA-RLT. We correlated these parameters and a cell proliferation marker (Ki67) to the therapeutic efficacy of PSMA-RLT. In this retrospective study, mCRPC patients who underwent PSMA-RLT were analyzed. Patients biopsies were scored for immunohistochemical Ki67 expression, PSMA staining intensity and percentage of cells with PSMA expression. Moreover, the PSMA tracer uptake of the tumor lesion(s) and healthy organs on PET/CT imaging was assessed. The primary outcome was to evaluate the association between histological PSMA protein expression of tumor in pre-PSMA-RLT biopsies and the PSMA uptake on PSMA PET/CT imaging of the biopsied lesion. Secondary outcomes were to assess the relationship between PSMA expression and Ki67 on IHC and the progression free survival (PFS) and overall survival (OS) following PSMA-RLT. In total, 22 mCRPC patients were included in this study. Nineteen (86%) patients showed a high and homogenous PSMA expression of >80% on IHC. Three (14%) patients had low PSMA expression on IHC. Although there was limited PSMA uptake on PET/CT imaging, these 3 patients had lower PSMA uptake on PET/CT imaging compared to the patients with high PSMA expression on IHC. Yet, no correlation was found between PSMA uptake on PET/CT imaging and PSMA expression on IHC (SUVmax: R = 0.046 and SUVavg: R = 0.036). The 3 patients had a shorter PFS compared to the patients with high PSMA expression on IHC (HR: 4.76, 95% CI: 1.14-19.99; P = .033). Patients with low Ki67 expression had a longer PFS and OS compared to patients with a high Ki67 expression (HR: 0.40, 95% CI: 0.15-1.06; P = .013) CONCLUSION: The PSMA uptake on PSMA-PET/CT generally followed the PSMA expression on IHC. However, heterogeneity may be missed on PSMA-PET/CT. Immunohistochemical PSMA and Ki67 expression in fresh tumor biopsies, may contribute to predict treatment efficacy of PSMA-RLT in mCRPC patients. This needs to be further explored in prospective cohorts.}, + file = {:pdf/Laar23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {37164814}, + ss_id = {97c5ca5dd59f564c88a0f36e74a4f55c1ae66b79}, + all_ss_ids = {['97c5ca5dd59f564c88a0f36e74a4f55c1ae66b79']}, + gscites = {0}, } @article{Laba17, @@ -12837,6 +15910,8 @@ @article{Laba17 pmid = {28146422}, gsid = {16955025695820045228}, gscites = {17}, + ss_id = {510bebf1faa812da5e70ce18eedfa768f0077a38}, + all_ss_ids = {['510bebf1faa812da5e70ce18eedfa768f0077a38']}, } @article{Labus22a, @@ -12848,9 +15923,11 @@ @article{Labus22a url = {https://link.springer.com/article/10.1007/s00330-022-08978-y}, abstract = {OBJECTIVES: To evaluate the effect of a deep learning-based computer-aided diagnosis (DL-CAD) system on experienced and less-experienced radiologists in reading prostate mpMRI. METHODS: In this retrospective, multi-reader multi-case study, a consecutive set of 184 patients examined between 01/2018 and 08/2019 were enrolled. Ground truth was combined targeted and 12-core systematic transrectal ultrasound-guided biopsy. Four radiologists, two experienced and two less-experienced, evaluated each case twice, once without (DL-CAD-) and once assisted by DL-CAD (DL-CAD+). 
ROC analysis, sensitivities, specificities, PPV and NPV were calculated to compare the diagnostic accuracy for the diagnosis of prostate cancer (PCa) between the two groups (DL-CAD- vs. DL-CAD+). Spearman's correlation coefficients were evaluated to assess the relationship between PI-RADS category and Gleason score (GS). Also, the median reading times were compared for the two reading groups. RESULTS: In total, 172 patients were included in the final analysis. With DL-CAD assistance, the overall AUC of the less-experienced radiologists increased significantly from 0.66 to 0.80 (p = 0.001; cutoff ISUP GG >= 1) and from 0.68 to 0.80 (p = 0.002; cutoff ISUP GG >= 2). Experienced radiologists showed an AUC increase from 0.81 to 0.86 (p = 0.146; cutoff ISUP GG >= 1) and from 0.81 to 0.84 (p = 0.433; cutoff ISUP GG >= 2). Furthermore, the correlation between PI-RADS category and GS improved significantly in the DL-CAD + group (0.45 vs. 0.57; p = 0.03), while the median reading time was reduced from 157 to 150 s (p = 0.023). CONCLUSIONS: DL-CAD assistance increased the mean detection performance, with the most significant benefit for the less-experienced radiologist; with the help of DL-CAD less-experienced radiologists reached performances comparable to that of experienced radiologists.}, optnote = {DIAG, RADIOLOGY}, + ss_id = {ce59572dd6919387c5260721bb7b070ca7ebf444}, + all_ss_ids = {['ce59572dd6919387c5260721bb7b070ca7ebf444']}, + gscites = {10}, } - @article{Lang18, author = {de Lange, S V and Bakker, M F and Monninkhof, E M and Peeters, P H M and de Koekkoek-Doll, P K and Mann, R M and Rutten, M J C M and Bisschops, R H C and Veltman, J and Duvivier, K M and Lobbes, M B I and de Koning, H J and Karssemeijer, N and Pijnappel, R M and Veldhuis, W B and van Gils, C H}, title = {Reasons for (non)participation in supplemental population-based MRI breast screening for women with extremely dense breasts}, @@ -12866,7 +15943,26 @@ @article{Lang18 optnote = {DIAG, RADIOLOGY}, pmid = {29759590}, gsid = {16489935243904182211}, - gscites = {7}, + gscites = {20}, + ss_id = {db7c0e95188f028e4899642d1510c600e7fb44f2}, + all_ss_ids = {['db7c0e95188f028e4899642d1510c600e7fb44f2']}, +} + +@article{Lark18, + author = {Larkin, James R and Simard, Manon A and Khrapitchev, Alexandre A and Meakin, James A and Okell, Thomas W and Craig, Martin and Ray, Kevin J and Jezzard, Peter and Chappell, Michael A and Sibson, Nicola R}, + title = {~~Quantitative blood flow measurement in rat brain with multiphase arterial spin labelling magnetic resonance imaging}, + doi = {10.1177/0271678x18756218}, + year = {2018}, + abstract = { Cerebral blood flow is an important parameter in many diseases and functional studies that can be accurately measured in humans using arterial spin labelling (ASL) MRI. However, although rat models are frequently used for preclinical studies of both human disease and brain function, rat CBF measurements show poor consistency between studies. This lack of reproducibility is due, partly, to the smaller size and differing head geometry of rats compared to humans, as well as the differing analysis methodologies employed and higher field strengths used for preclinical MRI. To address these issues, we have implemented, optimised and validated a multiphase pseudo-continuous ASL technique, which overcomes many of the limitations of rat CBF measurement. Three rat strains (Wistar, Sprague Dawley and Berlin Druckrey IX) were used, and CBF values validated against gold-standard autoradiography measurements. 
Label positioning was found to be optimal at 45deg, while post-label delay was optimised to 0.55 s. Whole brain CBF measures were 109 +- 22, 111 +- 18 and 100 +- 15 mL/100 g/min by multiphase pCASL, and 108 +- 12, 116 +- 14 and 122 +- 16 mL/100 g/min by autoradiography in Wistar, SD and BDIX cohorts, respectively. Tumour model analysis shows that the developed methods also apply in disease states. Thus, optimised multiphase pCASL provides robust, reproducible and non-invasive measurement of CBF in rats. }, + url = {http://dx.doi.org/10.1177/0271678X18756218}, + file = {Lark18.pdf:pdf\\Lark18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Cerebral Blood Flow & Metabolism}, + automatic = {yes}, + citation-count = {27}, + pages = {1557-1569}, + volume = {39}, + pmid = {29498562}, } @inproceedings{Lass11, @@ -12919,7 +16015,9 @@ @article{Lass12b pmid = {23014712}, month = {2}, gsid = {1728731555240293040}, - gscites = {105}, + gscites = {114}, + ss_id = {eaea24fdc6aa24479ff0cbd3b58dfcb2e8d56868}, + all_ss_ids = {['eaea24fdc6aa24479ff0cbd3b58dfcb2e8d56868']}, } @conference{Lass14, @@ -12946,8 +16044,10 @@ @article{Lass15 pmid = {25591989}, month = {1}, gsid = {9522455507672982699}, - gscites = {51}, + gscites = {75}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/153905}, + ss_id = {95c6d05f35d82b06b155d05f1a8954e940991ab9}, + all_ss_ids = {['95c6d05f35d82b06b155d05f1a8954e940991ab9']}, } @phdthesis{Lass15a, @@ -12979,8 +16079,10 @@ @article{Lass17 pmid = {28570264}, month = {7}, gsid = {15670809461306486226}, - gscites = {8}, + gscites = {12}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/181908}, + ss_id = {a1aa321aa6892ab2478ec99f422d1110afe1fc7d}, + all_ss_ids = {['a1aa321aa6892ab2478ec99f422d1110afe1fc7d']}, } @inproceedings{Lass20, @@ -12991,6 +16093,9 @@ @inproceedings{Lass20 url = {https://2020.midl.io/papers/lassen-schmidt20.html}, optnote = {DIAG, RADIOLOGY}, abstract = {Fully-automatic lung lobe segmentation is challenging due to anatomical variations, pathologies, and incomplete fissures. We trained a 3D u-net for pulmonary lobe segmentation on 49 mainly publically available datasets and introduced a weighted Dice loss function to emphasize the lobar boundaries. To validate the performance of the proposed method we compared the results to two other methods. The new loss function improved the mean distance to 1.46 mm (compared to 2.08 mm for simple loss function without weighting).}, + ss_id = {971a413a2cb3163d7c912ed8a1fd53ba64bda007}, + all_ss_ids = {['971a413a2cb3163d7c912ed8a1fd53ba64bda007']}, + gscites = {4}, } @inproceedings{Laue13, @@ -13004,6 +16109,37 @@ @inproceedings{Laue13 file = {Laue13.pdf:pdf\\Laue13.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {3}, + ss_id = {ad50e7a84dce1d4c36c5387731d99005711b062d}, + all_ss_ids = {['ad50e7a84dce1d4c36c5387731d99005711b062d']}, + gscites = {4}, +} + +@article{Laur22, + author = {Lauritzen, Andreas D. and von Euler-Chelpin, My Catarina and Lynge, Elsebeth and Vejborg, Ilse and Nielsen, Mads and Karssemeijer, Nico and Lillholm, Martin}, + title = {~~Robust Cross-vendor Mammographic Texture Models Using Augmentation-based Domain Adaptation for Long-term Breast Cancer Risk}, + doi = {10.48550/ARXIV.2212.13439}, + year = {2022}, + abstract = {Purpose: Risk-stratified breast cancer screening might improve early detection and efficiency without comprising quality. 
However, modern mammography-based risk models do not ensure adaptation across vendor-domains and rely on cancer precursors, associated with short-term risk, which might limit long-term risk assessment. We report a cross-vendor mammographic texture model for long-term risk. Approach: The texture model was robustly trained using two systematically designed case-control datasets. Textural features, indicative of future breast cancer, were learned by excluding samples with diagnosed/potential malignancies from training. An augmentation-based domain adaption technique, based on flavorization of mammographic views, ensured generalization across vendor-domains. The model was validated in 66,607 consecutively screened Danish women with flavorized Siemens views and 25,706 Dutch women with Hologic-processed views. Performances were evaluated for interval cancers (IC) within two years from screening and long-term cancers (LTC) from two years after screening. The texture model was combined with established risk factors to flag 10% of women with the highest risk. Results: In Danish women, the texture model achieved an area under the receiver operating characteristic (AUC) of 0.71 and 0.65 for ICs and LTCs, respectively. In Dutch women with Hologic-processed views, the AUCs were not different from AUCs in Danish women with flavorized views. The AUC for texture combined with established risk factors increased to 0.68 for LTCs. The 10% of women flagged as high-risk accounted for 25.5% of ICs and 24.8% of LTCs. Conclusions: The texture model robustly estimated long-term breast cancer risk while adapting to an unseen processed vendor-domain and identified a clinically relevant high-risk subgroup.}, url = {https://arxiv.org/abs/2212.13439}, file = {Laur22.pdf:pdf\\Laur22.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, journal = {arXiv:2212.13439}, automatic = {yes}, pmid = {35438561}, }
@article{Leac12, @@ -13020,8 +16156,10 @@ @article{Leac12 pmid = {22562143}, year = {2012}, gsid = {7944651468101679970}, - gscites = {139}, + gscites = {153}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/108222}, + ss_id = {9b7cbf3e93c3b346e824f26d1faf4194ec10cb9f}, + all_ss_ids = {['9b7cbf3e93c3b346e824f26d1faf4194ec10cb9f']}, } @inproceedings{Leem15, @@ -13045,10 +16183,10 @@ @conference{Leem17 booktitle = ECR, year = {2017}, abstract = {Purpose: Segmentation of cerebral white matter (WM), gray matter (GM) and cerebrospinal fluid (CSF) in head CT is important for subsequent quantitative analysis and automated detection of cerebral pathology. We introduce VCAST, a new volumetric annotation tool aimed at delineating soft tissue in non-contrast CT (NCCT) and CT perfusion (CTP). - Methods and Materials: VCAST supports traditional 2D visualizations and annotations, and provides functionalities to facilitate 3D segmentations based on pre-calculated grids of volumetric clusters where the clusters are spatially coherently grouped based on HUs. Clicking a cluster in a 2D-plane allows for inclusion of the 3D-cluster in the output segmentation. - Ten patients with suspicion of ischemic stroke were included in this retrospective study, five NCCTs and five whole brain CTPs (320-row detector scanner). 
Temporal average CTA was reconstructed from CTP and in one slice in arbitrary direction, WM, GM and CSF were annotated two times by one observer using VCAST. In NCCT, a subvolume of approximately 22 mm^3 was randomly selected in which CSF was annotated by one observer, using VCAST either with 2D (slice-based) or 2D and 3D (cluster-based) annotation support. Dice coefficients and annotation times were reported.
+             Results: Dice coefficients were 0.86+-0.04, 0.91+-0.02, 0.87+-0.02 for CSF, GM and WM respectively. CSF annotation times reduced from 16+-9 to 8+-5 minutes with 3D cluster support (p=0.02). CSF Dice similarity was 0.81+-0.03.
+             Conclusion: VCAST is a volumetric annotation tool which reduces the time to obtain 3D segmentations in head CT while maintaining good overlap with a slice-based approach.},
  optnote = {DIAG, RADIOLOGY},
}

@inproceedings{Leem18a,
@@ -13062,7 +16200,9 @@ @inproceedings{Leem18a
  file = {:pdf/Leem18a.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  gsid = {16960578953340713390},
-  gscites = {5},
+  gscites = {11},
+  ss_id = {f2ab2b234da918817c1721ccea4870c69285ad6a},
+  all_ss_ids = {['f2ab2b234da918817c1721ccea4870c69285ad6a']},
}

@inproceedings{Leem18b,
@@ -13075,7 +16215,8 @@ @inproceedings{Leem18b
  file = {:pdf/Leem18b.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  gsid = {12856903356329628963},
-  gscites = {1},
+  gscites = {2},
+  all_ss_ids = {['63c432e76128b36d5943eb116a38f98a9bb9ae27']},
}

@inproceedings{Leem18c,
@@ -13085,18 +16226,18 @@ @inproceedings{Leem18c
  year = {2018},
  url = {https://openreview.net/pdf?id=rJxlLGBElN},
  abstract = {There is a demand for deep learning approaches able to process high resolution 3D
-             volumes in an accurate and fast way. However, training of these models is often
-             limited by the available GPU memory, which often results in reduced model depth,
-             receptive field, and input size, limiting the expressiveness of the model. In this work
-             we present a memory efficient modified convolutional-LSTM, which integrates
-             a context-rich 2D U-Net as an input in a slice based manner and subsequently
-             integrates the acquired slices using LSTM to create the full 3D context. Memory
-             savings achieved by checkpointing on one or more steps within the LSTM allow
-             for direct training on a single full non-contrast CT volume of: 512 x 512 x 320 on
-             a NVIDIA Titan X with 12 GB of VRAM. We demonstrate the effectiveness of our
-             method by training and segmenting the cranial cavity including soft-brain tissue
-             and CSF in the non-contrast CT end-to-end on the full image data, without any
-             stitching, while preserving a large receptive field and high expressiveness.},
+             volumes in an accurate and fast way. However, training of these models is often
+             limited by the available GPU memory, which often results in reduced model depth,
+             receptive field, and input size, limiting the expressiveness of the model. In this work
+             we present a memory efficient modified convolutional-LSTM, which integrates
+             a context-rich 2D U-Net as an input in a slice based manner and subsequently
+             integrates the acquired slices using LSTM to create the full 3D context. Memory
+             savings achieved by checkpointing on one or more steps within the LSTM allow
+             for direct training on a single full non-contrast CT volume of: 512 x 512 x 320 on
+             a NVIDIA Titan X with 12 GB of VRAM.
We demonstrate the effectiveness of our + method by training and segmenting the cranial cavity including soft-brain tissue + and CSF in the non-contrast CT end-to-end on the full image data, without any + stitching, while preserving a large receptive field and high expressiveness.}, file = {:pdf/Leem18c.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {2197973819111672877}, @@ -13117,7 +16258,9 @@ @article{Leem19 file = {:pdf/Leem19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {10211114630238511371}, - gscites = {1}, + gscites = {11}, + ss_id = {e03d4a9c11a70f4ec009ea81d141742b40b20789}, + all_ss_ids = {['e03d4a9c11a70f4ec009ea81d141742b40b20789']}, } @article{Leem19b, @@ -13132,13 +16275,15 @@ @article{Leem19b doi = {10.21105/joss.01576}, code = {https://github.com/silvandeleemput/memcnn}, abstract = {Neural networks are computational models that were originally inspired by biological neural networks like animal brains. These networks are composed of many small computational units called neurons that perform elementary calculations. Instead of explicitly programming the behavior of neural networks, these models can be trained to perform tasks, like classifying images, by presenting them examples. Sufficiently complex neural networks can automatically extract task-relevant characteristics from the presented examples without having prior knowledge about the task domain, which makes them attractive for many complicated real-world applications. - - Reversible operations have recently been successfully applied to classification problems to reduce memory requirements during neural network training. This feature is accomplished by removing the need to store the input activation for computing the gradients at the backward pass and instead reconstruct them on demand. However, current approaches rely on custom implementations of backpropagation, which limits applicability and extendibility. We present MemCNN, a novel PyTorch framework that simplifies the application of reversible functions by removing the need for a customized backpropagation. The framework contains a set of practical generalized tools, which can wrap common operations like convolutions and batch normalization and which take care of memory management. We validate the presented framework by reproducing state-of-the-art experiments using MemCNN and by comparing classification accuracy and training time on Cifar-10 and Cifar-100. Our MemCNN implementations achieved similar classification accuracy and faster training times while retaining compatibility with the default backpropagation facilities of PyTorch.}, + + Reversible operations have recently been successfully applied to classification problems to reduce memory requirements during neural network training. This feature is accomplished by removing the need to store the input activation for computing the gradients at the backward pass and instead reconstruct them on demand. However, current approaches rely on custom implementations of backpropagation, which limits applicability and extendibility. We present MemCNN, a novel PyTorch framework that simplifies the application of reversible functions by removing the need for a customized backpropagation. The framework contains a set of practical generalized tools, which can wrap common operations like convolutions and batch normalization and which take care of memory management. 
We validate the presented framework by reproducing state-of-the-art experiments using MemCNN and by comparing classification accuracy and training time on Cifar-10 and Cifar-100. Our MemCNN implementations achieved similar classification accuracy and faster training times while retaining compatibility with the default backpropagation facilities of PyTorch.},
  file = {:pdf/Leem19b.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  publisher = {The Open Journal},
  gsid = {12471482698808856137},
-  gscites = {2},
+  gscites = {13},
+  ss_id = {dce049bbedc5134dc6eb61fc7e5ce1d1c414bdd1},
+  all_ss_ids = {['dce049bbedc5134dc6eb61fc7e5ce1d1c414bdd1']},
}

@article{Leem20,
@@ -13156,8 +16301,10 @@ @article{Leem20
  pmid = {31484111},
  month = {4},
  gsid = {15708646315318329891},
-  gscites = {4},
+  gscites = {14},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/218599},
+  ss_id = {9f7129d07d92e91ecf42dd33ef05d83514303003},
+  all_ss_ids = {['9f7129d07d92e91ecf42dd33ef05d83514303003']},
}

@article{Leen20,
@@ -13174,110 +16321,84 @@ @article{Leen20
  optnote = {DIAG},
  pmid = {32459716},
  year = {2020},
+  ss_id = {5a1c2ad4627095f867a2fe62d61177c4631096d9},
+  all_ss_ids = {['5a1c2ad4627095f867a2fe62d61177c4631096d9']},
+  gscites = {221},
}

-@Conference{Leeu23,
-  author = {van Leeuwen, Kicky G. and Hedderich, D.M. and Schalekamp, Steven},
-  booktitle = ECR,
-  title = {Potential risk of off-label use of commercially available AI-based software for radiology},
-  abstract = {Purpose or Learning Objective: The aim of this study was to analyse potential discrepancies between the claims and disclaimers of the intended purpose statements of CE-marked AI-based software for radiology.
-Methods or Background: In March 2022, we asked all vendors listed on www.AIforRadiology.com (n=87) to verify or submit the intended purpose according to European clearance for their products (n=191). Any new additions were included until September 26th 2022 (n=12). Claims and disclaimers were extracted from the statements. Potential conflicts of claims and disclaimers were flagged.
-Results or Findings: We received the intended purpose statements for 157 of the 203 products. Of those, 36 were excluded as they provided too little information to analyse. The included products were certified under the current medical device regulations (class IIa = 24, class IIb = 9) and former Medical Device Directive (class I = 45, class IIa = 39, class IIb = 3). Of the 121 included statements 56 held disclaimers. For 13 of these products the claims and disclaimers were flagged to contradict each other. Potential discrepant disclaimer statements were e.g. 'act per the standard of care' (n=7) and 'not for diagnostic use' (n=6), while claiming to aid in the diagnosis, triaging or risk scoring of clinical conditions.
-Conclusion: Potential discrepancies in claims and disclaimers were found for a substantial number of AI-tools bearing the risk that users of the AI software misunderstand the permitted use-cases which may lead to off-label use.
-Limitations: Not all intended purpose statements received were of sufficient quality to use for analysis. The definition of what information the intended purpose should contain is not clearly specified under the MDR making it hard to objectively assess or compare.},
-  optnote = {DIAG, RADIOLOGY},
-  year = {2023},
-}
-
-@conference{Leeu22,
-  author = {van Leeuwen, Kicky G.
and de Rooij, Maarten and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J.C.M.}, - title = {The rise of artificial intelligence solutions in radiology departments in the Netherlands}, - abstract = {Purpose: There are over 180 CE-marked artificial intelligence (AI) products for radiology commercially available in Europe, but little is known about the current clinical use. We investigated the clinical use of commercial AI software in radiology departments in the Netherlands over a two-year period. -Methods: We consulted the radiology department of all hospital organizations in the Netherlands (n=69) in February-March 2020 (44 respondents) and February-March 2021 (37 respondents). A representative of the department was asked to fill in a questionnaire about the (planned) clinical use of CE marked AI products for radiology, the available funding for AI, and biggest obstacles for implementation. -Results: From 2020 to 2021 the percentage of respondents that desired the adoption of AI tools in radiology increased from 63% to 86%. In 2020, 14 responding organisations used AI in clinical practice, which increased to 23 (33% of all organizations) in 2021. The total number of AI implementations in clinical practice expanded by 157%, from 19 to 49 implementations. Also, the diversity increased from 8 to 32 unique products. In 2021, 35% of respondents had budgets allocated for AI implementations either on the departmental level or on the institutional level, which was 26% in 2020. The major obstacles for AI adoption shifted from difficulties with the technical integration (2020) to the lack of budgets and an unclear business case (2021). Technical integration remained the second most often listed obstacle. -Conclusion: AI adoption is gradually increasing in clinical radiology in the Netherlands. The number of radiology departments using AI has increased to at least a third of all organizations. Also, the number and diversity of AI applications per department grew substantially. -Limitations: Results may be influenced by a nonresponse bias.}, - booktitle = ECR, - year = {2022}, +@conference{Leeu19, + author = {van Leeuwen, Kicky G}, + title = {Opinion of Dutch healthcare professionals on the development and adoption of artificial intelligence in healthcare}, + booktitle = {EuSoMII Annual Meeting}, + year = {2019}, optnote = {DIAG, RADIOLOGY}, } -@conference{Leeu22a, - author = {Deden, Laura N. and van Leeuwen, Kicky G. and Becks, M.J. and Bernsen, M.L.E. and de Rooij, Maarten and Martens, J.M. and Meijer, F.J.A.}, - title = {Gluren bij de buren - Evaluating and sharing real-world experience of an AI stroke tool in two centres}, - abstract = {Background: Currently, many hospitals are implementing AI software. However, clear clinical implementation procedures are not yet available. In order to exchange experiences, two interventional stroke centres (Radboudumc and Rijnstate) collaborated in the prospective evaluation of an AI tool for stroke diagnostics. -Methodology: Primary aim of StrokeViewer (Nicolab) implementation in both centres was diagnostic support in detecting large vessel occlusions (LVO) in anterior cerebral circulation. Additionally, in Rijnstate analysis of cerebral CT perfusion (CTP) was available. In Radboudumc, LVO results were available after log in to the StrokeViewer server. In Rijnstate, results were pushed to PACS as a pdf-report. Trial period in Radboudumc was 12 months, in Rijnstate 7 months. 
The performance of proximal LVO detection was compared with radiologists’ assessments. Users filled in a questionnaire on user experience at several time points. In Radboudumc, the use was monitored by individual log-in information. -Results: Quantitative evaluation of ICA, M1 and proximal M2 occlusion detection (prevalence 18%) resulted in a case base sensitivity and specificity of 74% and 91% in Rijnstate (n=276) and 77% and 91% in Radboudumc (n=516). The use of the tool decreased over time. Radiologists unfamiliar with LVO assessment tended to value the AI report more than experienced radiologists. The net promoter scores were -56% in Radboudumc and -65% in Rijnstate. The tool was considered user friendly (7.2/10). CTP assessment in Rijnstate was used more frequently than LVO detection. -Conclusion: This evaluation aids to understand some of the challenges involved in clinical implementation and acceptance by users of AI tools. Findings are consistent for both centres. Success is not only dependent on the product and its performance, but also on clinical goal setting, expectations, context and implementation choices. Sharing experience within the NVvR AInetwork can help to gain insights into crucial factors for success (“Gluren-bij-de-buren”).}, - booktitle = {Radiologendagen}, - year = {2022}, +@conference{Leeu20, + author = {van Leeuwen, Kicky G. and Schalekamp, Steven and Rutten, Matthieu J.C.M. and van Ginneken, Bram and de Rooij, Maarten}, + title = {Scientific Evidence for 100 Commercially Available Artificial Intelligence Tools for Radiology: A Systematic Review}, + abstract = {Purpose: To survey scientific evidence for all CE marked artificial intelligence (AI) based software products for radiology available as of April, 2020. + Materials and Methods: We created an online overview of CE-certified AI software products for clinical radiology based on vendor-supplied product specifications (www.aiforradiology.com). For these products, we conducted a systematic literature study on Pubmed for original, peer-reviewed, English articles published between Jan 1, 2015 and April 14, 2020 on the efficacy of the AI software. Papers were included when the product and/or company name were mentioned, when efficacy level 2 to 6 according to Fryback was reported on an independent dataset, and when the tool was applied on in vivo human data. + Results: Our product overview consisted of 100 CE-certified software products from 51 different vendors. Among the 839 papers screened, 108 met the inclusion criteria. For 70/100 products we did not find papers that met the inclusion criteria. The evidence of the other 30 products was predominantly (84%) focused on diagnostic accuracy (efficacy level 2). Half of the available evidence (49%) was independent and not (co)-funded or (co)-authored by the vendor. In more than half (55%) of the papers the version number of the product used in the study was not mentioned. From all studies, 20 (18%) used validation data from multiple countries, 42 (39%) were multicenter studies, 25 (23%) were performed with acquisition machines from multiple manufacturers. + Conclusion: One hundred CE-certified AI software products for radiology exist today. Yet, for the majority, scientific evidence on the clinical performance and clinical impact is lacking. These insights should raise awareness that certification may not guarantee technical and clinical efficacy of an AI product. 
+ Clinical relevance: Our findings identify the available evidence for commercially available AI software, aiming to contribute to a safe and effective implementation of AI software in radiology departments.}, + booktitle = RSNA, + year = {2020}, optnote = {DIAG, RADIOLOGY}, } -@Conference{Leeu22b, - author = {van Leeuwen, Kicky G. and Becks, M.J. and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J.C.M. and de Rooij, Maarten and Meijer, F.J.A.}, - booktitle = ECR, - title = {Real-world evaluation of artificial intelligence software for cerebral large vessel occlusion detection in {CT} angiography}, - abstract = {Purpose: The commercially available AI tool (StrokeViewer v2, Nicolab) supports the diagnostic process of stroke by detecting large vessel occlusions (LVO) on CTA. We prospectively evaluated this tool in our department to monitor safety and impact. -Methods: We implemented the software with the goal to improve the diagnosis of LVO and elevate the diagnostic confidence of the radiologist (resident). We used quantitative measures (data from clinical systems, vendor log files) and qualitative measures (user survey) to analyse diagnostic performance, number of users, login attempts, radiologists’ diagnostic confidence, and user experience. -Results: In total, 226 CTAs with a clinical indication of stroke between January-June 2021 were prospectively evaluated. Thirteen cases of posterior circulation and distal vessel occlusions were excluded as they were outside the intended use of the AI tool. The AI tool missed 12 of the 36 occlusions in the middle cerebral or intracranial internal carotid artery (M1=1, M2=10, ICA=1) resulting in an accuracy of 86.4%. Irrespective of location, the sensitivity was 77.8% and specificity 90.4%. The number of monthly unique users varied between 8 and 24 radiologists/residents. Log in attempts dropped after the initial month (which included training) to a monthly average of 44 attempts. The diagnostic confidence did not increase during the use of the tool. The likelihood that users would recommend StrokeViewer to colleagues was rated 4.5/10. -Conclusion: Over six months, the use of StrokeViewer dropped and users did not sense improvement of diagnostic confidence. Measures have been taken to stimulate adoption for the latter six months of the trial period. -Limitation: Because of the prospective character, no comparison could be made between radiologists supported by AI vs radiologists without AI.}, - optnote = {DIAG, RADIOLOGY}, - year = {2022}, -} - @article{Leeu21, - author = {van Leeuwen, Kicky G. and Schalekamp, Steven and Rutten, Matthieu J.C.M. and van Ginneken, Bram and de Rooij, Maarten}, - title = {Artificial intelligence in radiology: 100 commercially available products and their scientific evidence}, - journal = ER, - year = {2021}, - volume = {31}, - pages = {3797–3804}, - issn = {1432-1084}, - doi = {10.1007/s00330-021-07892-z}, - url = {https://doi.org/10.1007/s00330-021-07892-z}, - abstract = {OBJECTIVES: Map the current landscape of commercially available artificial intelligence (AI) software for radiology and review the availability of their scientific evidence. METHODS: We created an online overview of CE-marked AI software products for clinical radiology based on vendor-supplied product specifications (www.aiforradiology.com ). Characteristics such as modality, subspeciality, main task, regulatory information, deployment, and pricing model were retrieved. 
We conducted an extensive literature search on the available scientific evidence of these products. Articles were classified according to a hierarchical model of efficacy. RESULTS: The overview included 100 CE-marked AI products from 54 different vendors. For 64/100 products, there was no peer-reviewed evidence of its efficacy. We observed a large heterogeneity in deployment methods, pricing models, and regulatory classes. The evidence of the remaining 36/100 products comprised 237 papers that predominantly (65%) focused on diagnostic accuracy (efficacy level 2). From the 100 products, 18 had evidence that regarded level 3 or higher, validating the (potential) impact on diagnostic thinking, patient outcome, or costs. Half of the available evidence (116/237) were independent and not (co-)funded or (co-)authored by the vendor. CONCLUSIONS: Even though the commercial supply of AI software in radiology already holds 100 CE-marked products, we conclude that the sector is still in its infancy. For 64/100 products, peer-reviewed evidence on its efficacy is lacking. Only 18/100 AI products have demonstrated (potential) clinical impact. KEY POINTS: • Artificial intelligence in radiology is still in its infancy even though already 100 CE-marked AI products are commercially available. • Only 36 out of 100 products have peer-reviewed evidence of which most studies demonstrate lower levels of efficacy. • There is a wide variety in deployment strategies, pricing models, and CE marking class of AI products for radiology.}, - file = {Leeu21.pdf:pdf\\Leeu21.pdf:PDF}, - issn-linking = {0938-7994}, - keywords = {Artificial intelligence; Radiology; Device approval; Evidence-based practice}, - optnote = {DIAG, RADIOLOGY}, - owner = {NLM}, - pmid = {33856519}, + author = {van Leeuwen, Kicky G. and Schalekamp, Steven and Rutten, Matthieu J.C.M. and van Ginneken, Bram and de Rooij, Maarten}, + title = {Artificial intelligence in radiology: 100 commercially available products and their scientific evidence}, + journal = ER, + year = {2021}, + volume = {31}, + pages = {3797-3804}, + doi = {10.1007/s00330-021-07892-z}, + url = {https://doi.org/10.1007/s00330-021-07892-z}, + abstract = {OBJECTIVES: Map the current landscape of commercially available artificial intelligence (AI) software for radiology and review the availability of their scientific evidence. METHODS: We created an online overview of CE-marked AI software products for clinical radiology based on vendor-supplied product specifications (www.aiforradiology.com ). Characteristics such as modality, subspeciality, main task, regulatory information, deployment, and pricing model were retrieved. We conducted an extensive literature search on the available scientific evidence of these products. Articles were classified according to a hierarchical model of efficacy. RESULTS: The overview included 100 CE-marked AI products from 54 different vendors. For 64/100 products, there was no peer-reviewed evidence of its efficacy. We observed a large heterogeneity in deployment methods, pricing models, and regulatory classes. The evidence of the remaining 36/100 products comprised 237 papers that predominantly (65%) focused on diagnostic accuracy (efficacy level 2). From the 100 products, 18 had evidence that regarded level 3 or higher, validating the (potential) impact on diagnostic thinking, patient outcome, or costs. Half of the available evidence (116/237) were independent and not (co-)funded or (co-)authored by the vendor. 
CONCLUSIONS: Even though the commercial supply of AI software in radiology already holds 100 CE-marked products, we conclude that the sector is still in its infancy. For 64/100 products, peer-reviewed evidence on its efficacy is lacking. Only 18/100 AI products have demonstrated (potential) clinical impact. KEY POINTS: * Artificial intelligence in radiology is still in its infancy even though already 100 CE-marked AI products are commercially available. * Only 36 out of 100 products have peer-reviewed evidence of which most studies demonstrate lower levels of efficacy. * There is a wide variety in deployment strategies, pricing models, and CE marking class of AI products for radiology.}, + file = {Leeu21.pdf:pdf\\Leeu21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + pmid = {33856519}, + ss_id = {93e353567bbc7525306a14b6219973d34b084c4e}, + all_ss_ids = {['93e353567bbc7525306a14b6219973d34b084c4e']}, + gscites = {140}, } @article{Leeu21a, - author = {van Leeuwen, Kicky G and de Rooij, Maarten and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J C M}, - title = {How does artificial intelligence in radiology improve efficiency and health outcomes?}, - journal = PEDRAD, - issn = {1432-1998}, - doi = {10.1007/s00247-021-05114-8}, - url = {https://doi.org/10.1007/s00247-021-05114-8}, - abstract = {Since the introduction of artificial intelligence (AI) in radiology, the promise has been that it will improve health care and reduce costs. Has AI been able to fulfill that promise? We describe six clinical objectives that can be supported by AI: a more efficient workflow, shortened reading time, a reduction of dose and contrast agents, earlier detection of disease, improved diagnostic accuracy and more personalized diagnostics. We provide examples of use cases including the available scientific evidence for its impact based on a hierarchical model of efficacy. We conclude that the market is still maturing and little is known about the contribution of AI to clinical practice. 
More real-world monitoring of AI in clinical practice is expected to aid in determining the value of AI and making informed decisions on development, procurement and reimbursement.},
  file = {Leeu21a.pdf:pdf\\Leeu21a.pdf:PDF},
  keywords = {Artificial intelligence; Pediatrics; Evidence-based practice; Impact analysis; Innovation; Radiology; Value-based healthcare},
  optnote = {DIAG, RADIOLOGY},
  number = {11},
  pages = {2087--2093},
  pmid = {34117522},
  volume = {52},
  year = {2022}
}

@Article{Leeu21b,
  author = {van Leeuwen, Kicky G and Meijer, Frederick J A and Schalekamp, Steven and Rutten, Matthieu J C M and van Dijk, Ewoud J and van Ginneken, Bram and Govers, Tim M and Rooij, Maarten De},
  title = {{Cost-effectiveness of artificial intelligence aided vessel occlusion detection in acute stroke: an early health technology assessment}},
  doi = {10.1186/s13244-021-01077-4},
  issn = {1869-4101},
  pages = {133},
  url = {https://doi.org/10.1186/s13244-021-01077-4},
  volume = {12},
  file = {Leeu21b.pdf:pdf\\Leeu21b.pdf:PDF},
  journal = INSI,
  keywords = {Artificial intelligence, Computed tomography angiography, Cost–benefit analysis, Endovascular procedures, Stroke, artificial intelligence, benefit analysis, computed tomography angiography, cost, endovascular, stroke},
  optnote = {DIAG, RADIOLOGY},
  publisher = {Springer International Publishing},
  year = {2021},
}

@article{Leeu21a,
  author = {van Leeuwen, Kicky G and de Rooij, Maarten and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J C M},
  title = {How does artificial intelligence in radiology improve efficiency and health outcomes?},
  journal = PEDRAD,
  doi = {10.1007/s00247-021-05114-8},
  url = {https://doi.org/10.1007/s00247-021-05114-8},
  abstract = {Since the introduction of artificial intelligence (AI) in radiology, the promise has been that it will improve health care and reduce costs. Has AI been able to fulfill that promise? We describe six clinical objectives that can be supported by AI: a more efficient workflow, shortened reading time, a reduction of dose and contrast agents, earlier detection of disease, improved diagnostic accuracy and more personalized diagnostics. We provide examples of use cases including the available scientific evidence for its impact based on a hierarchical model of efficacy. We conclude that the market is still maturing and little is known about the contribution of AI to clinical practice.
More real-world monitoring of AI in clinical practice is expected to aid in determining the value of AI and making informed decisions on development, procurement and reimbursement.},
  file = {Leeu21a.pdf:pdf\\Leeu21a.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  number = {11},
  pages = {2087--2093},
  pmid = {34117522},
  volume = {52},
  year = {2022},
  ss_id = {5f6c21ae64b9db06da177f44b2d59a0dd6f9c583},
  all_ss_ids = {['5f6c21ae64b9db06da177f44b2d59a0dd6f9c583']},
  gscites = {50},
}

@article{Leeu21b,
  author = {van Leeuwen, Kicky G and Meijer, Frederick J A and Schalekamp, Steven and Rutten, Matthieu J C M and van Dijk, Ewoud J and van Ginneken, Bram and Govers, Tim M and Rooij, Maarten De},
  title = {{Cost-effectiveness of artificial intelligence aided vessel occlusion detection in acute stroke: an early health technology assessment}},
  doi = {10.1186/s13244-021-01077-4},
  pages = {133},
  url = {https://doi.org/10.1186/s13244-021-01077-4},
  volume = {12},
  file = {Leeu21b.pdf:pdf\\Leeu21b.pdf:PDF},
  journal = INSI,
  optnote = {DIAG, RADIOLOGY},
  publisher = {Springer International Publishing},
  year = {2021},
  ss_id = {dc68e0f61994ca0e768584d39c108305287f338a},
  all_ss_ids = {['dc68e0f61994ca0e768584d39c108305287f338a']},
  gscites = {14},
}

@conference{Leeu21c,
@@ -13286,21 +16407,21 @@ @conference{Leeu21c
  booktitle = ECR,
  year = {2021},
  abstract = {Purpose: There are over 150 artificial intelligence (AI) products for radiology offered, but little is known about their current clinical use. We investigated actual clinical use of AI software in radiology departments in the Netherlands.
 Materials and Methods: We consulted the radiology department of each hospital organization in the Netherlands (n=70) about their current AI implementations and plans from February-March 2020. A representative of the department was asked to fill in a questionnaire about their knowledge, experience, research and/or clinical use of commercially available CE-certified AI products for radiology (n=93). Responses for these familiarity-levels were analysed to create an overview with quantitative metrics.
 Results: The response rate of the consulted hospitals was 43/70: 38/62 for general hospitals, 5/7 for academic medical centers, and 0/1 for children's hospitals. Of the respondents 30 (70%) were radiologists, 5 (12%) application or information managers, and 8 (19%), among others, clinical physicists and managers. A third (14) of the participating organizations had one to three AI applications in clinical use, with a total of 19 implementations. These implementations involved eight different vendors of which four were from the Netherlands. Most commonly used was software for bone age prediction and stroke detection. Respondents were most familiar with products aimed at neurology and cardiology. MR, CT and mammography were the most familiar modalities for AI. Most interest for clinical implementation was shown in software to triage exams. Eleven organizations (26%) had a dedicated budget for AI, either from the hospital or the department.
 Conclusion: Even though the supply of AI software is extensive, clinical use remains limited showing that we are still in the initial stages of integrating AI in clinical practice in the Netherlands.
Limitations: Results may be influenced by a nonresponse bias.},
  optnote = {DIAG, RADIOLOGY},
}

@conference{Leeu21d,
  author = {van Leeuwen, Kicky G and Meijer, Frederick J A and Schalekamp, Steven and Rutten, Matthieu J C M and van Dijk, Ewoud J and van Ginneken, Bram and Govers, Tim M and de Rooij, Maarten},
  title = {Artificial Intelligence in Acute Stroke: an Early Health Technology Assessment of Vessel Occlusion Detection on Computed Tomography},
  abstract = {Purpose or Learning Objective: To demonstrate the cost-effectiveness of artificial intelligence (AI) software to aid in the detection of intracranial vessel occlusions in stroke compared to standard care by performing early health technology assessment.
 Methods or Background: We used a Markov based model from a societal perspective in a UK setting to demonstrate the potential value of an AI tool reported in expected incremental costs (IC) and effects (IE) in quality adjusted life years (QALYs). The initial population consisted of patients suspected of stroke based on symptoms and exclusion of other causes as demonstrated by non-contrast cerebrum CT. Input parameters for the model were predominantly based on stroke registry data from the UK and complemented with pooled outcome data from large randomized trials. Parameters were varied to demonstrate model robustness.
Results or Findings: The AI strategy with its base-case parameters (6% missed diagnoses of intra-arterial therapy eligible patients by clinicians, $40 per AI analysis, 50% reduction of missed vessel occlusions by AI) resulted in modest cost-savings and incremental QALYs over the projected lifetime (IC: - $156, -0.23%; IE: +0.01 QALYs, +0.07%) per ischaemic stroke patient. Within a ninety-day window after treatment no financial (IC: +$60) and negligible QALY (IE: +0.0001) gain was observed. For each yearly cohort of patients in the UK this translates to a total cost saving of $11 million.
 Conclusion: We showed that computer aided thrombus detection in emergency care has the potential to increase health and save costs. Results may contribute to the debate on the investments, financial accountability and reimbursement for the clinical use of AI technology.
 Limitations: Parameter values of the model were based on results from previous studies.},
  booktitle = ECR,
  year = {2021},
  optnote = {DIAG, RADIOLOGY},
}

@conference{Leeu21e,
  author = {van Leeuwen, Kicky G. and de Rooij, Maarten and Rutten, Matthieu J.C.M. and van Ginneken, Bram and Schalekamp, Steven},
  title = {Performance Of A Commercial Software Package For Lung Nodule Detection On Chest Radiographs Compared With 8 Expert Readers},
  abstract = {Purpose: Multi-center evaluation of the stand-alone performance of commercially available lung nodule detection software (Lunit INSIGHT CXR3).
 Methods and Materials: A set of 300 posteroanterior (PA) and lateral chest radiographs from four medical centers in the Netherlands was collected. Solitary lung nodules ranging from 5 to 35 mm in size were present in 111 of the cases. All nodules were confirmed by CT within three months of the radiograph acquisition. Control radiographs were determined based on a negative CT within six months.
Five radiologists and three radiology residents scored the set to provide context to the algorithm performance. All PA radiographs were processed by Lunit INSIGHT CXR3, a commercial software product that detects ten common abnormalities in chest radiographs. Area under the receiver operating characteristics curve (AUC) and sensitivity at 90% specificity were used to measure performance. Multi-reader multi-case ROC analysis based on U-statistics (iMRMC-v4 software) was applied to compare CXR3 with the readers. Subanalysis was performed regarding nodule size (small<15mm, large>15mm) and conspicuity levels (well visible, moderately visible, subtle, very subtle).
 Results: Out of the 300 radiographs, 7 could not be processed by CXR3, resulting in a set of 104 nodule cases and 189 normal cases for evaluation. The CXR3 AUC was 0.93 and significantly higher than the mean reader AUC of 0.82 (p<0.001). CXR3 was also significantly better than the best reader with an AUC of 0.88 (p=0.028). At a specificity level of 90%, sensitivity was 83.2% for CXR3 and 63.3% (std+-7.5%) for the reader average. Regarding conspicuity of the nodules, CXR3 AUCs were 0.99 for well visible, 0.94 for moderately visible, 0.94 for subtle, and 0.78 for very subtle nodules. No significant difference in CXR3 performance was observed between the detection of small (AUC 0.91) and large nodules (AUC 0.93).
Conclusions: Lunit INSIGHT CXR3 significantly outperforms the comparison group of eight readers in nodule detection on chest radiographs.
 Clinical Relevance/Application: Generalizability of artificial intelligence algorithms is not trivial. Performance studies increase confidence in algorithms to the users, especially to those with similar patient populations.},
  booktitle = RSNA,
  year = {2021},
  optnote = {DIAG, RADIOLOGY},
}

@conference{Leeu21f,
  author = {van Leeuwen, Kicky G. and de Rooij, Maarten and Rutten, Matthieu J.C.M. and Schalekamp, Steven and van Ginneken, Bram},
  title = {Commercial Artificial Intelligence Solutions For Radiology: A Market Update},
  abstract = {Purpose: Provide an overview of the current market of regulatory-cleared artificial intelligence (AI) software for radiology.
 Methods and Materials: An overview of CE marked AI products for clinical radiology is maintained online (https://www.AIforRadiology.com). Vendors were asked to verify and complete the product information. This overview allows for analysis of market trends. Characteristics of the market were based on the state of the database on the 1st of May 2021.
 Results: In May 2021 there were 161 CE marked AI products on the market, an increase of 36% compared with one year prior. The growth from 2019 to 2020 was 69% (from 70 to 118 products). The number of vendors offering AI products only grew with 13% from 61 in 2020 to 69 in 2021. The average number of products per company therefore increased from 1.9 to 2.3. The time from company founding to the first product on the market is on average 4 years and 1 month. Most prevalent are tools for neuro and chest imaging. With respect to modality, CT and MR covered 62% of all products. Half of the CE marked AI products (51%) have also been cleared by the FDA. To our knowledge, only four products were CE marked under the new Medical Device Regulations.
Subscription or licensing are the most popular pricing models. The majority of products are offered with both the option of local and cloud-based installation.
 Conclusions: The growth of AI products new to the market is slowing down. This effect is even stronger for vendors. Existing vendors have been expanding their portfolios.
 Clinical Relevance/Application: The market of AI products for radiology is growing. Our research provides a transparent overview of the available products and their evidence.},
  booktitle = RSNA,
  year = {2021},
  optnote = {DIAG, RADIOLOGY},
}

@conference{Leeu22,
  author = {van Leeuwen, Kicky G. and de Rooij, Maarten and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J.C.M.},
  title = {The rise of artificial intelligence solutions in radiology departments in the Netherlands},
  abstract = {Purpose: There are over 180 CE-marked artificial intelligence (AI) products for radiology commercially available in Europe, but little is known about the current clinical use. We investigated the clinical use of commercial AI software in radiology departments in the Netherlands over a two-year period.
+ Methods: We consulted the radiology department of all hospital organizations in the Netherlands (n=69) in February-March 2020 (44 respondents) and February-March 2021 (37 respondents). A representative of the department was asked to fill in a questionnaire about the (planned) clinical use of CE marked AI products for radiology, the available funding for AI, and biggest obstacles for implementation. + Results: From 2020 to 2021 the percentage of respondents that desired the adoption of AI tools in radiology increased from 63% to 86%. In 2020, 14 responding organisations used AI in clinical practice, which increased to 23 (33% of all organizations) in 2021. The total number of AI implementations in clinical practice expanded by 157%, from 19 to 49 implementations. Also, the diversity increased from 8 to 32 unique products. In 2021, 35% of respondents had budgets allocated for AI implementations either on the departmental level or on the institutional level, which was 26% in 2020. The major obstacles for AI adoption shifted from difficulties with the technical integration (2020) to the lack of budgets and an unclear business case (2021). Technical integration remained the second most often listed obstacle. + Conclusion: AI adoption is gradually increasing in clinical radiology in the Netherlands. The number of radiology departments using AI has increased to at least a third of all organizations. Also, the number and diversity of AI applications per department grew substantially. + Limitations: Results may be influenced by a nonresponse bias.}, + booktitle = ECR, + year = {2022}, optnote = {DIAG, RADIOLOGY}, } -@conference{Leeu19, - author = {van Leeuwen, Kicky G}, - title = {Opinion of Dutch healthcare professionals on the development and adoption of artificial intelligence in healthcare}, - booktitle = {EuSoMII Annual Meeting}, - year = {2019}, +@conference{Leeu22a, + author = {Deden, Laura N. and van Leeuwen, Kicky G. and Becks, M.J. and Bernsen, M.L.E. and de Rooij, Maarten and Martens, J.M. and Meijer, F.J.A.}, + title = {Gluren bij de buren - Evaluating and sharing real-world experience of an AI stroke tool in two centres}, + abstract = {Background: Currently, many hospitals are implementing AI software. However, clear clinical implementation procedures are not yet available. In order to exchange experiences, two interventional stroke centres (Radboudumc and Rijnstate) collaborated in the prospective evaluation of an AI tool for stroke diagnostics. + Methodology: Primary aim of StrokeViewer (Nicolab) implementation in both centres was diagnostic support in detecting large vessel occlusions (LVO) in anterior cerebral circulation. Additionally, in Rijnstate analysis of cerebral CT perfusion (CTP) was available. In Radboudumc, LVO results were available after log in to the StrokeViewer server. In Rijnstate, results were pushed to PACS as a pdf-report. Trial period in Radboudumc was 12 months, in Rijnstate 7 months. The performance of proximal LVO detection was compared with radiologists' assessments. Users filled in a questionnaire on user experience at several time points. In Radboudumc, the use was monitored by individual log-in information. + Results: Quantitative evaluation of ICA, M1 and proximal M2 occlusion detection (prevalence 18%) resulted in a case base sensitivity and specificity of 74% and 91% in Rijnstate (n=276) and 77% and 91% in Radboudumc (n=516). The use of the tool decreased over time. 
Radiologists unfamiliar with LVO assessment tended to value the AI report more than experienced radiologists. The net promoter scores were -56% in Radboudumc and -65% in Rijnstate. The tool was considered user friendly (7.2/10). CTP assessment in Rijnstate was used more frequently than LVO detection. + Conclusion: This evaluation aids to understand some of the challenges involved in clinical implementation and acceptance by users of AI tools. Findings are consistent for both centres. Success is not only dependent on the product and its performance, but also on clinical goal setting, expectations, context and implementation choices. Sharing experience within the NVvR AInetwork can help to gain insights into crucial factors for success ("Gluren-bij-de-buren").}, + booktitle = {Radiologendagen}, + year = {2022}, + optnote = {DIAG, RADIOLOGY}, +} + +@conference{Leeu22b, + author = {van Leeuwen, Kicky G. and Becks, M.J. and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J.C.M. and de Rooij, Maarten and Meijer, F.J.A.}, + booktitle = ECR, + title = {Real-world evaluation of artificial intelligence software for cerebral large vessel occlusion detection in {CT} angiography}, + abstract = {Purpose: The commercially available AI tool (StrokeViewer v2, Nicolab) supports the diagnostic process of stroke by detecting large vessel occlusions (LVO) on CTA. We prospectively evaluated this tool in our department to monitor safety and impact. + Methods: We implemented the software with the goal to improve the diagnosis of LVO and elevate the diagnostic confidence of the radiologist (resident). We used quantitative measures (data from clinical systems, vendor log files) and qualitative measures (user survey) to analyse diagnostic performance, number of users, login attempts, radiologists' diagnostic confidence, and user experience. + Results: In total, 226 CTAs with a clinical indication of stroke between January-June 2021 were prospectively evaluated. Thirteen cases of posterior circulation and distal vessel occlusions were excluded as they were outside the intended use of the AI tool. The AI tool missed 12 of the 36 occlusions in the middle cerebral or intracranial internal carotid artery (M1=1, M2=10, ICA=1) resulting in an accuracy of 86.4%. Irrespective of location, the sensitivity was 77.8% and specificity 90.4%. The number of monthly unique users varied between 8 and 24 radiologists/residents. Log in attempts dropped after the initial month (which included training) to a monthly average of 44 attempts. The diagnostic confidence did not increase during the use of the tool. The likelihood that users would recommend StrokeViewer to colleagues was rated 4.5/10. + Conclusion: Over six months, the use of StrokeViewer dropped and users did not sense improvement of diagnostic confidence. Measures have been taken to stimulate adoption for the latter six months of the trial period. + Limitation: Because of the prospective character, no comparison could be made between radiologists supported by AI vs radiologists without AI.}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, +} + +@conference{Leeu23, + author = {van Leeuwen, Kicky G. and Hedderich, D.M. 
and Schalekamp, Steven},
  booktitle = ECR,
  title = {Potential risk of off-label use of commercially available AI-based software for radiology},
  abstract = {Purpose or Learning Objective: The aim of this study was to analyse potential discrepancies between the claims and disclaimers of the intended purpose statements of CE-marked AI-based software for radiology.
 Methods or Background: In March 2022, we asked all vendors listed on www.AIforRadiology.com (n=87) to verify or submit the intended purpose according to European clearance for their products (n=191). Any new additions were included until September 26th 2022 (n=12). Claims and disclaimers were extracted from the statements. Potential conflicts of claims and disclaimers were flagged.
 Results or Findings: We received the intended purpose statements for 157 of the 203 products. Of those, 36 were excluded as they provided too little information to analyse. The included products were certified under the current medical device regulations (class IIa = 24, class IIb = 9) and former Medical Device Directive (class I = 45, class IIa = 39, class IIb = 3). Of the 121 included statements 56 held disclaimers. For 13 of these products the claims and disclaimers were flagged to contradict each other. Potential discrepant disclaimer statements were e.g. 'act per the standard of care' (n=7) and 'not for diagnostic use' (n=6), while claiming to aid in the diagnosis, triaging or risk scoring of clinical conditions.
 Conclusion: Potential discrepancies in claims and disclaimers were found for a substantial number of AI-tools bearing the risk that users of the AI software misunderstand the permitted use-cases which may lead to off-label use.
 Limitations: Not all intended purpose statements received were of sufficient quality to use for analysis. The definition of what information the intended purpose should contain is not clearly specified under the MDR making it hard to objectively assess or compare.},
  optnote = {DIAG, RADIOLOGY},
  year = {2023},
}

@article{Leeu23a,
  author = {van Leeuwen, Kicky G. and de Rooij, Maarten and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J. C. M.},
  title = {Clinical use of artificial intelligence products for radiology in the Netherlands between 2020 and 2022},
  doi = {10.1007/s00330-023-09991-5},
  url = {http://dx.doi.org/10.1007/s00330-023-09991-5},
  abstract = {Abstract
 Objectives
 To map the clinical use of CE-marked artificial intelligence (AI)-based software in radiology departments in the Netherlands (n = 69) between 2020 and 2022.

 Materials and methods
 Our AI network (one radiologist or AI representative per Dutch hospital organization) received a questionnaire each spring from 2020 to 2022 about AI product usage, financing, and obstacles to adoption. Products that were not listed on www.AIforRadiology.com by July 2022 were excluded from the analysis.

 Results
 The number of respondents was 43 in 2020, 36 in 2021, and 33 in 2022. The number of departments using AI has been growing steadily (2020: 14, 2021: 19, 2022: 23). The diversity (2020: 7, 2021: 18, 2022: 34) and the number of total implementations (2020: 19, 2021: 38, 2022: 68) has rapidly increased. Seven implementations were discontinued in 2022. Four hospital organizations said to use an AI platform or marketplace for the deployment of AI solutions. AI is mostly used to support chest CT (17), neuro CT (17), and musculoskeletal radiograph (12) analysis.
The budget for AI was reserved in 13 of the responding centers in both 2021 and 2022. The most important obstacles to the adoption of AI remained costs and IT integration. Of the respondents, 28% stated that the implemented AI products realized health improvement and 32% assumed both health improvement and cost savings. + + Conclusion + The adoption of AI products in radiology departments in the Netherlands is showing common signs of a developing market. The major obstacles to reaching widespread adoption are a lack of financial resources and IT integration difficulties. + + Clinical relevance statement + The clinical impact of AI starts with its adoption in daily clinical practice. Increased transparency around AI products being adopted, implementation obstacles, and impact may inspire increased collaboration and improved decision-making around the implementation and financing of AI products. + + Key Points + The adoption of artificial intelligence products for radiology has steadily increased since 2020, with at least a third of the centers in the Netherlands using AI in clinical practice in 2022. + The main areas in which artificial intelligence products are used are lung nodule detection on CT, aided stroke diagnosis, and bone age prediction. + The majority of respondents experienced added value (decreased costs and/or improved outcomes) from using artificial intelligence-based software; however, major obstacles to adoption remain the costs and IT-related difficulties.}, + citation-count = {0}, + file = {Leeu23a.pdf:pdf\Leeu23a.pdf:PDF}, + journal = {European Radiology}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {03a68b7a56dda9b3cecacfd01d4f7225998fe4c8}, + all_ss_ids = {['03a68b7a56dda9b3cecacfd01d4f7225998fe4c8']}, + gscites = {1}, +} + +@article{Leeu23b, + author = {K.G. van Leeuwen and M.J. Becks and D. Grob and F. de Lange and J.H.E. Rutten and S. Schalekamp and M.J.C.M. Rutten and B. van Ginneken and M. de Rooij and F.J.A. Meijer}, + title = {AI-support for the detection of intracranial large vessel occlusions: One-year prospective evaluation}, + doi = {10.1016/j.heliyon.2023.e19065}, + abstract = {Purpose + Few studies have evaluated the real-world performance of radiological AI tools in clinical practice. Over one year, we prospectively evaluated the use of AI software to support the detection of intracranial large vessel occlusions (LVO) on CT angiography (CTA). + Method + Quantitative measures (user log-in attempts, AI standalone performance) and qualitative data (user surveys) were reviewed by a key-user group at three timepoints. A total of 491 CTA studies of 460 patients were included for analysis. + Results + The overall accuracy of the AI tool for LVO detection and localization was 87.6\%, sensitivity 69.1\% and specificity 91.2\%. Out of 81 LVOs, 31 of 34 (91\%) M1 occlusions were detected correctly, 19 of 38 (50\%) M2 occlusions, and 6 of 9 (67\%) ICA occlusions. The product was considered user-friendly. The diagnostic confidence of the users for LVO detection remained the same over the year. The last measured net promoter score was -56\%. The use of the AI tool fluctuated over the year with a declining trend.
+ Conclusions + Our pragmatic approach of evaluating the AI tool in clinical practice helped us to monitor the usage, to estimate the added value perceived by the users, and to make an informed decision about continuing the use of the AI tool.}, + citation-count = {0}, + file = {Leeu23b.pdf:pdf\Leeu23b.pdf:PDF}, + journal = {Heliyon}, + issue = {8}, + volume = {9}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {c2586bb2f434a12ef173271af12a69ed86b74944}, + all_ss_ids = {['c2586bb2f434a12ef173271af12a69ed86b74944']}, + gscites = {0}, +} + +@phdthesis{Leeu23c, + author = {Kicky G. van Leeuwen}, + title = {Validation and implementation of commercial artificial intelligence software for radiology}, + url = {https://repository.ubn.ru.nl/handle/2066/295128}, + abstract = {The aim of this thesis is to increase the transparency of the market for AI software applications in radiology: the medical specialty that currently accounts for 75% of all approved medical AI software. The focus is on products available for clinical use in Europe, in other words, products that are CE marked. We discuss the potential use cases of AI in radiology, map commercially available AI products, independently assess the performance of products, and measure and model the (potential) added value. With the insights we have gained and publicly shared, we enable more informed decision-making by AI purchasers, users, investors, and creators. Furthermore, this encourages the use and development of AI that is safe and of value to society. + The key contributions of this research are: + - Three years of publicly sharing information with a global audience on commercially available AI products, verified regulatory clearance information, product specifications, and scientific evidence, through www.AIforRadiology.com and the associated monthly newsletter. + - Initiating the Dutch Radiology AI-network, connecting "AI-champions" among hospitals to share experiences and to enable the yearly inquiry on the clinical use of commercial AI. + - Development of a framework for the independent and objective validation of commercially available AI products, and applying this to ten products, for two different use cases, on data from seven medical centers. With this framework, we make validation more efficient and impartial, enabling informed purchasing or reimbursement decisions. + - One of the first demonstrations of how an early health technology assessment can be performed to demonstrate the value of an AI product before implementation.}, + copromotor = {M.J.C.M. Rutten, Dr. M. de Rooij, S. Schalekamp}, + file = {:pdf/Leeu23c.pdf:PDF}, + journal = {PhD thesis}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken}, + school = {Radboud University, Nijmegen}, + year = {2023}, } @article{Leij17, @@ -13363,18 +16592,54 @@ @article{Leij17 pages = {1569-1577}, doi = {10.1212/WNL.0000000000004490}, abstract = {Objective: To investigate the temporal dynamics of cerebral small vessel disease (SVD) by 3 consecutive assessments over a period of 9 years, distinguishing progression from regression. - - Methods: Changes in SVD markers of 276 participants of the Radboud University Nijmegen Diffusion Tensor and Magnetic Resonance Imaging Cohort (RUN DMC) cohort were assessed at 3 time points over 9 years. We assessed white matter hyperintensities (WMH) volume by semiautomatic segmentation and rated lacunes and microbleeds manually.
We categorized baseline WMH severity as mild, moderate, or severe according to the modified Fazekas scale. We performed mixed-effects regression analysis including a quadratic term for increasing age. - - Results: Mean WMH progression over 9 years was 4.7 mL (0.54 mL/y; interquartile range 0.95-5.5 mL), 20.3% of patients had incident lacunes (2.3%/y), and 18.9% had incident microbleeds (2.2%/y). WMH volume declined in 9.4% of the participants during the first follow-up interval, but only for 1 participant (0.4%) throughout the whole follow-up. Lacunes disappeared in 3.6% and microbleeds in 5.7% of the participants. WMH progression accelerated over time: including a quadratic term for increasing age during follow-up significantly improved the model (p < 0.001). SVD progression was predominantly seen in participants with moderate to severe WMH at baseline compared to those with mild WMH (odds ratio [OR] 35.5, 95% confidence interval [CI] 15.8-80.0, p < 0.001 for WMH progression; OR 5.7, 95% CI 2.8-11.2, p < 0.001 for incident lacunes; and OR 2.9, 95% CI 1.4-5.9, p = 0.003 for incident microbleeds). - - Conclusions: SVD progression is nonlinear, accelerating over time, and a highly dynamic process, with progression interrupted by reduction in some, in a population that on average shows progression.}, + + Methods: Changes in SVD markers of 276 participants of the Radboud University Nijmegen Diffusion Tensor and Magnetic Resonance Imaging Cohort (RUN DMC) cohort were assessed at 3 time points over 9 years. We assessed white matter hyperintensities (WMH) volume by semiautomatic segmentation and rated lacunes and microbleeds manually. We categorized baseline WMH severity as mild, moderate, or severe according to the modified Fazekas scale. We performed mixed-effects regression analysis including a quadratic term for increasing age. + + Results: Mean WMH progression over 9 years was 4.7 mL (0.54 mL/y; interquartile range 0.95-5.5 mL), 20.3% of patients had incident lacunes (2.3%/y), and 18.9% had incident microbleeds (2.2%/y). WMH volume declined in 9.4% of the participants during the first follow-up interval, but only for 1 participant (0.4%) throughout the whole follow-up. Lacunes disappeared in 3.6% and microbleeds in 5.7% of the participants. WMH progression accelerated over time: including a quadratic term for increasing age during follow-up significantly improved the model (p < 0.001). SVD progression was predominantly seen in participants with moderate to severe WMH at baseline compared to those with mild WMH (odds ratio [OR] 35.5, 95% confidence interval [CI] 15.8-80.0, p < 0.001 for WMH progression; OR 5.7, 95% CI 2.8-11.2, p < 0.001 for incident lacunes; and OR 2.9, 95% CI 1.4-5.9, p = 0.003 for incident microbleeds). + + Conclusions: SVD progression is nonlinear, accelerating over time, and a highly dynamic process, with progression interrupted by reduction in some, in a population that on average shows progression.}, file = {Leij17.pdf:pdf\\Leij17.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {28878046}, month = {9}, gsid = {18114267648681255154}, - gscites = {38}, + gscites = {89}, + ss_id = {f8c72c5da56aec5e573cc9e4a56d2a53b151e5a0}, + all_ss_ids = {['f8c72c5da56aec5e573cc9e4a56d2a53b151e5a0']}, +} + +@article{Leij18, + author = {van Leijsen, Esther M. C. and Tay, Jonathan and van Uden, Ingeborg W. M. and Kooijmans, Eline C. M. and Bergkamp, Mayra I. and van der Holst, Helena M. and Ghafoorian, Mohsen and Platel, Bram and Norris, David G. and Kessels, Roy P. C. and Markus, Hugh S. 
and Tuladhar, Anil M. and de Leeuw, Frank-Erik}, + title = {Memory decline in elderly with cerebral small vessel disease explained by temporal interactions between white matter hyperintensities and hippocampal atrophy}, + doi = {10.1002/hipo.23039}, + year = {2018}, + abstract = {White matter hyperintensities (WMH) constitute the visible spectrum of cerebral small vessel disease (SVD) markers and are associated with cognitive decline, although they do not fully account for memory decline observed in individuals with SVD. We hypothesize that WMH might exert their effect on memory decline indirectly by affecting remote brain structures such as the hippocampus. We investigated the temporal interactions between WMH, hippocampal atrophy and memory decline in older adults with SVD. Five hundred and three participants of the RUNDMC study underwent neuroimaging and cognitive assessments up to 3 times over 8.7 years. We assessed WMH volumes semi-automatically and calculated hippocampal volumes (HV) using FreeSurfer. We used linear mixed effects models and causal mediation analyses to assess both interaction and mediation effects of hippocampal atrophy in the associations between WMH and memory decline, separately for working memory (WM) and episodic memory (EM). Linear mixed effect models revealed that the interaction between WMH and hippocampal volumes explained memory decline (WM: b = .067; 95%CI[.024-0.111]; p < .01; EM: b = .061; 95%CI[.025-.098]; p < .01), with better model fit when the WMH*HV interaction term was added to the model, for both WM (likelihood ratio test, chi2[1] = 9.3, p < .01) and for EM (likelihood ratio test, chi2[1] = 10.7, p < .01). Mediation models showed that both baseline WMH volume (b = -.170; p = .001) and hippocampal atrophy (b = 0.126; p = .009) were independently related to EM decline, but the effect of baseline WMH on EM decline was not mediated by hippocampal atrophy (p value indirect effect: 0.572). Memory decline in elderly with SVD was best explained by the interaction of WMH and hippocampal volumes. The relationship between WMH and memory was not causally mediated by hippocampal atrophy, suggesting that memory decline during aging is a heterogeneous condition in which different pathologies contribute to the memory decline observed in elderly with SVD.}, + url = {http://dx.doi.org/10.1002/hipo.23039}, + file = {Leij18.pdf:pdf\\Leij18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Hippocampus}, + automatic = {yes}, + citation-count = {24}, + pages = {500-510}, + volume = {29}, + pmid = {29724890}, +}
@article{Leij19, @@ -13392,7 +16657,9 @@ @article{Leij19 optnote = {DIAG}, pmid = {31165098}, gsid = {12830074869247778014}, - gscites = {5}, + gscites = {12}, + ss_id = {c321fef685aa56d1c5a49d01e6de616f191011be}, + all_ss_ids = {['c321fef685aa56d1c5a49d01e6de616f191011be']}, } @article{Leli08, @@ -13409,7 +16676,9 @@ @article{Leli08 pmid = {18457986}, month = {12}, gsid = {538772308921121002}, - gscites = {1}, + gscites = {5}, + ss_id = {1bbe4a605e4889376878540c245a351aa83d9716}, + all_ss_ids = {['1bbe4a605e4889376878540c245a351aa83d9716']}, } @article{Lens16, @@ -13424,9 +16693,39 @@ @article{Lens16 pmid = {27885864}, publisher = {Taylor \& Francis}, month = {11}, + ss_id = {2d3e4695c85c92419e3b715fa385b33568c06cd8}, + all_ss_ids = {['2d3e4695c85c92419e3b715fa385b33568c06cd8']}, + gscites = {7}, } -@article{Lepp95, +@conference{Leon23, + author = {Leon-Ferre, Roberto A. and Carter, Jodi M. and Zahrieh, David and Sinnwell, Jason P. and Salgado, Roberto and Suman, Vera and Hillman, David and Boughey, Judy C. and Kalari, Krishna R. and Couch, Fergus J. and Ingle, James N. and Balkenkohl, Maschenka and Ciompi, Francesco and van der Laak, Jeroen and Goetz, Matthew P.}, + title = {Abstract P2-11-34: Mitotic spindle hotspot counting using deep learning networks is highly associated with clinical outcomes in patients with early-stage triple-negative breast cancer who did not receive systemic therapy}, + doi = {10.1158/1538-7445.sabcs22-p2-11-34}, + year = {2023}, + abstract = {Background: Triple-negative breast cancers (TNBC) exhibit high rates of recurrence and mortality.
However, recent studies suggest that a subset of patients (pts) with early-stage TNBC enriched in tumor-infiltrating lymphocytes (TILs) have excellent clinical outcomes even in the absence of systemic therapy. Additional histological biomarkers that could identify pts for future systemic therapy escalation/de-escalation strategies are of great interest. TNBC are frequently highly proliferative with abundant mitoses. However, classic markers of proliferation (manual mitosis counting and Ki-67) appear to offer no prognostic value. Here, we evaluated the prognostic effects of automated mitotic spindle hotspot (AMSH) counting on recurrence-free survival (RFS) in independent cohorts of systemically untreated early-stage TNBC. + Methods: AMSH counting was conducted with a state-of-the-art deep learning algorithm trained on the detection of mitoses within 2 mm2 areas with the highest mitotic density (i.e. hotspots) in digital H&E images. Details of the development, training and validation of the algorithm were published previously [1] in a cohort of unselected TNBC. We obtained AMSH counts in a centrally confirmed TNBC cohort from Mayo Clinic [2] and focused our analysis on pts who received locoregional therapy but no systemic therapy. Using a fractional polynomial analysis with a multivariable proportional hazards regression model, we confirmed the assumption of linearity in the log hazard for the continuous variable AMSH and evaluated whether AMSH counts were prognostic of RFS. We corroborated our findings in an independent cohort of systemically untreated TNBC pts from the Radboud University Medical Center in the Netherlands (Radboud Cohort). Results are reported at a median follow-up of 8.1 and 6.7 years for the Mayo and Radboud cohorts, respectively. + Results: Among 182 pts who did not receive systemic therapy in the Mayo Cohort, 140 (77\%) with available AMSH counts were included. The mean age was 61 (range: 31-94), 71\% were postmenopausal, 67\% had tumors <= 2 cm, and 83\% were node-negative. As expected, most tumors were Nottingham grade 3 (84\%) and had a high Ki-67 proliferation index (54\% with Ki-67 >30\%). Most tumors (73\%) had stromal TILs <= 30\%. The median AMSH count was 18 (IQR: 8, 42). AMSH counts were linearly associated with grade and tumor size, with the proportion of pts with grade 3 tumors and size > 2 cm increasing as the AMSH counts increased (p=0.007 and p=0.059, respectively). In a multivariate model controlling for nodal status, tumor size, and stromal TILs, AMSH counts were independently associated with RFS (p < 0.0001). For every 10-point increase in the AMSH count, we observed a 17\% increase in the risk of experiencing an RFS event (HR 1.17, 95\% CI 1.08-1.26). We corroborated our findings in the Radboud Cohort (n=126). The mean age was 68 (range: 40-96), and 81\% were node-negative. While the median AMSH count was 36 (IQR: 16-63), higher than in the Mayo Cohort (p=0.004), the prognostic impact was similar, with a significant association between AMSH count and RFS (p=0.028) in a multivariate model corrected for nodal status, tumor size, and stromal TILs. For every 10-point increase in the AMSH count in the Radboud Cohort, we observed a 9\% increase in the risk of experiencing an RFS event (HR 1.09, 95\% CI 1.01-1.17). RFS rates according to AMSH counts for both cohorts are shown in the Table.
+ Conclusions: AMSH counting is a new proliferation biomarker that provides prognostic value independent of nodal status, tumor size, and stromal TILs in systemically untreated early-stage TNBC. Plans are underway to evaluate AMSH counts in additional cohorts of systemically untreated TNBC, and in other disease settings such as prior to neoadjuvant systemic therapy. If validated, this biomarker should be prospectively evaluated as a potential selection biomarker in clinical trials of systemic therapy de-escalation. + References: + 1. PMID: 29994086 + 2. PMID: 28913760 + Table: RFS according to AMSH counts in the Mayo and Radboud Cohorts}, + url = {http://dx.doi.org/10.1158/1538-7445.SABCS22-P2-11-34}, + file = {Leon23.pdf:pdf\Leon23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Cancer Research}, + citation-count = {0}, + automatic = {yes}, + pages = {P2-11-34-P2-11-34}, + volume = {83}, + all_ss_ids = {6ab3424f10b236c992d823568dfca2075e2ad46e}, + gscites = {0}, +} + +@article{Lepp95, author = {Leppert, A. G. and Prokop, M. and Schaefer-Prokop, C. M. and Galanski, M.}, title = {Detection of simulated chest lesions: comparison of a conventional screen-film combination, an asymmetric screen-film system, and storage phosphor radiography}, journal = Radiology, @@ -13457,7 +16756,9 @@ @inproceedings{Lesn11 number = {1}, month = {3}, gsid = {5157723055013579670}, - gscites = {17}, + gscites = {18}, + ss_id = {01491b1e830ef88a3e39a06313bc27390167fd86}, + all_ss_ids = {['01491b1e830ef88a3e39a06313bc27390167fd86']}, } @inproceedings{Lesn12, @@ -13473,7 +16774,9 @@ @inproceedings{Lesn12 file = {Lesn12.pdf:pdf\\Lesn12.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {9813410708328583159}, - gscites = {1}, + gscites = {5}, + ss_id = {a37cda5126f5024777d9440c5f567c9e5baae4ce}, + all_ss_ids = {['a37cda5126f5024777d9440c5f567c9e5baae4ce']}, } @article{Lesn12a, @@ -13491,7 +16794,9 @@ @article{Lesn12a pmid = {22853938}, month = {8}, gsid = {683635356274851558}, - gscites = {22}, + gscites = {23}, + ss_id = {1d8b4341ee856c294dc6c3d13e8cb01ab8dc44a4}, + all_ss_ids = {['1d8b4341ee856c294dc6c3d13e8cb01ab8dc44a4']}, } @article{Less13, @@ -13523,17 +16828,19 @@ @inproceedings{Less16 pages = {978511-1 -- 978511-6}, doi = {10.1117/12.2216978}, abstract = {The amount of calcifications in the coronary arteries is a powerful and independent predictor of cardiovascular events and is used to identify subjects at high risk who might benefit from preventive treatment. Routine quantification of coronary calcium scores can complement screening programs using low-dose chest CT, such as lung cancer screening. We present a system for automatic coronary calcium scoring based on deep convolutional neural networks (CNNs). - - The system uses three independently trained CNNs to estimate a bounding box around the heart.
In this region of interest, connected components above 130 HU are considered candidates for coronary artery calcifications. To separate them from other high intensity lesions, classification of all extracted voxels is performed by feeding two-dimensional 50 mm x 50 mm patches from three orthogonal planes into three concurrent CNNs. The networks consist of three convolutional layers and one fully-connected layer with 256 neurons. - - In the experiments, 1028 non-contrast-enhanced and non-ECG-triggered low-dose chest CT scans were used. The network was trained on 797 scans. In the remaining 231 test scans, the method detected on average 194.3 mm3 of 199.8 mm3 coronary calcifications per scan (sensitivity 97.2%) with an average false-positive volume of 10.3 mm3. Subjects were assigned to one of five standard cardiovascular risk categories based on the Agatston score. Accuracy of risk category assignment was 84.4% with a linearly weighted kappa of 0.89. - - The proposed system can perform automatic coronary artery calcium scoring to identify subjects undergoing low-dose chest CT screening who are at risk of cardiovascular events with high accuracy.}, + + The system uses three independently trained CNNs to estimate a bounding box around the heart. In this region of interest, connected components above 130 HU are considered candidates for coronary artery calcifications. To separate them from other high intensity lesions, classification of all extracted voxels is performed by feeding two-dimensional 50 mm x 50 mm patches from three orthogonal planes into three concurrent CNNs. The networks consist of three convolutional layers and one fully-connected layer with 256 neurons. + + In the experiments, 1028 non-contrast-enhanced and non-ECG-triggered low-dose chest CT scans were used. The network was trained on 797 scans. In the remaining 231 test scans, the method detected on average 194.3 mm3 of 199.8 mm3 coronary calcifications per scan (sensitivity 97.2%) with an average false-positive volume of 10.3 mm3. Subjects were assigned to one of five standard cardiovascular risk categories based on the Agatston score. Accuracy of risk category assignment was 84.4% with a linearly weighted kappa of 0.89. + + The proposed system can perform automatic coronary artery calcium scoring to identify subjects undergoing low-dose chest CT screening who are at risk of cardiovascular events with high accuracy.}, file = {Less16.pdf:pdf\\Less16.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {11932712040638875797}, - gscites = {25}, + gscites = {46}, + ss_id = {b542b6df300ef39144d6172e8fe41a0b93fd5f8b}, + all_ss_ids = {['b542b6df300ef39144d6172e8fe41a0b93fd5f8b']}, } @article{Less17, @@ -13552,7 +16859,9 @@ @article{Less17 pmid = {29408789}, month = {2}, gsid = {16774069466764003485}, - gscites = {72}, + gscites = {166}, + ss_id = {1a3470626b24ccd510047925f80d21affde3c3b8}, + all_ss_ids = {['1a3470626b24ccd510047925f80d21affde3c3b8']}, } @conference{Less17a, @@ -13576,7 +16885,9 @@ @inproceedings{Less18 optnote = {DIAG}, month = {3}, gsid = {3970246513012434900}, - gscites = {21}, + gscites = {32}, + ss_id = {b41e47d9978abb56a5b0fa6697f5454e31579722}, + all_ss_ids = {['b41e47d9978abb56a5b0fa6697f5454e31579722']}, } @inproceedings{Less18a, @@ -13588,6 +16899,8 @@ @inproceedings{Less18a abstract = {Precise segmentation of the vertebrae is often required for automatic detection of vertebral abnormalities. 
This especially enables incidental detection of abnormalities such as compression fractures in images that were acquired for other diagnostic purposes. While many CT and MR scans of the chest and abdomen cover a section of the spine, they often do not cover the entire spine. Additionally, the first and last visible vertebrae are likely only partially included in such scans. In this paper, we therefore approach vertebra segmentation as an instance segmentation problem. A fully convolutional neural network is combined with an instance memory that retains information about already segmented vertebrae. This network iteratively analyzes image patches, using the instance memory to search for and segment the first not yet segmented vertebra. At the same time, each vertebra is classified as completely or partially visible, so that partially visible vertebrae can be excluded from further analyses. We evaluated this method on spine CT scans from a vertebra segmentation challenge and on low-dose chest CT scans. The method achieved an average Dice score of 95.8% and 92.1%, respectively, and a mean absolute surface distance of 0.194 mm and 0.344 mm.}, file = {Less18a.pdf:pdf\\Less18a.pdf:PDF}, optnote = {DIAG}, + all_ss_ids = {['14fcdfdd2b15f6fec9b9e7b1b4189e43281273d8', '77ca4f86eaef55fb7c853dad2fd3600a3eb5b169']}, + gscites = {176}, } @article{Less19, @@ -13604,8 +16917,10 @@ @article{Less19 pmid = {30660540}, month = {9}, gsid = {13396141710348170095}, - gscites = {6}, + gscites = {16}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/208061}, + ss_id = {31d77e71f4151452d18b69a5649a64df561f6af7}, + all_ss_ids = {['31d77e71f4151452d18b69a5649a64df561f6af7']}, } @article{Less19a, @@ -13623,8 +16938,10 @@ @article{Less19a pmid = {30771712}, month = {4}, gsid = {1055664958726725388}, - gscites = {29}, + gscites = {174}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/202066}, + ss_id = {77ca4f86eaef55fb7c853dad2fd3600a3eb5b169}, + all_ss_ids = {['77ca4f86eaef55fb7c853dad2fd3600a3eb5b169']}, } @phdthesis{Less19b, @@ -13639,6 +16956,8 @@ @phdthesis{Less19b promotor = {M. A. Viergever, B. van Ginneken, P. A. de Jong}, school = {Utrecht University}, journal = {PhD thesis}, + all_ss_ids = {42315be6452636824d9004d5d8aa2fe8924494a2}, + gscites = {0}, } @inproceedings{Less19c, @@ -13652,6 +16971,8 @@ @inproceedings{Less19c optnote = {DIAG, RADIOLOGY}, gsid = {14928274104806083629}, gscites = {1}, + ss_id = {47d4ee03c5b2596dbfcfb4373de7e05e1cd831dc}, + all_ss_ids = {['47d4ee03c5b2596dbfcfb4373de7e05e1cd831dc']}, } @article{Less20, @@ -13669,6 +16990,9 @@ @article{Less20 abstract = {The coronavirus disease 2019 (COVID-19) pandemic has spread across the globe with alarming speed, morbidity, and mortality. Immediate triage of patients with chest infections suspected to be caused by COVID-19 using chest CT may be of assistance when results from definitive viral testing are delayed. Purpose: To develop and validate an artificial intelligence (AI) system to score the likelihood and extent of pulmonary COVID-19 on chest CT scans using the COVID-19 Reporting and Data System (CO-RADS) and CT severity scoring systems. Materials and Methods: The CO-RADS AI system consists of three deep-learning algorithms that automatically segment the five pulmonary lobes, assign a CO-RADS score for the suspicion of COVID-19, and assign a CT severity score for the degree of parenchymal involvement per lobe. 
This study retrospectively included patients who underwent a nonenhanced chest CT examination because of clinical suspicion of COVID-19 at two medical centers. The system was trained, validated, and tested with data from one of the centers. Data from the second center served as an external test set. Diagnostic performance and agreement with scores assigned by eight independent observers were measured using receiver operating characteristic analysis, linearly weighted kappa values, and classification accuracy. Results: A total of 105 patients (mean age, 62 years +- 16 [standard deviation]; 61 men) and 262 patients (mean age, 64 years +- 16; 154 men) were evaluated in the internal and external test sets, respectively. The system discriminated between patients with COVID-19 and those without COVID-19, with areas under the receiver operating characteristic curve of 0.95 (95% CI: 0.91, 0.98) and 0.88 (95% CI: 0.84, 0.93), for the internal and external test sets, respectively. Agreement with the eight human observers was moderate to substantial, with mean linearly weighted kappa values of 0.60 +- 0.01 for CO-RADS scores and 0.54 +- 0.01 for CT severity scores. Conclusion: With high diagnostic performance, the CO-RADS AI system correctly identified patients with COVID-19 using chest CT scans and assigned standardized CO-RADS and CT severity scores that demonstrated good agreement with findings from eight independent observers and generalized well to external data.}, optnote = {DIAG, RADIOLOGY}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/228667}, + ss_id = {4330ce9c73af04a35d1a7dd366df8434c98e30ed}, + all_ss_ids = {['4330ce9c73af04a35d1a7dd366df8434c98e30ed']}, + gscites = {108}, } @article{Less20a, @@ -13678,6 +17002,9 @@ @article{Less20a year = {2020}, abstract = {Random transformations are commonly used for augmentation of the training data with the goal of reducing the uniformity of the training samples. These transformations normally aim at variations that can be expected in images from the same modality. Here, we propose a simple method for transforming the gray values of an image with the goal of reducing cross modality differences. This approach enables segmentation of the lumbar vertebral bodies in CT images using a network trained exclusively with MR images.
The source code is made available at https://github.com/nlessmann/rsgt}, file = {:http\://arxiv.org/pdf/2003.06158v1:PDF}, + ss_id = {d9eec89c003fb7af39968663ba21db04098d6019}, + all_ss_ids = {['d9eec89c003fb7af39968663ba21db04098d6019']}, + gscites = {0}, } @article{Less22, @@ -13690,6 +17017,9 @@ @article{Less22 title = {Automatic Brand Identification of Orthopedic Implants from Radiographs: Ready for the Next Step?}, journal = RAI, optnote = {DIAG, RADIOLOGY}, + ss_id = {91437d205e582d2d1ce2dab8e49687a2a9bb0f66}, + all_ss_ids = {['91437d205e582d2d1ce2dab8e49687a2a9bb0f66']}, + gscites = {0}, } @inproceedings{Leym96, @@ -13703,7 +17033,70 @@ @inproceedings{Leym96 file = {Leym96.pdf:pdf\\Leym96.pdf:PDF}, gsid = {12954033652655760385}, optnote = {DIAG, RADIOLOGY}, - gscites = {19}, + gscites = {22}, + ss_id = {7f897bcefc1fcc04cd17ae162d6bdfcff4dd83c0}, + all_ss_ids = {['7f897bcefc1fcc04cd17ae162d6bdfcff4dd83c0']}, +} + +@article{Li18, + author = {Li, Zhang and Hu, Zheyu and Xu, Jiaolong and Tan, Tao and Chen, Hui and Duan, Zhi and Liu, Ping and Tang, Jun and Cai, Guoping and Ouyang, Quchang and Tang, Yuling and Litjens, Geert and Li, Qiang}, + title = {Computer-aided diagnosis of lung carcinoma using deep learning - a pilot study}, + doi = {10.48550/ARXIV.1803.05471}, + year = {2018}, + abstract = {Aim: Early detection and correct diagnosis of lung cancer are the most important steps in improving patient outcome. This study aims to assess which deep learning models perform best in lung cancer diagnosis. Methods: Non-small cell lung carcinoma and small cell lung carcinoma biopsy specimens were consecutively obtained and stained. The specimen slides were diagnosed by two experienced pathologists (over 20 years). Several deep learning models were trained to discriminate cancer and non-cancer biopsies. Result: Deep learning models give reasonable AUC from 0.8810 to 0.9119. Conclusion: The deep learning analysis could help to speed up the detection process for the whole-slide image (WSI) and keep a detection rate comparable to that of a human observer.}, + url = {https://arxiv.org/abs/1803.05471}, + file = {Li18.pdf:pdf\\Li18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:1803.05471}, + automatic = {yes}, +} + +@article{Li21, + author = {Li, Zhang and Zhang, Jiehua and Tan, Tao and Teng, Xichao and Sun, Xiaoliang and Zhao, Hong and Liu, Lihong and Xiao, Yang and Lee, Byungjae and Li, Yilong and Zhang, Qianni and Sun, Shujiao and Zheng, Yushan and Yan, Junyu and Li, Ni and Hong, Yiyu and Ko, Junsu and Jung, Hyun and Liu, Yanling and Chen, Yu-cheng and Wang, Ching-wei and Yurovskiy, Vladimir and Maevskikh, Pavel and Khanagha, Vahid and Jiang, Yi and Yu, Li and Liu, Zhihong and Li, Daiqiang and Schuffler, Peter J.
and Yu, Qifeng and Chen, Hui and Tang, Yuling and Litjens, Geert}, + title = {Deep Learning Methods for Lung Cancer Segmentation in Whole-Slide Histopathology Images--The ACDC@LungHP Challenge 2019}, + doi = {10.1109/jbhi.2020.3039741}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/JBHI.2020.3039741}, + file = {Li21.pdf:pdf\\Li21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {IEEE Journal of Biomedical and Health Informatics}, + automatic = {yes}, + citation-count = {41}, + pages = {429-440}, + volume = {25}, + pmid = {33216724}, +} + +@inproceedings{Li22, + author = {Li, Yiwen and Fu, Yunguan and Yang, Qianye and Min, Zhe and Yan, Wen and Huisman, Henkjan and Barratt, Dean and Prisacariu, Victor Adrian and Hu, Yipeng}, + title = {Few-Shot Image Segmentation for Cross-Institution Male Pelvic Organs Using Registration-Assisted Prototypical Learning}, + doi = {10.1109/isbi52829.2022.9761453}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/ISBI52829.2022.9761453}, + file = {Li22.pdf:pdf\\Li22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {2022 IEEE 19th International Symposium on Biomedical Imaging (ISBI)}, + automatic = {yes}, + citation-count = {2}, +} + +@article{Li23, + author = {Li, Yiwen and Fu, Yunguan and Gayo, Iani J.M.B. and Yang, Qianye and Min, Zhe and Saeed, Shaheer U. and Yan, Wen and Wang, Yipei and Noble, J. Alison and Emberton, Mark and Clarkson, Matthew J. and Huisman, Henkjan and Barratt, Dean C. and Prisacariu, Victor A. and Hu, Yipeng}, + title = {Prototypical few-shot segmentation for cross-institution male pelvic structures with spatial registration}, + doi = {10.1016/j.media.2023.102935}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.media.2023.102935}, + file = {Li23.pdf:pdf\\Li23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Image Analysis}, + automatic = {yes}, + citation-count = {0}, + pages = {102935}, + volume = {90}, + pmid = {37716198}, } @conference{Lief16, @@ -13712,22 +17105,22 @@ @conference{Lief16 booktitle = {4th International Congress on OCT Angiography and Advances in OCT}, year = {2016}, abstract = {Purpose - : - A tool for grading micro-aneurysms and the Foveal Avascular Zone (FAZ) in Fluorescein Angiography (FA) and OCT Angiography (OCTA) has been developed. With this tool the user can compare visibility and grade micro-aneurysms by displaying early FA, late FA and inner, intermediate and outer OCTA images in a synchronized view. - - Methods - : - The user can register the images in two steps by clicking on corresponding landmarks: early and late FA should be registered, as well as early FA to OCTA. A least-squares approximation to the affine transform that best matches the annotated point sets is calculated. Visual feedback is available during this stage by blending the images that need to be registered. - Once the images are registered, a synchronized cursor helps the user in finding and comparing micro-aneurysms in all five images. The FAZ, for which the area is automatically calculated, can be drawn onto each image as well. - - Results - : - - Early and late FA and OCTA images, segmented into an inner, intermediate and outer layer, have been acquired for 31 eyes of 24 patients with Diabetic Macular Edema (DME). In every set of images, enough landmarks could be found for successful registration.
The affine transform was sufficiently accurate to compare micro-aneurysms in the different images. The tool has been used for grading visibility and leakage of 567 micro-aneurysms. The FAZ could be delineated accurately in each image except the late FA where it was not visible. - - Conclusion - : - We developed a tool that can help researchers in comparing properties of FA and OCTA images, by registration of 5 different images (early and late FA, inner, intermediate and outer OCTA). The tool has been used for grading micro-aneurysms and delineating the FAZ for patients with DME.}, + : + A tool for grading micro-aneurysms and the Foveal Avascular Zone (FAZ) in Fluorescein Angiography (FA) and OCT Angiography (OCTA) has been developed. With this tool the user can compare visibility and grade micro-aneurysms by displaying early FA, late FA and inner, intermediate and outer OCTA images in a synchronized view. + + Methods + : + The user can register the images in two steps by clicking on corresponding landmarks: early and late FA should be registered, as well as early FA to OCTA. A least-squares approximation to the affine transform that best matches the annotated point sets is calculated. Visual feedback is available during this stage by blending the images that need to be registered. + Once the images are registered, a synchronized cursor helps the user in finding and comparing micro-aneurysms in all five images. The FAZ, for which the area is automatically calculated, can be drawn onto each image as well. + + Results + : + + Early and late FA and OCTA images, segmented into an inner, intermediate and outer layer, have been acquired for 31 eyes of 24 patients with Diabetic Macular Edema (DME). In every set of images, enough landmarks could be found for successful registration. The affine transform was sufficiently accurate to compare micro-aneurysms in the different images. The tool has been used for grading visibility and leakage of 567 micro-aneurysms. The FAZ could be delineated accurately in each image except the late FA where it was not visible. + + Conclusion + : + We developed a tool that can help researchers in comparing properties of FA and OCTA images, by registration of 5 different images (early and late FA, inner, intermediate and outer OCTA). The tool has been used for grading micro-aneurysms and delineating the FAZ for patients with DME.}, optnote = {DIAG, RADIOLOGY}, } @inproceedings{Lief17, @@ -13746,6 +17139,8 @@ @inproceedings{Lief17 month = {2}, gsid = {3861077382929820883}, gscites = {4}, + ss_id = {e4010e494f6a7f0ea93b4cdb257aa1e51611609a}, + all_ss_ids = {['e4010e494f6a7f0ea93b4cdb257aa1e51611609a']}, } @conference{Lief17a, @@ -13753,15 +17148,15 @@ @conference{Lief17a booktitle = ARVO, title = {Automatic detection of the foveal center in optical coherence tomography}, abstract = {Purpose : To automatically detect the foveal center in optical coherence tomography (OCT) scans in order to obtain an accurate and reliable reference for the assessment of various structural biomarkers, even in the presence of large abnormalities and across different scanning protocols. - - Methods : 1784 OCT scans were used for the development of the proposed automatic method: 1744 scans from the European Genetic Database (EUGENDA) acquired with a Heidelberg Spectralis HRA+OCT 1 scanner and 40 scans from a publicly available dataset [1] acquired with a Bioptigen scanner.
Two independent sets, with different levels of age-related macular degeneration (AMD) were drawn from the same databases for evaluation: 100 scans from EUGENDA (Set A, 25 control patients and 25 for each of the AMD severity levels early, intermediate and advanced) and 100 scans from [1] (Set B, 50 control, 50 AMD). + A fully convolutional neural network based on stacked layers of dilated convolutions was trained to classify each pixel in a B-scan by assigning a probability of belonging to the fovea. The network was applied to every B-scan in the OCT volume, and the final foveal center was defined as the pixel with maximum assigned probability. An initial network was trained on the 1744 training scans from EUGENDA and optimized with the 40 training scans acquired with the Bioptigen scanner, to specialize for different levels of noise and contrast. + + For all scans manual annotations were available as reference for evaluation. The foveal center was considered correctly identified if the distance between the prediction and the reference was less than the foveal radius, i.e. 750 μm. + + Results : The foveal center was correctly detected in 95 OCT scans in Set A (24 control, 24 early, 25 intermediate, 22 advanced). The mean distance error was 63.7 μm with 81 detections inside a radius of 175 μm (the foveola) and 70 inside a radius of 75 μm (the umbo).
In Set B, the foveal center was correctly identified in 96 OCT scans (49 control, 47 AMD). The mean distance error was 88.6 μm with 82 detections inside the foveola and 61 inside the umbo. + + Conclusions : The proposed automatic method performed accurately for both healthy retinas and retinas affected by AMD. The method can be applied successfully to scans from different vendors, thus providing a reliable reference location for the assessment of structural biomarkers in OCT.}, optnote = {DIAG, RADIOLOGY}, year = {2017}, } @article{Lief17b, @@ -13783,7 +17178,9 @@ @article{Lief17b pmid = {29188111}, publisher = {OSA}, gsid = {16004483321806977582}, - gscites = {13}, + gscites = {29}, + ss_id = {e5fded74d443402884e88d8f7952d34d5371dedf}, + all_ss_ids = {['e5fded74d443402884e88d8f7952d34d5371dedf']}, } @inproceedings{Lief19, @@ -13801,7 +17198,9 @@ @inproceedings{Lief19 file = {Lief19.pdf:pdf\\Lief19.pdf:PDF}, optnote = {DIAG}, gsid = {2472537240832249962}, - gscites = {2}, + gscites = {11}, + ss_id = {40bf7af2b9d79960dc127d03c2a83ce480d4d3ad}, + all_ss_ids = {['40bf7af2b9d79960dc127d03c2a83ce480d4d3ad']}, } @conference{Lief19a, @@ -13809,19 +17208,21 @@ @conference{Lief19a booktitle = ARVO, title = {Prediction of areas at risk of developing geographic atrophy in color fundus images using deep learning}, abstract = {Purpose: - Exact quantification of areas of geographic atrophy (GA) can provide an important anatomical endpoint for treatment trials. The prediction of areas where GA may develop can provide valuable personalized prognosis and help in the development of targeted treatments to prevent progression and further vision loss. In this work, we present a model based on a deep convolutional neural network (CNN) that predicts the areas of GA within 5 years from baseline using color fundus (CF) images. - - Methods: - Areas of GA were delineated by 4 to 5 experienced graders in consensus in 377 CF images (252 eyes) collected from the Rotterdam Study and the Blue Mountains Eye Study. Graders made use of multimodal and follow up images when available, using our EyeNED annotation workstation. We identified 84 pairs of images (baseline and follow-up) of the same eye that were acquired with an interval of approximately 5 years. Image registration was performed by identifying corresponding landmarks between the images, allowing to project the delineated GA of the follow-up image onto the baseline image. - Next, a fully automatic segmentation model, based on a deep CNN, was developed. The CNN was trained to simultaneously segment the current GA area and the area at risk of developing GA, using only the baseline image as input. A five-fold cross-validation was performed to validate the prediction performance. - - Results: - The model achieved an average dice coefficient of 0.63 for segmentation of areas at risk of developing GA in the 84 images. The intraclass correlation coefficient between the GA area defined by the consensus grading of the follow-up image and the automatically predicted area based on the baseline image was 0.54. - - Conclusions: - We present a model based on a deep CNN that is capable of identifying areas where GA may develop from CF images. The proposed approach constitutes a step towards personalized prognosis and possible treatment decisions.
Furthermore, the model may be used for automatic discovery of new predictive biomarkers for development and growth rate of GA, and may help to automatically identify individuals at risk of developing GA.}, + Exact quantification of areas of geographic atrophy (GA) can provide an important anatomical endpoint for treatment trials. The prediction of areas where GA may develop can provide valuable personalized prognosis and help in the development of targeted treatments to prevent progression and further vision loss. In this work, we present a model based on a deep convolutional neural network (CNN) that predicts the areas of GA within 5 years from baseline using color fundus (CF) images. + + Methods: + Areas of GA were delineated by 4 to 5 experienced graders in consensus in 377 CF images (252 eyes) collected from the Rotterdam Study and the Blue Mountains Eye Study. Graders made use of multimodal and follow up images when available, using our EyeNED annotation workstation. We identified 84 pairs of images (baseline and follow-up) of the same eye that were acquired with an interval of approximately 5 years. Image registration was performed by identifying corresponding landmarks between the images, allowing to project the delineated GA of the follow-up image onto the baseline image. + Next, a fully automatic segmentation model, based on a deep CNN, was developed. The CNN was trained to simultaneously segment the current GA area and the area at risk of developing GA, using only the baseline image as input. A five-fold cross-validation was performed to validate the prediction performance. + + Results: + The model achieved an average dice coefficient of 0.63 for segmentation of areas at risk of developing GA in the 84 images. The intraclass correlation coefficient between the GA area defined by the consensus grading of the follow-up image and the automatically predicted area based on the baseline image was 0.54. + + Conclusions: + We present a model based on a deep CNN that is capable of identifying areas where GA may develop from CF images. The proposed approach constitutes a step towards personalized prognosis and possible treatment decisions. Furthermore, the model may be used for automatic discovery of new predictive biomarkers for development and growth rate of GA, and may help to automatically identify individuals at risk of developing GA.}, optnote = {DIAG, RADIOLOGY}, year = {2019}, + all_ss_ids = {6d1ea27b41023f9add67e2c8c4dcbc7866ae640b}, + gscites = {0}, } @article{Lief19b, @@ -13833,6 +17234,8 @@ @article{Lief19b abstract = {Purpose: To develop and validate a deep learning model for automatic segmentation of geographic atrophy (GA) in color fundus images (CFIs) and its application to study growth rate of GA. Participants: 409 CFIs of 238 eyes with GA from the Rotterdam Study (RS) and the Blue Mountain Eye Study (BMES) for model development, and 5,379 CFIs of 625 eyes from the Age-Related Eye Disease Study (AREDS) for analysis of GA growth rate. Methods: A deep learning model based on an ensemble of encoder-decoder architectures was implemented and optimized for the segmentation of GA in CFIs. Four experienced graders delineated GA in CFIs from RS and BMES. These manual delineations were used to evaluate the segmentation model using 5-fold cross-validation. The model was further applied to CFIs from the AREDS to study the growth rate of GA. Linear regression analysis was used to study associations between structural biomarkers at baseline and GA growth rate. 
A general estimate of the progression of GA area over time was made by combining growth rates of all eyes with GA from the AREDS set. Results: The model obtained an average Dice coefficient of 0.72 +- 0.26 on the BMES and RS. An intraclass correlation coefficient of 0.83 was reached between the automatically estimated GA area and the graders' consensus measures. Eight automatically calculated structural biomarkers (area, filled area, convex area, convex solidity, eccentricity, roundness, foveal involvement and perimeter) were significantly associated with growth rate. Combining all growth rates indicated that GA area grows quadratically up to an area of around 12 mm2, after which growth rate stabilizes or decreases. Conclusion: The presented deep learning model allowed for fully automatic and robust segmentation of GA in CFIs. These segmentations can be used to extract structural characteristics of GA that predict its growth rate.}, optnote = {DIAG}, month = {8}, + all_ss_ids = {['4d54536ebf1b355d02cdfe742553032666101dbd']}, + gscites = {36}, } @article{Lief20, @@ -13841,25 +17244,25 @@ @article{Lief20 doi = {10.1016/j.ophtha.2020.02.009}, url = {https://arxiv.org/abs/1908.05621}, abstract = {PURPOSE: - To develop and validate a deep learning model for the automatic segmentation of geographic atrophy (GA) using color fundus images (CFIs) and its application to study the growth rate of GA. - - DESIGN: - Prospective, multicenter, natural history study with up to 15 years of follow-up. - - PARTICIPANTS: - Four hundred nine CFIs of 238 eyes with GA from the Rotterdam Study (RS) and Blue Mountain Eye Study (BMES) for model development, and 3589 CFIs of 376 eyes from the Age-Related Eye Disease Study (AREDS) for analysis of GA growth rate. - - METHODS: - Deep learning model based on an ensemble of encoder-decoder architectures was implemented and optimized for the segmentation of GA in CFIs. Four experienced graders delineated, in consensus, GA in CFIs from the RS and BMES. These manual delineations were used to evaluate the segmentation model using 5-fold cross-validation. The model was applied further to CFIs from the AREDS to study the growth rate of GA. Linear regression analysis was used to study associations between structural biomarkers at baseline and the GA growth rate. A general estimate of the progression of GA area over time was made by combining growth rates of all eyes with GA from the AREDS set. - - MAIN OUTCOME MEASURES: - Automatically segmented GA and GA growth rate. - - RESULTS: - The model obtained an average Dice coefficient of 0.72+-0.26 on the BMES and RS set while comparing the automatically segmented GA area with the graders' manual delineations. An intraclass correlation coefficient of 0.83 was reached between the automatically estimated GA area and the graders' consensus measures. Nine automatically calculated structural biomarkers (area, filled area, convex area, convex solidity, eccentricity, roundness, foveal involvement, perimeter, and circularity) were significantly associated with growth rate. Combining all growth rates indicated that GA area grows quadratically up to an area of approximately 12 mm2, after which growth rate stabilizes or decreases. - - CONCLUSIONS: - The deep learning model allowed for fully automatic and robust segmentation of GA on CFIs. 
These segmentations can be used to extract structural characteristics of GA that predict its growth rate.}, + To develop and validate a deep learning model for the automatic segmentation of geographic atrophy (GA) using color fundus images (CFIs) and its application to study the growth rate of GA. + + DESIGN: + Prospective, multicenter, natural history study with up to 15 years of follow-up. + + PARTICIPANTS: + Four hundred nine CFIs of 238 eyes with GA from the Rotterdam Study (RS) and Blue Mountain Eye Study (BMES) for model development, and 3589 CFIs of 376 eyes from the Age-Related Eye Disease Study (AREDS) for analysis of GA growth rate. + + METHODS: + Deep learning model based on an ensemble of encoder-decoder architectures was implemented and optimized for the segmentation of GA in CFIs. Four experienced graders delineated, in consensus, GA in CFIs from the RS and BMES. These manual delineations were used to evaluate the segmentation model using 5-fold cross-validation. The model was applied further to CFIs from the AREDS to study the growth rate of GA. Linear regression analysis was used to study associations between structural biomarkers at baseline and the GA growth rate. A general estimate of the progression of GA area over time was made by combining growth rates of all eyes with GA from the AREDS set. + + MAIN OUTCOME MEASURES: + Automatically segmented GA and GA growth rate. + + RESULTS: + The model obtained an average Dice coefficient of 0.72+-0.26 on the BMES and RS set while comparing the automatically segmented GA area with the graders' manual delineations. An intraclass correlation coefficient of 0.83 was reached between the automatically estimated GA area and the graders' consensus measures. Nine automatically calculated structural biomarkers (area, filled area, convex area, convex solidity, eccentricity, roundness, foveal involvement, perimeter, and circularity) were significantly associated with growth rate. Combining all growth rates indicated that GA area grows quadratically up to an area of approximately 12 mm2, after which growth rate stabilizes or decreases. + + CONCLUSIONS: + The deep learning model allowed for fully automatic and robust segmentation of GA on CFIs. These segmentations can be used to extract structural characteristics of GA that predict its growth rate.}, file = {Lief20.pdf:pdf\\Lief20.pdf:PDF}, journal = Ophthalmology, volume = {127}, @@ -13870,7 +17273,9 @@ @article{Lief20 year = {2020}, month = {2}, gsid = {10165923379562366066}, - gscites = {2}, + gscites = {37}, + ss_id = {4d54536ebf1b355d02cdfe742553032666101dbd}, + all_ss_ids = {['f643f4a927cf65f1ec66231ae76d3bc1736a67d3', '4d54536ebf1b355d02cdfe742553032666101dbd']}, } @conference{Lief20a, @@ -13879,15 +17284,15 @@ @conference{Lief20a url = {https://www.euretina.org/congress/amsterdam-2020/virtual-2020-freepapers/}, title = {Achieving expert level performance in quantifying 13 distinctive features of neovascular age-related macular degeneration on optical coherence tomography}, abstract = {Purpose: - To develop and validate an automatic model for volumetric quantification of the 13 most common abnormalities associated with neovascular age-related macular degeneration (nAMD) on optical coherence tomography (OCT). - Setting: - Clinical data and associated imaging were collected from five UK secondary care providers between February 2002 and September 2017. 
We identified 680 treatment-naive patients with no recent cataract surgery, at least one anti-VEGF injection, a diagnosis of nAMD, and associated OCT imaging (Topcon, Tokyo, Japan). - Methods: - A deep convolutional neural network (CNN) was used to produce a volumetric segmentation of 13 retinal abnormalities. The CNN architecture was based on a deep encoder-decoder structure that combines information from adjacent B-scans. The model was trained on 2,712 B-scans from 307 OCT volumes, with manual labels provided at a voxel-level for all abnormalities by eight graders. Abnormalities that were found in over 80 B-scans were modelled. The performance of the model and graders was assessed on an independent set of 112 B-scans from 112 OCT-volumes of nAMD cases, for which four graders independently provided annotations. To create a reference standard, the outputs of three graders were combined and defined as voxels where at least two out of three agreed. The graders' accuracy was calculated using each grader, in turn, as an observer. The Dice similarity metric was used to compare overlap, calculated per A-scan or per voxel where appropriate. Free-response receiver operator characteristic (FROC) analysis was used for the detection of small abnormalities. The intraclass correlation coefficient (ICC) was used to measure agreement on area or volume measures, with the reference area or volume defined as the average of the three graders. - Results: - Included abnormalities were: intraretinal fluid (IRF), subretinal fluid (SRF), pigment epithelial detachment (PED), subretinal hyperreflective material (SHRM), fibrosis, drusen and drusenoid PED, epiretinal membrane (ERM), outer plexiform layer (OPL) descent, ellipsoid loss, retinal pigment epithelium (RPE) loss or attenuation, hyper-transmission, hyperreflective dots and subretinal drusenoid deposits - reticular pseudodrusen (SDD - RPD). For OPL-descent and fibrosis there were insufficient examples in the test set for a reliable performance estimate.For the other features, the model obtained an average Dice score of 0.63 +- 0.15 (median 0.64), compared to 0.61 +- 0.17 (median 0.60) for the observers. The average ICC for the model was 0.66 +- 0.22 (median 0.69), compared to 0.62 +- 0.21 (median 0.55) for the observers. For individual features, differences between model and observer Dice score were within a 95% confidence interval for all features except ellipsoid loss, where model performance was slightly better (p=0.03). Regarding ICC, model performance was slightly better for IRF (p=0.04) and ellipsoid loss (p=0.006), slightly worse for drusen and drusenoid PED (p=0.03), and within the 95% confidence interval for other features. For hyperreflective dots and SDD-RPD, FROC analysis revealed that the model performed at similar sensitivity per false positives as the observers. - Conclusions: - We present a deep-learning based model that provides accurate volumetric quantification of a comprehensive set of relevant pathological components of nAMD. There was relatively large variability in grader agreement between abnormalities. Nevertheless, model performance was comparable to, and in many cases exceeded, human performance, both in terms of overlap and quantification. The model generates a precise, quantitative morphological signature of the retinal pathology that can facilitate the development of prediction models for treatment response and planning of personalized treatment intervals, as well as further research into structure/function correlation. 
In clinical care it can facilitate structured reporting, reducing subjectivity in clinicians' assessments and enabling implementation of refined treatment guidelines.The presented model accelerates interpretation of OCT volumes and surpasses manual reading, both in terms of attainable level of extracted information and consistency. This can potentially lead to a reduction of costs in interpretation of clinical trials and improve personalized clinical care.}, + To develop and validate an automatic model for volumetric quantification of the 13 most common abnormalities associated with neovascular age-related macular degeneration (nAMD) on optical coherence tomography (OCT). + Setting: + Clinical data and associated imaging were collected from five UK secondary care providers between February 2002 and September 2017. We identified 680 treatment-naive patients with no recent cataract surgery, at least one anti-VEGF injection, a diagnosis of nAMD, and associated OCT imaging (Topcon, Tokyo, Japan). + Methods: + A deep convolutional neural network (CNN) was used to produce a volumetric segmentation of 13 retinal abnormalities. The CNN architecture was based on a deep encoder-decoder structure that combines information from adjacent B-scans. The model was trained on 2,712 B-scans from 307 OCT volumes, with manual labels provided at a voxel-level for all abnormalities by eight graders. Abnormalities that were found in over 80 B-scans were modelled. The performance of the model and graders was assessed on an independent set of 112 B-scans from 112 OCT-volumes of nAMD cases, for which four graders independently provided annotations. To create a reference standard, the outputs of three graders were combined and defined as voxels where at least two out of three agreed. The graders' accuracy was calculated using each grader, in turn, as an observer. The Dice similarity metric was used to compare overlap, calculated per A-scan or per voxel where appropriate. Free-response receiver operator characteristic (FROC) analysis was used for the detection of small abnormalities. The intraclass correlation coefficient (ICC) was used to measure agreement on area or volume measures, with the reference area or volume defined as the average of the three graders. + Results: + Included abnormalities were: intraretinal fluid (IRF), subretinal fluid (SRF), pigment epithelial detachment (PED), subretinal hyperreflective material (SHRM), fibrosis, drusen and drusenoid PED, epiretinal membrane (ERM), outer plexiform layer (OPL) descent, ellipsoid loss, retinal pigment epithelium (RPE) loss or attenuation, hyper-transmission, hyperreflective dots and subretinal drusenoid deposits - reticular pseudodrusen (SDD - RPD). For OPL-descent and fibrosis there were insufficient examples in the test set for a reliable performance estimate. For the other features, the model obtained an average Dice score of 0.63 +- 0.15 (median 0.64), compared to 0.61 +- 0.17 (median 0.60) for the observers. The average ICC for the model was 0.66 +- 0.22 (median 0.69), compared to 0.62 +- 0.21 (median 0.55) for the observers. For individual features, differences between model and observer Dice score were within a 95% confidence interval for all features except ellipsoid loss, where model performance was slightly better (p=0.03). Regarding ICC, model performance was slightly better for IRF (p=0.04) and ellipsoid loss (p=0.006), slightly worse for drusen and drusenoid PED (p=0.03), and within the 95% confidence interval for other features.
For hyperreflective dots and SDD-RPD, FROC analysis revealed that the model performed at similar sensitivity per false positives as the observers. + Conclusions: + We present a deep-learning based model that provides accurate volumetric quantification of a comprehensive set of relevant pathological components of nAMD. There was relatively large variability in grader agreement between abnormalities. Nevertheless, model performance was comparable to, and in many cases exceeded, human performance, both in terms of overlap and quantification. The model generates a precise, quantitative morphological signature of the retinal pathology that can facilitate the development of prediction models for treatment response and planning of personalized treatment intervals, as well as further research into structure/function correlation. In clinical care it can facilitate structured reporting, reducing subjectivity in clinicians' assessments and enabling implementation of refined treatment guidelines. The presented model accelerates interpretation of OCT volumes and surpasses manual reading, both in terms of attainable level of extracted information and consistency. This can potentially lead to a reduction of costs in interpretation of clinical trials and improve personalized clinical care.}, optnote = {DIAG, RADIOLOGY}, year = {2020}, month = {9}, @@ -13897,19 +17302,19 @@ @article{Lief21 title = {Quantification of key retinal features in early and late age-related macular degeneration using deep learning}, author = {Liefers, Bart and Taylor, Paul and Alsaedi, Abdulrahman and Bailey, Clare and Balaskas, Konstantinos and Dhingra, Narendra and Egan, Catherine A and Rodrigues, Filipa Gomes and Gonz\'{a}lez-Gonzalo, Cristina and Heeren, Tjebo F.C. and Lotery, Andrew and Muller, Philipp L. and Olvera-Barrios, Abraham and Paul, Bobby and Schwartz, Roy and Thomas, Darren S. and Warwick, Alasdair N. and Tufail, Adnan and S\'{a}nchez, Clara I.}, abstract = {Purpose: - To develop and validate a deep learning model for segmentation of 13 features associated with neovascular and atrophic age-related macular degeneration (AMD). - - Design: - Development and validation of a deep-learning model for feature segmentation. - - Methods: - Data for model development were obtained from 307 optical coherence tomography volumes. Eight experienced graders manually delineated all abnormalities in 2,712 B-scans. A deep neural network was trained with this data to perform voxel-level segmentation of the 13 most common abnormalities (features). For evaluation, 112 B-scans from 112 patients with a diagnosis of neovascular AMD were annotated by four independent observers. Main outcome measures were Dice score, intra-class correlation coefficient (ICC), and free-response receiver operating characteristic (FROC) curve. - - Results: - On 11 of the 13 features, the model obtained a mean Dice score of 0.63 +- 0.15, compared to 0.61 +- 0.17 for the observers. The mean ICC for the model was 0.66 +- 0.22, compared to 0.62 +- 0.21 for the observers. Two features were not evaluated quantitatively due to lack of data. FROC analysis demonstrated that the model scored similar or higher sensitivity per false positives compared to the observers. - - Conclusions: - The quality of the automatic segmentation matches that of experienced graders for most features, exceeding human performance for some features.
The quantified parameters provided by the model can be used in the current clinical routine and open possibilities for further research into treatment response outside clinical trials.}, + To develop and validate a deep learning model for segmentation of 13 features associated with neovascular and atrophic age-related macular degeneration (AMD). + + Design: + Development and validation of a deep-learning model for feature segmentation. + + Methods: + Data for model development were obtained from 307 optical coherence tomography volumes. Eight experienced graders manually delineated all abnormalities in 2,712 B-scans. A deep neural network was trained with this data to perform voxel-level segmentation of the 13 most common abnormalities (features). For evaluation, 112 B-scans from 112 patients with a diagnosis of neovascular AMD were annotated by four independent observers. Main outcome measures were Dice score, intra-class correlation coefficient (ICC), and free-response receiver operating characteristic (FROC) curve. + + Results: + On 11 of the 13 features, the model obtained a mean Dice score of 0.63 +- 0.15, compared to 0.61 +- 0.17 for the observers. The mean ICC for the model was 0.66 +- 0.22, compared to 0.62 +- 0.21 for the observers. Two features were not evaluated quantitatively due to lack of data. FROC analysis demonstrated that the model scored similar or higher sensitivity per false positives compared to the observers. + + Conclusions: + The quality of the automatic segmentation matches that of experienced graders for most features, exceeding human performance for some features. The quantified parameters provided by the model can be used in the current clinical routine and open possibilities for further research into treatment response outside clinical trials.}, journal = AJO, doi = {https://doi.org/10.1016/j.ajo.2020.12.034}, url = {https://www.sciencedirect.com/science/article/abs/pii/S0002939421000088}, @@ -13919,6 +17324,9 @@ @article{Lief21 pmid = {33422464}, publisher = {Elsevier}, optnote = {DIAG}, + ss_id = {67af3a5566aceaf87e2caf82eb587c91ebd41900}, + all_ss_ids = {['67af3a5566aceaf87e2caf82eb587c91ebd41900']}, + gscites = {23}, } @phdthesis{Lief22, title = {Deep Learning Algorithms for Age-Related Macular Degeneration}, url = {https://repository.ubn.ru.nl/handle/2066/252875}, abstract = {This thesis is devoted to the applications of deep learning algorithms for automated analysis of retinal images. - In contains chapters on: - - 1. Automatic detection of the foveal center in OCT scans (Chapter 2); - 2. Segmentation of retinal layers and geographic atrophy (Chapter 3); - 3. Segmentation of geographic atrophy on color fundus (Chapter 4); - 4. Quantification of key retinal features in early and late AMD. (Chapter 5).}, + It contains chapters on: + + 1. Automatic detection of the foveal center in OCT scans (Chapter 2); + 2. Segmentation of retinal layers and geographic atrophy (Chapter 3); + 3. Segmentation of geographic atrophy on color fundus (Chapter 4); + 4. Quantification of key retinal features in early and late AMD. (Chapter 5).}, copromotor = {T.
Theelen}, file = {Lief22.pdf:pdf/Lief22.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, @@ -13957,6 +17365,8 @@ @article{LinE05 month = {1}, gsid = {13303135162649110518}, gscites = {108}, + ss_id = {d4dabcc6ef6712de68132daadabb0660bc567a9e}, + all_ss_ids = {['d4dabcc6ef6712de68132daadabb0660bc567a9e']}, } @article{LinE06, @@ -13975,6 +17385,8 @@ @article{LinE06 month = {5}, gsid = {12299719120846507003}, gscites = {195}, + ss_id = {d214f7c0440c815029df15d85ac33c9fabb24d29}, + all_ss_ids = {['d214f7c0440c815029df15d85ac33c9fabb24d29']}, } @inproceedings{Linm20, @@ -13987,6 +17399,43 @@ @inproceedings{Linm20 file = {:pdf/Linm20.pdf:PDF}, optnote = {DIAG}, year = {2020}, + ss_id = {89997fe09abc2c7d8bd73ef0f902e38fc2ae8baa}, + all_ss_ids = {['89997fe09abc2c7d8bd73ef0f902e38fc2ae8baa']}, + gscites = {31}, +} + +@article{Linm23, + author = {Linmans, Jasper and Elfwing, Stefan and van der Laak, Jeroen and Litjens, Geert}, + year = {2023}, + month = {1}, + journal = MIA, + title = {Predictive uncertainty estimation for out-of-distribution detection in digital pathology.}, + doi = {10.1016/j.media.2022.102655}, + pages = {102655}, + volume = {83}, + abstract = {Machine learning model deployment in clinical practice demands real-time risk assessment to identify situations in which the model is uncertain. Once deployed, models should be accurate for classes seen during training while providing informative estimates of uncertainty to flag abnormalities and unseen classes for further analysis. Although recent developments in uncertainty estimation have resulted in an increasing number of methods, a rigorous empirical evaluation of their performance on large-scale digital pathology datasets is lacking. This work provides a benchmark for evaluating prevalent methods on multiple datasets by comparing the uncertainty estimates on both in-distribution and realistic near and far out-of-distribution (OOD) data on a whole-slide level. To this end, we aggregate uncertainty values from patch-based classifiers to whole-slide level uncertainty scores. We show that results found in classical computer vision benchmarks do not always translate to the medical imaging setting. Specifically, we demonstrate that deep ensembles perform best at detecting far-OOD data but can be outperformed on a more challenging near-OOD detection task by multi-head ensembles trained for optimal ensemble diversity. Furthermore, we demonstrate the harmful impact OOD data can have on the performance of deployed machine learning models. Overall, we show that uncertainty estimates can be used to discriminate in-distribution from OOD data with high AUC scores. 
Still, model deployment might require careful tuning based on prior knowledge of prospective OOD data.}, + file = {:pdf/Linm23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + pmid = {36306568}, + ss_id = {697be6a6398755ff665b968936fef868ec8974bf}, + all_ss_ids = {['697be6a6398755ff665b968936fef868ec8974bf']}, + gscites = {16}, +} + +@article{Linm23a, + author = {Linmans, Jasper and Hoogeboom, Emiel and van der Laak, Jeroen and Litjens, Geert}, + title = {The Latent Doctor Model for Modeling Inter-Observer Variability}, + doi = {10.1109/jbhi.2023.3323582}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/JBHI.2023.3323582}, + file = {Linm23a.pdf:pdf\\Linm23a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {IEEE Journal of Biomedical and Health Informatics}, + automatic = {yes}, + citation-count = {0}, + pages = {1-12}, + pmid = {37831571}, } @conference{Litj09, @@ -14026,7 +17475,9 @@ @inproceedings{Litj10a optnote = {DIAG, RADIOLOGY}, pmid = {20879340}, gsid = {3783126959662430798}, - gscites = {3}, + gscites = {4}, + ss_id = {0f0313aea2ba0198c5da4b5eff724caecf885e7e}, + all_ss_ids = {['0f0313aea2ba0198c5da4b5eff724caecf885e7e']}, } @mastersthesis{Litj10b, @@ -14054,7 +17505,9 @@ @inproceedings{Litj11 number = {1}, month = {3}, gsid = {11297243638232679534}, - gscites = {42}, + gscites = {43}, + ss_id = {91a237f32276f0b6c334058b32e1c6dd25ffd8db}, + all_ss_ids = {['91a237f32276f0b6c334058b32e1c6dd25ffd8db']}, } @conference{Litj11b, @@ -14082,6 +17535,8 @@ @inproceedings{Litj12 month = {2}, gsid = {1559733626797444041}, gscites = {24}, + ss_id = {14db29c5408cb2e1d9e66cf4b09034699a024ca6}, + all_ss_ids = {['14db29c5408cb2e1d9e66cf4b09034699a024ca6']}, } @inproceedings{Litj12a, @@ -14097,8 +17552,10 @@ @inproceedings{Litj12a file = {Litj12a.pdf:pdf\\Litj12a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {15659902223096625028}, - gscites = {58}, + gscites = {73}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/111051}, + ss_id = {bcff51ffb3e864ae4f8a4b6de12c2bf019999c7b}, + all_ss_ids = {['bcff51ffb3e864ae4f8a4b6de12c2bf019999c7b']}, } @article{Litj12b, @@ -14136,6 +17593,9 @@ @inproceedings{Litj12d year = {2012}, file = {Litj12d.pdf:pdf\\Litj12d.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {72368b70dd95e303cc4cc1bdaa8f5047225aa773}, + all_ss_ids = {['72368b70dd95e303cc4cc1bdaa8f5047225aa773']}, + gscites = {18}, } @conference{Litj13, @@ -14162,8 +17622,10 @@ @article{Litj14 pmid = {24418598}, month = {2}, gsid = {12911375061118389379}, - gscites = {275}, + gscites = {551}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/137969}, + ss_id = {cc83eabf4f833b8c92dcf6012dcce348591d060f}, + all_ss_ids = {['cc83eabf4f833b8c92dcf6012dcce348591d060f']}, } @inproceedings{Litj14a, @@ -14181,6 +17643,8 @@ @inproceedings{Litj14a month = {3}, gsid = {16281847160609020035}, gscites = {18}, + ss_id = {2f39694d368806dbf90c898dcadb23fa4d2a0ba2}, + all_ss_ids = {['2f39694d368806dbf90c898dcadb23fa4d2a0ba2']}, } @inproceedings{Litj14b, @@ -14197,7 +17661,9 @@ @inproceedings{Litj14b optnote = {DIAG, MAGIC, RADIOLOGY}, month = {3}, gsid = {6518656036620498343}, - gscites = {3}, + gscites = {4}, + ss_id = {639ee4bb8d748364fa12c44a29cb38e925488ada}, + all_ss_ids = {['639ee4bb8d748364fa12c44a29cb38e925488ada']}, } @article{Litj14c, @@ -14216,8 +17682,10 @@ @article{Litj14c publisher = {Institute of Electrical \& Electronics Engineers (IEEE)}, month = {5}, gsid = {2908069285137157626}, - gscites = {234}, + gscites = {377}, taverne_url =
{https://repository.ubn.ru.nl/handle/2066/247043}, + ss_id = {1b02bd4ef5cbf6d6fe99c1e52814ae27c17cf96b}, + all_ss_ids = {['1b02bd4ef5cbf6d6fe99c1e52814ae27c17cf96b']}, } @article{Litj14d, @@ -14237,6 +17705,8 @@ @article{Litj14d month = {10}, gsid = {1987898104259315064}, gscites = {16}, + ss_id = {1b62c9397a2b1e488ae68530275cd815204022e1}, + all_ss_ids = {['1b62c9397a2b1e488ae68530275cd815204022e1']}, } @conference{Litj14e, @@ -14261,6 +17731,9 @@ @inproceedings{Litj15 file = {Litj15.pdf:pdf\\Litj15.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {3}, + ss_id = {e14a159a1d16c2acfe48b60c81807300c67ee228}, + all_ss_ids = {['e14a159a1d16c2acfe48b60c81807300c67ee228']}, + gscites = {15}, } @phdthesis{Litj15a, @@ -14292,8 +17765,10 @@ @article{Litj15b pmid = {26060063}, month = {6}, gsid = {4162403948837858436}, - gscites = {47}, + gscites = {54}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/151185}, + ss_id = {a44520f7cfcb2d3867ccb40b54b699b8f23f2acb}, + all_ss_ids = {['a44520f7cfcb2d3867ccb40b54b699b8f23f2acb']}, } @inproceedings{Litj16, @@ -14309,6 +17784,9 @@ @inproceedings{Litj16 file = {Litj16.pdf:pdf\\Litj16.pdf:PDF}, optnote = {DIAG, PATHOLOGY, RADIOLOGY}, month = {3}, + ss_id = {9afbb8e061beb2a23a29150d6f5be1d8507db996}, + all_ss_ids = {['9afbb8e061beb2a23a29150d6f5be1d8507db996']}, + gscites = {0}, } @article{Litj16b, @@ -14325,6 +17803,9 @@ @article{Litj16b optnote = {RADIOLOGY}, pmid = {26192734}, year = {2016}, + ss_id = {7bfa78ee6a6d93bac31a1e29bc0552ca8e5a215c}, + all_ss_ids = {['7bfa78ee6a6d93bac31a1e29bc0552ca8e5a215c', '4f016eb85905d24ce82adeee28bdf66b46870c9e']}, + gscites = {51}, } @article{Litj16c, @@ -14342,7 +17823,9 @@ @article{Litj16c pmid = {27212078}, month = {5}, gsid = {15076999051911417421}, - gscites = {490}, + gscites = {806}, + ss_id = {47262a72c9c7bf5070b97e70b55c6190d1079260}, + all_ss_ids = {['47262a72c9c7bf5070b97e70b55c6190d1079260']}, } @article{Litj17, @@ -14362,6 +17845,8 @@ @article{Litj17 gsid = {17186798094825352745}, gscites = {3681}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/179538}, + ss_id = {2abde28f75a9135c8ed7c50ea16b7b9e49da0c09}, + all_ss_ids = {['2abde28f75a9135c8ed7c50ea16b7b9e49da0c09']}, } @article{Litj18, @@ -14373,15 +17858,17 @@ @article{Litj18 pages = {1--8}, doi = {10.1093/gigascience/giy065}, abstract = {Background: The presence of lymph node metastases is one of the most important factors in breast cancer prognosis. The most common strategy to assess the regional lymph node status is the sentinel lymph node procedure. The sentinel lymph node is the most likely lymph node to contain metastasized cancer cells and is excised, histopathologically processed and examined by the pathologist. This tedious examination process is time-consuming and can lead to small metastases being missed. However, recent advances in whole-slide imaging and machine learning have opened an avenue for analysis of digitized lymph node sections with computer algorithms. For example, convolutional neural networks, a type of machine learning algorithm, are able to automatically detect cancer metastases in lymph nodes with high accuracy. To train machine learning models, large, well-curated datasets are needed. - Results: We released a dataset of 1399 annotated whole-slide images of lymph nodes, both with and without metastases, in total three terabytes of data in the context of the CAMELYON16 and CAMELYON17 Grand Challenges. 
Slides were collected from five different medical centers to cover a broad range of image appearance and staining variations. Each whole-slide image has a slide-level label indicating whether it contains no metastases, macro-metastases, micro-metastases or isolated tumor cells. Furthermore, for 209 whole-slide images, detailed hand-drawn contours for all metastases are provided. Last, open-source software tools to visualize and interact with the data have been made available. - Conclusions: A unique dataset of annotated, whole-slide digital histopathology images has been provided with high potential for re-use.}, + Results: We released a dataset of 1399 annotated whole-slide images of lymph nodes, both with and without metastases, in total three terabytes of data in the context of the CAMELYON16 and CAMELYON17 Grand Challenges. Slides were collected from five different medical centers to cover a broad range of image appearance and staining variations. Each whole-slide image has a slide-level label indicating whether it contains no metastases, macro-metastases, micro-metastases or isolated tumor cells. Furthermore, for 209 whole-slide images, detailed hand-drawn contours for all metastases are provided. Last, open-source software tools to visualize and interact with the data have been made available. + Conclusions: A unique dataset of annotated, whole-slide digital histopathology images has been provided with high potential for re-use.}, file = {:pdf/Litj18.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, number = {6}, pmid = {29860392}, month = {5}, gsid = {12911118689571475543}, - gscites = {48}, + gscites = {266}, + ss_id = {188f8f6f70947215a9dfeebb0b577155e0d3d339}, + all_ss_ids = {['188f8f6f70947215a9dfeebb0b577155e0d3d339']}, } @article{Litj19, @@ -14399,8 +17886,10 @@ @article{Litj19 optnote = {DIAG, RADIOLOGY}, pmid = {31395244}, gsid = {7949427670884659630}, - gscites = {34}, + gscites = {224}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/208418}, + ss_id = {1e70a09aedc8d91538e269f568b212ca839012b1}, + all_ss_ids = {['1e70a09aedc8d91538e269f568b212ca839012b1']}, } @article{Litj22, @@ -14416,6 +17905,9 @@ @article{Litj22 pmid = {35701372}, year = {2022}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/252047}, + ss_id = {42c19709354c631efc35d2bda8f0d33cf1d79831}, + all_ss_ids = {['42c19709354c631efc35d2bda8f0d33cf1d79831']}, + gscites = {3}, } @article{Liu14, @@ -14432,7 +17924,9 @@ @article{Liu14 number = {2}, month = {7}, gsid = {10049699584799847554}, - gscites = {19}, + gscites = {22}, + ss_id = {a4278e9daff8dd2d492c51d6c629a76d552a9c45}, + all_ss_ids = {['a4278e9daff8dd2d492c51d6c629a76d552a9c45']}, } @inproceedings{Lo09, @@ -14444,6 +17938,8 @@ @inproceedings{Lo09 abstract = {This paper describes a framework for evaluating airway extraction algorithms in a standardized manner and establishing reference segmentations that can be used for future algorithm development. Because of the sheer difficulty of constructing a complete reference standard manually, we propose to construct a reference using results from the algorithms being compared, by splitting each airway tree segmentation result into individual branch segments that are subsequently visually inspected by trained observers. Using the so constructed reference, a total of seven performance measures covering different aspects of segmentation quality are computed. 
We evaluated 15 airway tree extraction algorithms from different research groups on a diverse set of 20 chest CT scans from subjects ranging from healthy volunteers to patients with severe lung disease, who were scanned at different sites, with several different CT scanner models, and using a variety of scanning protocols and reconstruction parameters.}, file = {Lo09.pdf:pdf\\Lo09.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['d66ed19788611086397ccada6e74c7ba8d759cdf']}, + gscites = {2}, } @inproceedings{Lo10, @@ -14457,7 +17953,9 @@ @inproceedings{Lo10 file = {Lo10.pdf:pdf\\Lo10.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {3547575388613335105}, - gscites = {25}, + gscites = {29}, + ss_id = {56f99e7b81eac197131323001445ecb2eb0569b1}, + all_ss_ids = {['56f99e7b81eac197131323001445ecb2eb0569b1']}, } @inproceedings{Lo11, @@ -14487,8 +17985,10 @@ @article{Lo12 pmid = {22855226}, month = {11}, gsid = {7968104102758490533}, - gscites = {187}, + gscites = {236}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/107854}, + ss_id = {cfcae9fa6bd0b4cad15e40bb0dc4e8b367826acc}, + all_ss_ids = {['cfcae9fa6bd0b4cad15e40bb0dc4e8b367826acc']}, } @inproceedings{Lo13, @@ -14526,16 +18026,16 @@ @conference{Lohn15 optnote = {DIAG}, } -@Conference{Lohuizen23a, - author = {Q. Y. van Lohuizen and C. Roest and F. F. J. Simonis and S. J. Fransen and T. C. Kwee and D. Yakar and H. Huisman}, +@conference{Lohuizen23a, + author = {Q. Y. van Lohuizen and C. Roest and F. F. J. Simonis and S. J. Fransen and T. C. Kwee and D. Yakar and H. Huisman}, booktitle = RSNA, - title = {Diagnostic AI to speed up MRI protocols by identifying redundant sequences: are all diffusion-weighted prostate MRI sequences necessary?}, - abstract = {PURPOSE: Studies showed that AI reconstruction of accelerated MRI improves visual quality, but it is unclear whether this improves diagnostic value. We investigated a novel framework for accelerated MRI by assessing reconstruction quality (naïve- vs AI-reconstructed) by comparing diagnostic performance and visual similarity as an outcome for prostate cancer detection. -METHODS: A retrospective multi-site study was performed on a cohort of 1535 patients who underwent bi-parametric prostate MRI between 2016-2020. An expert radiologist delineated all clinically significant prostate cancer (csPCa) lesions (PI-RADS ≥ 4). T2W scans were retrospectively undersampled in k-space, simulating four (R4) and eight (R8) times acceleration. A 3D U-Net was used to reconstruct undersampled images. The resulting images were fed to an existing state-of-the-art csPCa detection AI to evaluate the effect of AI reconstruction on diagnosis. Visual image quality (SSIM) was compared with a Wilcoxon test. Lesion level diagnostics were evaluated by comparing the partial area-under-the-FROC-curve over the false positive interval 0.1-2.5 (pAUC) using permutation tests. -RESULTS: AI-based reconstruction significantly improved visual quality compared to naïve (IFFT) reconstruction MRI at R4 (SSIM 0.78±0.02 vs 0.68±0.03, p<0.001) and R8 (SSIM 0.67±0.03 vs 0.51±0.03, p<0.001), however, no significant improvements in diagnostic performance were observed for R4 (pAUC FROC 1.33 [CI 1.28-1.39] vs 1.29 [CI 1.23-1.35], p=0.37), nor R8 (pAUC FROC 1.12 [CI 1.07-1.17] vs 0.95 [CI 1.89-1.01], p=0.067). AI-based reconstruction resulted in 0.1 or more decrease in sensitivity compared to unaccelerated MRI. 
-CONCLUSION: Recovery of visual quality in reconstructions does not correlate with recovering diagnostic quality, emphasizing the importance of measuring diagnostic value rather than visual similarity. AI reconstruction tools should be approached with care because they might have been optimized to reconstruct visually appealing images instead of diagnostic images.}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, + title = {Diagnostic AI to speed up MRI protocols by identifying redundant sequences: are all diffusion-weighted prostate MRI sequences necessary?}, + abstract = {PURPOSE: Studies showed that AI reconstruction of accelerated MRI improves visual quality, but it is unclear whether this improves diagnostic value. We investigated a novel framework for accelerated MRI by assessing reconstruction quality (naive- vs AI-reconstructed) by comparing diagnostic performance and visual similarity as an outcome for prostate cancer detection. + METHODS: A retrospective multi-site study was performed on a cohort of 1535 patients who underwent bi-parametric prostate MRI between 2016-2020. An expert radiologist delineated all clinically significant prostate cancer (csPCa) lesions (PI-RADS >= 4). T2W scans were retrospectively undersampled in k-space, simulating four (R4) and eight (R8) times acceleration. A 3D U-Net was used to reconstruct undersampled images. The resulting images were fed to an existing state-of-the-art csPCa detection AI to evaluate the effect of AI reconstruction on diagnosis. Visual image quality (SSIM) was compared with a Wilcoxon test. Lesion level diagnostics were evaluated by comparing the partial area-under-the-FROC-curve over the false positive interval 0.1-2.5 (pAUC) using permutation tests. + RESULTS: AI-based reconstruction significantly improved visual quality compared to naive (IFFT) reconstruction MRI at R4 (SSIM 0.78+-0.02 vs 0.68+-0.03, p<0.001) and R8 (SSIM 0.67+-0.03 vs 0.51+-0.03, p<0.001), however, no significant improvements in diagnostic performance were observed for R4 (pAUC FROC 1.33 [CI 1.28-1.39] vs 1.29 [CI 1.23-1.35], p=0.37), nor R8 (pAUC FROC 1.12 [CI 1.07-1.17] vs 0.95 [CI 0.89-1.01], p=0.067). AI-based reconstruction resulted in 0.1 or more decrease in sensitivity compared to unaccelerated MRI. + CONCLUSION: Recovery of visual quality in reconstructions does not correlate with recovering diagnostic quality, emphasizing the importance of measuring diagnostic value rather than visual similarity. AI reconstruction tools should be approached with care because they might have been optimized to reconstruct visually appealing images instead of diagnostic images.}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, } @article{Loka10, @@ -14554,6 +18054,40 @@ @article{Loka10 month = {10}, gsid = {7815225352498783242}, gscites = {70}, + ss_id = {a717f22666cfef9a62f3e18ac8ed5d81abac272e}, + all_ss_ids = {['a717f22666cfef9a62f3e18ac8ed5d81abac272e']}, +} + +@conference{Loma23, + author = {Robin Lomans and Rachel van der Post and Francesco Ciompi}, + booktitle = {MIDL}, + title = {Interactive Cell Detection in H\&E-stained slides of Diffuse Gastric Cancer}, + abstract = {We present an interactive detection model to improve the cell annotation workflow of diffuse gastric cancer. + The model relates image and user inputs and is trained to detect three types of cells in diffuse gastric cancer histology. + We measure model multi-class cell detection performance as per-class F1 score and we show that it increases with the number of user input clicks.
Moreover, we show that the proposed interactive annotation approach substantially reduces the number of required user actions needed for complete image annotation, achieving a 17\% reduction for the multi-class case. + Future work will implement an iterative approach to filter out recurring false positives for further performance improvement.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, +} + +@conference{Loma23a, + author = {Robin Lomans and Jeroen van der Laak and Iris Nagtegaal and Francesco Ciompi and Rachel van der Post}, + booktitle = {European Congress of Pathology}, + title = {Deep learning for multi-class cell detection in H\&E-stained slides of diffuse gastric cancer}, + abstract = {Background \& objective + Diffuse gastric cancer (DGC) is characterized by poorly cohesive cells which are difficult to detect. We propose the first deep learning model to detect classical signet ring cells (SRCs), atypical SRCs, and poorly differentiated cells in H\&E-stained slides of DGC. + + Methods + We collected slides from 9 patients with hereditary DGC, resulting in 105 and 3 whole-slide images (WSIs) of gastric resections and biopsies, respectively. The three target cell types were annotated, resulting in 24,695 cell-level annotations. We trained a deep learning model with the Faster-RCNN architecture using 99 WSIs in the development set. + + Results + The algorithm was tested on 9 WSIs in the independent validation set. Model predictions were counted as correct if they were within a 15-micron radius from the expert reference annotations. For evaluation, we split the detection task into two components: class-independent cell localization (recognition of any tumor cell type) and cell-type classification (categorizing localized cells as the correct types). We found (average) F1 scores of 0.69 and 0.93 for the localization and classification tasks, respectively. Thus, we observe that the algorithm does not generally misclassify cells, but rather, the errors mainly arise from missing cells or false positive predictions of cells that do not belong to the three target classes. + + Conclusion + Future work will focus on improving the cell localization performance of the algorithm. Cell localization of the three target classes will be an important task in a clinical application of our model, in which it could be used to improve the detection of DGC lesions among large sets of slides.
Moreover, the algorithm will allow for quantitative assessment of DGC patterns, potentially giving new insights in specific morphological features of DGC such as patterns of spatial cell distributions.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, } @inproceedings{Loog02a, @@ -14567,7 +18101,9 @@ @inproceedings{Loog02a file = {Loog02a.pdf:pdf\\Loog02a.pdf:PDF}, gsid = {17126051828720866801}, optnote = {DIAG, RADIOLOGY}, - gscites = {31}, + gscites = {33}, + ss_id = {284bac5873dc9ae6cd676d5e77f51a7347db061e}, + all_ss_ids = {['284bac5873dc9ae6cd676d5e77f51a7347db061e']}, } @inproceedings{Loog03, @@ -14585,6 +18121,8 @@ @inproceedings{Loog03 month = {5}, gsid = {9395252940507654239}, gscites = {8}, + ss_id = {e783c9b2be90dd57814d8064375e42bef8cd7154}, + all_ss_ids = {['e783c9b2be90dd57814d8064375e42bef8cd7154']}, } @phdthesis{Loog04c, @@ -14613,6 +18151,8 @@ @inproceedings{Loog04d optnote = {DIAG, RADIOLOGY, TB}, gsid = {4373819761535258462}, gscites = {36}, + ss_id = {90f7f1eb9a2d0ff1db35a909062577efca9e32be}, + all_ss_ids = {['90f7f1eb9a2d0ff1db35a909062577efca9e32be']}, } @inproceedings{Loog04e, @@ -14629,6 +18169,8 @@ @inproceedings{Loog04e optnote = {DIAG, RADIOLOGY}, gsid = {13168417199997445959}, gscites = {20}, + ss_id = {07456424645481c6fcb32de1373978bbfc6e6e2c}, + all_ss_ids = {['07456424645481c6fcb32de1373978bbfc6e6e2c']}, } @inproceedings{Loog04f, @@ -14646,6 +18188,8 @@ @inproceedings{Loog04f month = {5}, gsid = {14687688016231883918}, gscites = {10}, + ss_id = {9d694a982ab413c311ead393c98a844394752044}, + all_ss_ids = {['9d694a982ab413c311ead393c98a844394752044']}, } @article{Loog05, @@ -14662,6 +18206,8 @@ @article{Loog05 optnote = {DIAG, RADIOLOGY}, month = {12}, gscites = {23}, + ss_id = {644fcd2ee9894d41c6e886c8067645af514b925f}, + all_ss_ids = {['644fcd2ee9894d41c6e886c8067645af514b925f']}, } @inproceedings{Loog06, @@ -14677,7 +18223,9 @@ @inproceedings{Loog06 file = {Loog06.pdf:pdf\\Loog06.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {5428199913642700502}, - gscites = {12}, + gscites = {19}, + ss_id = {5260cd32e11481ba468d6c8a0464d66627e27f8e}, + all_ss_ids = {['5260cd32e11481ba468d6c8a0464d66627e27f8e']}, } @article{Loog06a, @@ -14695,6 +18243,8 @@ @article{Loog06a month = {5}, gsid = {8560942913013408020}, gscites = {103}, + ss_id = {d5f1619d8d7b6a1f5377a19f4daba1dcef9413aa}, + all_ss_ids = {['d5f1619d8d7b6a1f5377a19f4daba1dcef9413aa']}, } @article{Loog06b, @@ -14712,6 +18262,8 @@ @article{Loog06b month = {12}, gsid = {7955130862773952899}, gscites = {62}, + ss_id = {c238b1c9bdec805216b5c3ee9b650edaf35947d3}, + all_ss_ids = {['c238b1c9bdec805216b5c3ee9b650edaf35947d3']}, } @inproceedings{Lope03, @@ -14724,6 +18276,19 @@ @inproceedings{Lope03 gscites = {4}, } +@article{Lotz21, + author = {Lotz, Johannes and Weiss, Nick and van der Laak, Jeroen and Heldmann, Stefan}, + title = {Comparison of Consecutive and Re-stained Sections for Image Registration in Histopathology}, + doi = {10.48550/ARXIV.2106.13150}, + year = {2021}, + abstract = {Purpose: In digital histopathology, virtual multi-staining is important for diagnosis and biomarker research. Additionally, it provides accurate ground-truth for various deep-learning tasks. Virtual multi-staining can be obtained using different stains for consecutive sections or by re-staining the same section. Both approaches require image registration to compensate tissue deformations, but little attention has been devoted to comparing their accuracy.
Approach: We compare variational image registration of consecutive and re-stained sections and analyze the effect of the image resolution which influences accuracy and required computational resources. We present a new hybrid dataset of re-stained and consecutive sections (HyReCo, 81 slide pairs, approx. 3000 landmarks) that we made publicly available and compare its image registration results to the automatic non-rigid histological image registration (ANHIR) challenge data (230 consecutive slide pairs). Results: We obtain a median landmark error after registration of 7.1 um (HyReCo) and 16.0 um (ANHIR) between consecutive sections. Between re-stained sections, the median registration error is 2.3 um and 0.9 um in the two subsets of the HyReCo dataset. We observe that deformable registration leads to lower landmark errors than affine registration in both cases, though the effect is smaller in re-stained sections. Conclusion: Deformable registration of consecutive and re-stained sections is a valuable tool for the joint analysis of different stains. Significance: While the registration of re-stained sections allows nucleus-level alignment which allows for a direct analysis of interacting biomarkers, consecutive sections only allow the transfer of region-level annotations. The latter can be achieved at low computational cost using coarser image resolutions.}, + url = {https://arxiv.org/abs/2106.13150}, + file = {Lotz21.pdf:pdf\\Lotz21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2106.13150}, + automatic = {yes}, +} + @article{Louz14, author = {Louzao Martinez, Laura and Friedlander, Elza and van der Laak, Jeroen A. W. M. and Hebeda, Konnie M.}, title = {Abundance of IgG4+ Plasma Cells in Isolated Reactive Lymphadenopathy Is No Indication of IgG4-Related Disease}, @@ -14738,7 +18303,26 @@ @article{Louz14 optnote = {DIAG}, month = {10}, gsid = {13309261770194300071}, - gscites = {20}, + gscites = {28}, + ss_id = {81460e5c3d1d916f328caad6b1775bd8ce7fa418}, + all_ss_ids = {['81460e5c3d1d916f328caad6b1775bd8ce7fa418']}, +} + +@article{Luit19, + author = {Luiten, Jacky D. and Korte, Bram and Voogd, Adri C. and Vreuls, Willem and Luiten, Ernest J.T. and Strobbe, Luc J. and Rutten, Matthieu J.C.M. and Plaisier, Menno L. and Lohle, Paul N. and Hooijen, Marianne J.H. and Tjan-Heijnen, Vivianne C.G. and Duijm, Lucien E.M.}, + title = {Trends in frequency and outcome of high-risk breast lesions at core needle biopsy in women recalled at biennial screening mammography, a multiinstitutional study}, + doi = {10.1002/ijc.32353}, + year = {2019}, + abstract = {Between January 1, 2011, and December 31, 2016, we studied the incidence, management and outcome of high-risk breast lesions in a consecutive series of 376,519 screens of women who received biennial screening mammography. During the 6-year period covered by the study, the proportion of women who underwent core needle biopsy (CNB) after recall remained fairly stable, ranging from 39.2% to 48.1% (mean: 44.2%, 5,212/11,783), whereas the proportion of high-risk lesions at CNB (i.e., flat epithelial atypia, atypical ductal hyperplasia, lobular carcinoma in situ and papillary lesions) gradually increased from 3.2% (25/775) in 2011 to 9.5% (86/901) in 2016 (p < 0.001).
The mean proportion of high-risk lesions at CNB that were subsequently treated with diagnostic surgical excision was 51.4% (169/329) and varied between 41.0% and 64.3% through the years, but the excision rate for high-risk lesions per 1,000 screens and per 100 recalls increased from 0.25 (2011) to 0.70 (2016; p < 0.001) and from 0.81 (2011) to 2.50 (2016; p < 0.001), respectively. The proportion of all diagnostic surgical excisions showing in situ or invasive breast cancer was 29.0% (49/169) and varied from 22.2% (8/36) in 2014 to 38.5% (5/13) in 2011. In conclusion, the proportion of high-risk lesions at CNB tripled in a 6-year period, with a concomitant increased excision rate for these lesions. As the proportion of surgical excisions showing in situ or invasive breast cancer did not increase, a rising number of screened women underwent invasive surgical excision with benign outcome.}, + url = {http://dx.doi.org/10.1002/ijc.32353}, + file = {Luit19.pdf:pdf\\Luit19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {International Journal of Cancer}, + automatic = {yes}, + citation-count = {10}, + pages = {2720-2727}, + volume = {145}, + pmid = {31001821}, } @mastersthesis{Lux20, @@ -14775,7 +18359,9 @@ @article{Maas19 pmid = {30946180}, month = {7}, gsid = {14299452348209473588}, - gscites = {2}, + gscites = {23}, + ss_id = {e8b28dbb1bbb2944e3ec69a7cb9a9c33deeead88}, + all_ss_ids = {['e8b28dbb1bbb2944e3ec69a7cb9a9c33deeead88']}, } @inproceedings{MacL08, @@ -14839,6 +18425,8 @@ @inproceedings{Madu11a abstract = {Cavities are air-filled spaces within a pulmonary consolidation and can be indicative of various diseases like primary bronchogenic carcinoma, mycobacterium tuberculosis, cancer and infections. Segmentation of cavities is a challenging task in chest radiographs due to the presence of superimposed structures. It is important to accurately measure the extent of cavitation to measure temporal changes and response to therapy. In this paper, we propose a semi-automatic technique for cavity border segmentation based on dynamic programming. A pixel classifier is trained using cavity border pixels based on Gaussian, location and Hessian features to construct a cavity wall likelihood map. A polar transformation of this likelihood map around the seed point is then used as a cost function to find an optimal border using dynamic programming. We have validated our technique on 50 chest radiographs (2048 x 2048 resolution, pixel size 0.25 mm, Delft Imaging Systems, The Netherlands) containing in total 50 cavities. These cavities have been manually outlined by three human experts, one chest radiologist and two readers certified to read chest radiographs according to a tuberculosis scoring system. The automatic border segmentations are compared with manual segmentations provided by the readers using Jaccard overlapping measure. 
The agreement between the automatically determined outlines is comparable to the inter-observer agreement.}, file = {Madu11a.pdf:pdf\\Madu11a.pdf:PDF}, optnote = {DIAG, RADIOLOGY, TB}, + all_ss_ids = {['1bae2990c101fa15e5f9b20919ab1d58da72e6f3']}, + gscites = {0}, } @conference{Madu11b, @@ -14865,7 +18453,9 @@ @inproceedings{Madu13 number = {16}, month = {3}, gsid = {5079156226762569028}, - gscites = {20}, + gscites = {26}, + ss_id = {7738d6a2cfdba31d771d4a8ab6649f2718ded3d5}, + all_ss_ids = {['7738d6a2cfdba31d771d4a8ab6649f2718ded3d5']}, } @inproceedings{Madu13a, @@ -14883,7 +18473,9 @@ @inproceedings{Madu13a number = {118}, month = {3}, gsid = {540514792832839267}, - gscites = {7}, + gscites = {12}, + ss_id = {a016412367ed40d9c5b76383109dcc4f833ff12f}, + all_ss_ids = {['a016412367ed40d9c5b76383109dcc4f833ff12f']}, } @conference{Madu13b, @@ -14910,6 +18502,9 @@ @article{Madu13c pmid = {24200278}, month = {12}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/125314}, + ss_id = {a53cbf0c57a5e06312397f04c0629c31731e26da}, + all_ss_ids = {['a53cbf0c57a5e06312397f04c0629c31731e26da']}, + gscites = {66}, } @article{Madu14, @@ -14927,8 +18522,10 @@ @article{Madu14 pmid = {24989390}, month = {6}, gsid = {5011186305563263998}, - gscites = {2}, + gscites = {7}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/137611}, + ss_id = {d1db95a442a2c17c4e60e4fee01372fa400ff636}, + all_ss_ids = {['d1db95a442a2c17c4e60e4fee01372fa400ff636']}, } @conference{Madu15, @@ -14968,8 +18565,44 @@ @article{Madu16 pmid = {26688067}, month = {2}, gsid = {8541446038177947163}, - gscites = {19}, + gscites = {39}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/168319}, + ss_id = {7e2f4b83c10f388e8ceab04bf559f30f299f4111}, + all_ss_ids = {['7e2f4b83c10f388e8ceab04bf559f30f299f4111']}, +} + +@article{Maer22, + author = {Maertens, Johan and Lodewyck, Tom and Donnelly, J Peter and Chantepie, Sylvain and Robin, Christine and Blijlevens, Nicole and Turlure, Pascal and Selleslag, Dominik and Baron, Fr\'{e}d\'{e}ric and Aoun, Mickael and Heinz, Werner J and Bertz, Hartmut and R\'{a}\v{c}il, Zdenek and Vandercam, Bernard and Drgona, Lubos and Coiteux, Valerie and Llorente, Cristina Castilla and Schaefer-Prokop, Cornelia and Paesmans, Marianne and Ameye, Lieveke and Meert, Liv and Cheung, Kin Jip and Hepler, Deborah A and Loeffler, J\"{u}rgen and Barnes, Rosemary and Marchetti, Oscar and Verweij, Paul and Lamoth, Frederic and Bochud, Pierre-Yves and Schwarzinger, Michael and Cordonnier, Catherine and for the Infectious Diseases Group and the Acute Leukemia Group of the European Organization for Research and Treatment of Cancer}, + title = {Empiric vs Preemptive Antifungal Strategy in High-Risk Neutropenic Patients on Fluconazole Prophylaxis: A Randomized Trial of the European Organization for Research and Treatment of Cancer}, + doi = {10.1093/cid/ciac623}, + year = {2022}, + abstract = {Background + Empiric antifungal therapy is considered the standard of care for high-risk neutropenic patients with persistent fever. The impact of a preemptive, diagnostic-driven approach based on galactomannan screening and chest computed tomography scan on demand on survival and on the risk of invasive fungal disease (IFD) during the first weeks of high-risk neutropenia is unknown.
+ + + Methods + Patients with acute myeloid leukemia (AML) or myelodysplastic syndrome (MDS) and allogeneic hematopoietic cell transplant recipients were randomly assigned to receive caspofungin empirically (arm A) or preemptively (arm B), while receiving fluconazole 400 mg daily prophylactically. The primary end point of this noninferiority study was overall survival (OS) 42 days after randomization. + + + Results + Of 556 patients recruited, 549 were eligible: 275 in arm A and 274 in arm B. Eighty percent of the patients had AML or MDS requiring high-dose chemotherapy, and 93% of them were in the first induction phase. At day 42, the OS was not inferior in arm B (96.7%; 95% confidence interval [CI], 93.8%-98.3%) when compared with arm A (93.1%; 95% CI, 89.3%-95.5%). The rates of IFDs at day 84 were not significantly different, 7.7% (95% CI, 4.5%-10.8%) in arm B vs 6.6% (95% CI, 3.6%-9.5%) in arm A. The rate of patients who received caspofungin was significantly lower in arm B (27%) than in arm A (63%; P < .001). + + + Conclusions + The preemptive antifungal strategy was safe for high-risk neutropenic patients given fluconazole as prophylaxis, halving the number of patients receiving antifungals without excess mortality or IFDs. + Clinical Trials Registration. NCT01288378; EudraCT 2010-020814-27. + }, + url = {http://dx.doi.org/10.1093/cid/ciac623}, + file = {Maer22.pdf:pdf\\Maer22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Clinical Infectious Diseases}, + automatic = {yes}, + citation-count = {8}, + pages = {674-682}, + volume = {76}, + pmid = {35906831}, } @inproceedings{Mahm14, @@ -15001,8 +18634,10 @@ @article{Maho20 optnote = {DIAG, RADIOLOGY}, pmid = {31930429}, gsid = {17440625968335195014}, - gscites = {3}, + gscites = {41}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/217776}, + ss_id = {64a8a40675ff707a6a3de3af238dc03e30c7186e}, + all_ss_ids = {['64a8a40675ff707a6a3de3af238dc03e30c7186e']}, } @article{Maie18a, @@ -15020,7 +18655,8 @@ @article{Maie18a optnote = {DIAG}, pmid = {30523263}, gsid = {3078574965052198238,9198386791607085917}, - gscites = {56}, + gscites = {242}, + all_ss_ids = {['91d23b702b9a59bf75c5162c3250017b526c0e69', 'bed661aebb0ede3c407e044637ce60a6d7ccfe5f']}, } @article{Maie19, @@ -15032,6 +18668,8 @@ @article{Maie19 abstract = {The number of biomedical image analysis challenges organized per year is steadily increasing. These international competitions have the purpose of benchmarking algorithms on common data sets, typically to identify the best method for a given problem. Recent research, however, revealed that common practice related to challenge reporting does not allow for adequate interpretation and reproducibility of results. To address the discrepancy between the impact of challenges and the quality (control), the Biomedical Image Analysis ChallengeS (BIAS) initiative developed a set of recommendations for the reporting of challenges. The BIAS statement aims to improve the transparency of the reporting of a biomedical image analysis challenge regardless of field of application, image modality or task category assessed. This article describes how the BIAS statement was developed and presents a checklist which authors of biomedical image analysis challenges are encouraged to include in their submission when giving a paper on a challenge into review.
The purpose of the checklist is to standardize and facilitate the review process and raise interpretability and reproducibility of challenge results by making relevant information explicit.}, file = {Maie19.pdf:pdf\\Maie19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['2968b96fae50ce329147b26f9c29d49439b74985']}, + gscites = {72}, } @article{Maie20, @@ -15048,7 +18686,22 @@ @article{Maie20 pages = {101796}, pmid = {32911207}, gsid = {17435494069051207769}, - gscites = {5}, + gscites = {72}, + ss_id = {2968b96fae50ce329147b26f9c29d49439b74985}, + all_ss_ids = {['2968b96fae50ce329147b26f9c29d49439b74985']}, +} + +@article{Maie22, + author = {Maier-Hein, Lena and Reinke, Annika and Godau, Patrick and Tizabi, Minu D. and Buettner, Florian and Christodoulou, Evangelia and Glocker, Ben and Isensee, Fabian and Kleesiek, Jens and Kozubek, Michal and Reyes, Mauricio and Riegler, Michael A. and Wiesenfarth, Manuel and Kavur, A. Emre and Sudre, Carole H. and Baumgartner, Michael and Eisenmann, Matthias and Heckmann-N\"{o}tzel, Doreen and R\"{a}dsch, A. Tim and Acion, Laura and Antonelli, Michela and Arbel, Tal and Bakas, Spyridon and Benis, Arriel and Blaschko, Matthew and Cardoso, M. Jorge and Cheplygina, Veronika and Cimini, Beth A. and Collins, Gary S. and Farahani, Keyvan and Ferrer, Luciana and Galdran, Adrian and van Ginneken, Bram and Haase, Robert and Hashimoto, Daniel A. and Hoffman, Michael M. and Huisman, Merel and Jannin, Pierre and Kahn, Charles E. and Kainmueller, Dagmar and Kainz, Bernhard and Karargyris, Alexandros and Karthikesalingam, Alan and Kenngott, Hannes and Kofler, Florian and Kopp-Schneider, Annette and Kreshuk, Anna and Kurc, Tahsin and Landman, Bennett A. and Litjens, Geert and Madani, Amin and Maier-Hein, Klaus and Martel, Anne L. and Mattson, Peter and Meijering, Erik and Menze, Bjoern and Moons, Karel G. M. and M\"{u}ller, Henning and Nichyporuk, Brennan and Nickel, Felix and Petersen, Jens and Rajpoot, Nasir and Rieke, Nicola and Saez-Rodriguez, Julio and S\'{a}nchez, Clara I. and Shetty, Shravya and van Smeden, Maarten and Summers, Ronald M. and Taha, Abdel A. and Tiulpin, Aleksei and Tsaftaris, Sotirios A. and Van Calster, Ben and Varoquaux, Ga\"{e}l and J\"{a}ger, Paul F.}, + title = {Metrics reloaded: Recommendations for image analysis validation}, + doi = {10.48550/ARXIV.2206.01653}, + year = {2022}, + abstract = {Increasing evidence shows that flaws in machine learning (ML) algorithm validation are an underestimated global problem. Particularly in automatic biomedical image analysis, chosen performance metrics often do not reflect the domain interest, thus failing to adequately measure scientific progress and hindering translation of ML techniques into practice. To overcome this, our large international expert consortium created Metrics Reloaded, a comprehensive framework guiding researchers in the problem-aware selection of metrics. Following the convergence of ML methodology across application domains, Metrics Reloaded fosters the convergence of validation methodology. The framework was developed in a multi-stage Delphi process and is based on the novel concept of a problem fingerprint - a structured representation of the given problem that captures all aspects that are relevant for metric selection, from the domain interest to the properties of the target structure(s), data set and algorithm output.
Based on the problem fingerprint, users are guided through the process of choosing and applying appropriate validation metrics while being made aware of potential pitfalls. Metrics Reloaded targets image analysis problems that can be interpreted as a classification task at image, object or pixel level, namely image-level classification, object detection, semantic segmentation, and instance segmentation tasks. To improve the user experience, we implemented the framework in the Metrics Reloaded online tool, which also provides a point of access to explore weaknesses, strengths and specific recommendations for the most common validation metrics. The broad applicability of our framework across domains is demonstrated by an instantiation for various biological and medical image analysis use cases.}, + url = {https://arxiv.org/abs/2206.01653}, + file = {Maie22.pdf:pdf\\Maie22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2206.01653}, + automatic = {yes}, } @inproceedings{Maka10, @@ -15066,6 +18719,21 @@ @inproceedings{Maka10 month = {3}, gsid = {16713159183791215628}, gscites = {6}, + ss_id = {bb1e8f7832cf85a1d49ee002dbb6e72d0c8251d6}, + all_ss_ids = {['bb1e8f7832cf85a1d49ee002dbb6e72d0c8251d6']}, +} + +@article{Mama23, + author = {Mamani, Gabriel Efrain Humpire and Lessmann, Nikolas and Scholten, Ernst Th. and Prokop, Mathias and Jacobs, Colin and van Ginneken, Bram}, + title = {Kidney abnormality segmentation in thorax-abdomen CT scans}, + doi = {10.48550/ARXIV.2309.03383}, + year = {2023}, + abstract = {In this study, we introduce a deep learning approach for segmenting kidney parenchyma and kidney abnormalities to support clinicians in identifying and quantifying renal abnormalities such as cysts, lesions, masses, metastases, and primary tumors. Our end-to-end segmentation method was trained on 215 contrast-enhanced thoracic-abdominal CT scans, with half of these scans containing one or more abnormalities. We began by implementing our own version of the original 3D U-Net network and incorporated four additional components: an end-to-end multi-resolution approach, a set of task-specific data augmentations, a modified loss function using top-$k$, and spatial dropout. Furthermore, we devised a tailored post-processing strategy. Ablation studies demonstrated that each of the four modifications enhanced kidney abnormality segmentation performance, while three out of four improved kidney parenchyma segmentation. Subsequently, we trained the nnUNet framework on our dataset. By ensembling the optimized 3D U-Net and the nnUNet with our specialized post-processing, we achieved marginally superior results. Our best-performing model attained Dice scores of 0.965 and 0.947 for segmenting kidney parenchyma in two test sets (20 scans without abnormalities and 30 with abnormalities), outperforming an independent human observer who scored 0.944 and 0.925, respectively. In segmenting kidney abnormalities within the 30 test scans containing them, the top-performing method achieved a Dice score of 0.585, while an independent second human observer reached a score of 0.664, suggesting potential for further improvement in computerized methods.
All training data is available to the research community under a CC-BY 4.0 license on https://doi.org/10.5281/zenodo.8014289}, + url = {https://arxiv.org/abs/2309.03383}, + file = {Mama23.pdf:pdf\\Mama23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2309.03383}, + automatic = {yes}, } @inproceedings{Mann00, @@ -15340,7 +19008,9 @@ @article{Mann10 pmid = {20605737}, month = {12}, gsid = {9948311098295922507}, - gscites = {40}, + gscites = {48}, + ss_id = {75cd956b09c2eda5131b334555c749a6c314e486}, + all_ss_ids = {['75cd956b09c2eda5131b334555c749a6c314e486']}, } @conference{Mann10a, @@ -15368,8 +19038,10 @@ @article{Mann11 pmid = {21780225}, month = {7}, gsid = {4842131897026662112}, - gscites = {19}, + gscites = {22}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/97748}, + ss_id = {50d7d177be4b1e7fa5f9c7371604718f128d6528}, + all_ss_ids = {['50d7d177be4b1e7fa5f9c7371604718f128d6528']}, } @conference{Mann12a, @@ -15408,7 +19080,9 @@ @article{Mann14 pmid = {24691143}, month = {9}, gsid = {10399546970199943856}, - gscites = {113}, + gscites = {150}, + ss_id = {afb28ac7f8988cf0da21a8b18a2cd37ac5350917}, + all_ss_ids = {['afb28ac7f8988cf0da21a8b18a2cd37ac5350917']}, } @article{Mann16, @@ -15426,8 +19100,10 @@ @article{Mann16 optnote = {DIAG, RADIOLOGY}, pmid = {26114226}, gsid = {681530597847692433}, - gscites = {18}, + gscites = {28}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/171318}, + ss_id = {0023c0af3833ba26698ff22e236f3625f1533e86}, + all_ss_ids = {['0023c0af3833ba26698ff22e236f3625f1533e86']}, } @article{Mann16a, @@ -15445,7 +19121,9 @@ @article{Mann16a optnote = {DIAG}, pmid = {27917312}, gsid = {8010355516467544387}, - gscites = {1}, + gscites = {3}, + ss_id = {198800090856d9799b21c965032eab972e1ee578}, + all_ss_ids = {['198800090856d9799b21c965032eab972e1ee578']}, } @conference{Mann16c, @@ -15454,15 +19132,15 @@ @conference{Mann16c booktitle = RSNA, year = {2016}, abstract = {PURPOSE: White matter (WM) and gray matter (GM) respond differently to ischemia and thrombolytic treatment. Being able to differentiate WM/GM in CT enables tissue dependent perfusion analysis and automated detection of stroke related pathology. In this work we show the feasibility of segmenting WM/GM in 4DCT images of acute ischemic stroke patients. - - METHOD AND MATERIALS: In total 18 stroke patients who received both a 4DCT and followup MR scan were included in this retrospective study. CT imaging was done on a 320 row scanner with 19 or 24 volumetric acquisitions after contrast injection resulting in 512x512x320 isotropic voxels of 0.5 mm. T1w imaging was done on a 1.5T MR scanner resulting in approximately 384x318x26 voxels of 0.6x0.6x5.5 mm. The MR image was segmented with FSL tools and served as reference standard to train and evaluate the method. The method starts with brain segmentation by atlas registration followed by a refinement using a geodesic active contour with dominating advection term steered by a gradient based speed function. Within the segmented brain, three groups of features are then extracted: intensity, contextual and temporal, including a multiscale representation of the temporal average image weighted according to the exposures of the individual time points to maximize the signaltonoise ratios. In total 120 features were then fed into a nonlinear support vector machine with Gaussian radial basis kernel. A leaveonepatient out cross validation was carried out. Segmentation results were visually inspected for overall quality. 
Dice coefficient (DC) and 95th percentile Hausdorff distance (HD) were reported. - - RESULTS: The segmentations were evaluated as good with the separation of WM/GM at the cortex good to excellent. GM segmentation at the cortex had generally less thickness variations compared to the reference standard. DC were 0.79+-0.06 and 0.77+-0.06, 95% HD were 8.71+-3.22 and 7.11+-3.93 mm, for WM and GM, respectively. - - CONCLUSION: WM and GM segmentation in 4DCT is feasible. - - - CLINICAL RELEVANCE/APPLICATION: WM and GM segmentation in 4DCT enables tissue dependent perfusion analysis and may increase sensitivity of detecting core and penumbra. Volume measurements of WM and GM normalized with the contralateral side may yield an important diagnostic parameter in the acute phase of ischemia.}, + + METHOD AND MATERIALS: In total 18 stroke patients who received both a 4DCT and a follow-up MR scan were included in this retrospective study. CT imaging was done on a 320-row scanner with 19 or 24 volumetric acquisitions after contrast injection, resulting in 512x512x320 isotropic voxels of 0.5 mm. T1w imaging was done on a 1.5T MR scanner resulting in approximately 384x318x26 voxels of 0.6x0.6x5.5 mm. The MR image was segmented with FSL tools and served as the reference standard to train and evaluate the method. The method starts with brain segmentation by atlas registration followed by a refinement using a geodesic active contour with dominating advection term steered by a gradient-based speed function. Within the segmented brain, three groups of features are then extracted: intensity, contextual and temporal, including a multiscale representation of the temporal average image weighted according to the exposures of the individual time points to maximize the signal-to-noise ratios. In total 120 features were then fed into a nonlinear support vector machine with a Gaussian radial basis kernel. A leave-one-patient-out cross-validation was carried out. Segmentation results were visually inspected for overall quality. Dice coefficient (DC) and 95th percentile Hausdorff distance (HD) were reported. + + RESULTS: The segmentations were evaluated as good, with the separation of WM/GM at the cortex good to excellent. GM segmentation at the cortex generally showed less thickness variation compared to the reference standard. DC were 0.79+-0.06 and 0.77+-0.06, 95% HD were 8.71+-3.22 and 7.11+-3.93 mm, for WM and GM, respectively. + + CONCLUSION: WM and GM segmentation in 4DCT is feasible. + + + CLINICAL RELEVANCE/APPLICATION: WM and GM segmentation in 4DCT enables tissue-dependent perfusion analysis and may increase the sensitivity of detecting core and penumbra.
Volume measurements of WM and GM normalized with the contralateral side may yield an important diagnostic parameter in the acute phase of ischemia.}, optnote = {DIAG, RADIOLOGY}, } @@ -15480,7 +19158,8 @@ @article{Mann17 pmid = {28273920}, month = {3}, gsid = {7982996011980143322}, - gscites = {17}, + gscites = {23}, + all_ss_ids = {['682e8febc5d053766fefe1ee2de5ca2ff39762f2', '7f1eabb6591699816ccd89e7765a66dc54a7175c', '86bab119f07a492350c6cc7bd06ed91c73ee807a', 'e7f15929ff71a5a33af66ece92d707a7be7aebc8']}, } @inproceedings{Mann98, @@ -15519,6 +19198,37 @@ @mastersthesis{Mann99a journal = {Master thesis}, } +@inproceedings{Marc17, + author = {Marchesi, Agnese and Bria, Alessandro and Marrocco, Claudio and Molinara, Mario and Mordang, Jan-Jurre and Tortorella, Francesco and Karssemeijer, Nico}, + title = {~~The Effect of Mammogram Preprocessing on Microcalcification Detection with Convolutional Neural Networks}, + doi = {10.1109/cbms.2017.29}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/CBMS.2017.29}, + file = {Marc17.pdf:pdf\\Marc17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {2017 IEEE 30th International Symposium on Computer-Based Medical Systems (CBMS)}, + automatic = {yes}, + citation-count = {7}, +} + +@article{Marc22, + author = {Marchesin, Stefano and Giachelle, Fabio and Marini, Niccolo and Atzori, Manfredo and Boytcheva, Svetla and Buttafuoco, Genziana and Ciompi, Francesco and Di Nunzio, Giorgio Maria and Fraggetta, Filippo and Irrera, Ornella and Muller, Henning and Primov, Todor and Vatrano, Simona and Silvello, Gianmaria}, + title = {Empowering digital pathology applications through explainable knowledge extraction tools.}, + doi = {10.1016/j.jpi.2022.100139}, + pages = {100139}, + volume = {13}, + abstract = {Exa-scale volumes of medical data have been produced for decades. In most cases, the diagnosis is reported in free text, encoding medical knowledge that is still largely unexploited. In order to allow decoding medical knowledge included in reports, we propose an unsupervised knowledge extraction system combining a rule-based expert system with pre-trained Machine Learning (ML) models, namely the Semantic Knowledge Extractor Tool (SKET). Combining rule-based techniques and pre-trained ML models provides high accuracy results for knowledge extraction. This work demonstrates the viability of unsupervised Natural Language Processing (NLP) techniques to extract critical information from cancer reports, opening opportunities such as data mining for knowledge extraction purposes, precision medicine applications, structured report creation, and multimodal learning. SKET is a practical and unsupervised approach to extracting knowledge from pathology reports, which opens up unprecedented opportunities to exploit textual and multimodal medical information in clinical practice. We also propose SKET eXplained (SKET X), a web-based system providing visual explanations about the algorithmic decisions taken by SKET. 
SKET X is designed/developed to support pathologists and domain experts in understanding SKET predictions, possibly driving further improvements to the system.}, + file = {Marc22.pdf:pdf\\Marc22.pdf:PDF}, + journal = {Journal of pathology informatics}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36268087}, + year = {2022}, + ss_id = {57c9e588f21034990f834f5f0610959332d3ed70}, + all_ss_ids = {['57c9e588f21034990f834f5f0610959332d3ed70']}, + gscites = {10}, +} + @article{Mari21, author = {Niccol{\`{o}} Marini and Sebastian Ot{\'{a}}lora and Damian Podareanu and Mart van Rijthoven and Jeroen van der Laak and Francesco Ciompi and Henning Muller and Manfredo Atzori}, title = {Multi{\_}Scale{\_}Tools: A Python Library to Exploit Multi-Scale Whole Slide Images}, @@ -15530,6 +19240,9 @@ @article{Mari21 optnote = {DIAG}, publisher = {Frontiers Media {SA}}, year = {2021}, + ss_id = {51c291c635a61682b1abcfc99c6e21079ba39b67}, + all_ss_ids = {['51c291c635a61682b1abcfc99c6e21079ba39b67']}, + gscites = {14}, } @inproceedings{Mari21a, @@ -15539,6 +19252,41 @@ @inproceedings{Mari21a file = {Mari21a.pdf:pdf\\Mari21a.pdf:PDF}, optnote = {DIAG}, year = {2021}, + ss_id = {695568836a9c87507435fa5cbae0f014666043b8}, + all_ss_ids = {['695568836a9c87507435fa5cbae0f014666043b8']}, + gscites = {11}, +} + +@article{Mari22, + author = {Marini, Niccolo and Marchesin, Stefano and Otalora, Sebastian and Wodzinski, Marek and Caputo, Alessandro and van Rijthoven, Mart and Aswolinskiy, Witali and Bokhorst, John-Melle and Podareanu, Damian and Petters, Edyta and Boytcheva, Svetla and Buttafuoco, Genziana and Vatrano, Simona and Fraggetta, Filippo and van der Laak, Jeroen and Agosti, Maristella and Ciompi, Francesco and Silvello, Gianmaria and Muller, Henning and Atzori, Manfredo}, + title = {Unleashing the potential of digital pathology data by training computer-aided diagnosis models without human annotations.}, + doi = {10.1038/s41746-022-00635-4}, + issue = {1}, + pages = {102}, + volume = {5}, + abstract = {The digitalization of clinical workflows and the increasing performance of deep learning algorithms are paving the way towards new methods for tackling cancer diagnosis. However, the availability of medical specialists to annotate digitized images and free-text diagnostic reports does not scale with the need for large datasets required to train robust computer-aided diagnosis methods that can target the high variability of clinical cases and data produced. This work proposes and evaluates an approach to eliminate the need for manual annotations to train computer-aided diagnosis tools in digital pathology. The approach includes two components, to automatically extract semantically meaningful concepts from diagnostic reports and use them as weak labels to train convolutional neural networks (CNNs) for histopathology diagnosis. The approach is trained (through 10-fold cross-validation) on 3'769 clinical images and reports, provided by two hospitals and tested on over 11'000 images from private and publicly available datasets. The CNN, trained with automatically generated labels, is compared with the same architecture trained with manual labels. 
Results show that combining text analysis and end-to-end deep neural networks allows building computer-aided diagnosis tools that reach solid performance (micro-accuracy = 0.908 at image-level) based only on existing clinical data without the need for manual annotations.}, + file = {Mari22.pdf:pdf\\Mari22.pdf:PDF}, + journal = {NPJ digital medicine}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35869179}, + year = {2022}, + ss_id = {79c9209f3892a5a704abb6be161ca7b41d02775d}, + all_ss_ids = {['79c9209f3892a5a704abb6be161ca7b41d02775d']}, + gscites = {28}, +} + +@inproceedings{Marr18, + author = {Marrocco, Claudio and Bria, Alessandro and Di Sano, Valerio and Borges, Lucas R. and Savelli, Benedetta and Molinara, Mario and Mordang, Jan-Jurre and Karssemeijer, Nico and Tortorella, Francesco}, + title = {~~Mammogram denoising to improve the calcification detection performance of convolutional nets}, + doi = {10.1117/12.2318069}, + year = {2018}, + abstract = {Recently, Convolutional Neural Networks (CNNs) have been successfully used to detect microcalcifications in mammograms. An important step in CNN-based detection is image preprocessing that, in raw mammograms, is usually employed to equalize or remove the intensity-dependent quantum noise. In this work, we show how removing the noise can significantly improve the microcalcification detection performance of a CNN. To this end, we describe the quantum noise with a uniform square-root model. Under this assumption, the generalized Anscombe transformation is applied to the raw mammograms by estimating the noise characteristics from the image at hand. In the Anscombe domain, noise is filtered through an adaptive Wiener filter. The denoised images are recovered with an appropriate inverse transformation and are then used to train the CNN-based detector. Experiments were performed on 1,066 mammograms acquired with GE Senographe systems. MC detection performance of a CNN on noise-free mammograms was statistically significantly higher than on unprocessed mammograms. Results were also superior in comparison with a nonparametric noise-equalizing transformation previously proposed for digital mammograms.}, + url = {http://dx.doi.org/10.1117/12.2318069}, + file = {Marr18.pdf:pdf\\Marr18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {14th International Workshop on Breast Imaging (IWBI 2018)}, + automatic = {yes}, + citation-count = {5}, } @mastersthesis{Mart21, @@ -15552,6 +19300,23 @@ @mastersthesis{Mart21 journal = {Master thesis}, } +@article{Maus18, + author = {Mauschitz, Matthias M. and Bonnemaijer, Pieter W.M. and Diers, Kersten and Rauscher, Franziska G. and Elze, Tobias and Engel, Christoph and Loeffler, Markus and Colijn, Johanna Maria and Ikram, M. Arfan and Vingerling, Johannes R. and Williams, Katie M. and Hammond, Christopher J. and Creuzot-Garcher, Catherine and Bron, Alain M. and Silva, Rufino and Nunes, Sandrina and Delcourt, C\'{e}cile and Cougnard-Gr\'{e}goire, Audrey and Holz, Frank G. and Klaver, Caroline C.W. and Breteler, Monique M.B. and Finger, Robert P. 
and Acar, Niyazi and Anastosopoulos, Eleftherios and Azuara-Blanco, Augusto and Berendschot, Tos and Berendschot, Tos and Bergen, Arthur and Bertelsen, Geir and Binquet, Christine and Bird, Alan and Bobak, Martin and Larsen, Morten B\ogelund and Boon, Camiel and Bourne, Rupert and Br\'{e}tillon, Lionel and Broe, Rebecca and Bron, Alain and Buitendijk, Gabrielle and Cachulo, Maria Luz and Capuano, Vittorio and Carri\`{e}re, Isabelle and Chakravarthy, Usha and Chan, Michelle and Chang, Petrus and Colijn, Johanna and Cougnard-Gr\'{e}goire, Audrey and Cree, Angela and Creuzot-Garcher, Catherine and Cumberland, Phillippa and Cunha-Vaz, Jos\'{e} and Daien, Vincent and De Jong, Eiko and Deak, Gabor and Delcourt, C\'{e}cile and Delyfer, Marie-No\"{e}lle and Hollander, Anneke den and Dietzel, Martha and Erke, Maja Gran and Faria, Pedro and Farinha, Claudia and Fauser, Sascha and Finger, Robert and Fletcher, Astrid and Foster, Paul and Founti, Panayiota and Gorgels, Theo and Grauslund, Jakob and Grus, Franz and Hammond, Christopher and Hense, Hans-Werner and Hermann, Manuel and Hoehn, Ren\'{e} and Hogg, Ruth and Holz, Frank and Hoyng, Carel and Jansonius, Nomdo and Janssen, Sarah and Kersten, Eveline and Khawaja, Anthony and Klaver, Caroline and Korobelnik, Jean-Fran\c{c}ois and Lamparter, Julia and Le Goff, M\'{e}lanie and Lechanteur, Yara and Lehtim\"{a}ki, Terho and Leung, Irene and Lotery, Andrew and Mauschitz, Matthias and Meester, Magda and Merle, B\'{e}n\'{e}dicte and Meyer zu Westrup, Verena and Midena, Edoardo and Miotto, Stefania and Mirshahi, Alireza and Mohan-Sa\"{i}d, Sadek and Mueller, Michael and Muldrew, Alyson and Murta, Joaquim and Nickels, Stefan and Nunes, Sandrina and Owen, Christopher and Peto, Tunde and Pfeiffer, Norbert and Piermarocchi, Stefano and Prokofyeva, Elena and Rahi, Jugnoo and Raitakari, Olli and Rauscher, Franziska and Ribeiro, Luisa and Rougier, Marie-B\'{e}n\'{e}dicte and Rudnicka, Alicja and Sahel, Jos\'{e} and Salonikiou, Aggeliki and Sanchez, Clarisa and Schmitz-Valckenberg, Steffen and Schuster, Alexander and Schweitzer, C\'{e}dric and Segato, Tatiana and Shehata, Jasmin and Silva, Rufino and Silvestri, Giuliana and Simader, Christian and Souied, Eric and Speckauskas, Martynas and Springelkamp, Henriet and Tapp, Robyn and Topouzis, Fotis and van Leeuwen, Elisa and Verhoeven, Virginie and Verzijden, Timo and Von Hanno, Therese and Wiedemann, Peter and Williams, Katie and Wolfram, Christian and Yip, Jennifer and Zerbib, Jennyfer}, + title = {~~Systemic and Ocular Determinants of Peripapillary Retinal Nerve Fiber Layer Thickness Measurements in the European Eye Epidemiology (E3) Population}, + doi = {10.1016/j.ophtha.2018.03.026}, + year = {2018}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ophtha.2018.03.026}, + file = {Maus18.pdf:pdf\\Maus18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Ophthalmology}, + automatic = {yes}, + citation-count = {56}, + pages = {1526-1536}, + volume = {125}, + pmid = {29716786}, +} + @article{McLo04, author = {Kristin J McLoughlin and Philip J Bones and Nico Karssemeijer}, title = {Noise equalization for detection of microcalcification clusters in direct digital mammogram images}, @@ -15567,6 +19332,8 @@ @article{McLo04 month = {3}, gsid = {8745936118971532376}, gscites = {106}, + ss_id = {81cfc7e735194a4493f16e501f985fa009bfdff7}, + all_ss_ids = {['81cfc7e735194a4493f16e501f985fa009bfdff7']}, } @inproceedings{Mehr17, @@ -15647,6 +19414,8 @@ @inproceedings{Meij15a month = {3}, gsid = 
{72943470504272677}, gscites = {2}, + ss_id = {b7911fd32d678c756874904e35b13dd3fa225a56}, + all_ss_ids = {['b7911fd32d678c756874904e35b13dd3fa225a56']}, } @article{Meij15c, @@ -15658,16 +19427,16 @@ @article{Meij15c pages = {1282-1290}, doi = {10.1002/mrm.26024}, abstract = {Purpose - There is currently controversy regarding the benefits of deconvolution-based parameters in stroke imaging, with studies suggesting a similar infarct prediction using summary parameters. We investigate here the performance of deconvolution-based parameters and summary parameters for dynamic-susceptibility contrast (DSC) MRI analysis, with particular emphasis on precision. - - Methods - Numerical simulations were used to assess the contribution of noise and arterial input function (AIF) variability to measurement precision. A realistic AIF range was defined based on in vivo data from an acute stroke clinical study. The simulated tissue curves were analyzed using two popular singular value decomposition (SVD) based algorithms, as well as using summary parameters. - - Results - SVD-based deconvolution methods were found to considerably reduce the AIF-dependency, but a residual AIF bias remained on the calculated parameters. Summary parameters, in turn, show a lower sensitivity to noise. The residual AIF-dependency for deconvolution methods and the large AIF-sensitivity of summary parameters was greatly reduced when normalizing them relative to normal tissue. - - Conclusion - Consistent with recent studies suggesting high performance of summary parameters in infarct prediction, our results suggest that DSC-MRI analysis using properly normalized summary parameters may have advantages in terms of lower noise and AIF-sensitivity as compared to commonly used deconvolution methods.}, + There is currently controversy regarding the benefits of deconvolution-based parameters in stroke imaging, with studies suggesting a similar infarct prediction using summary parameters. We investigate here the performance of deconvolution-based parameters and summary parameters for dynamic-susceptibility contrast (DSC) MRI analysis, with particular emphasis on precision. + + Methods + Numerical simulations were used to assess the contribution of noise and arterial input function (AIF) variability to measurement precision. A realistic AIF range was defined based on in vivo data from an acute stroke clinical study. The simulated tissue curves were analyzed using two popular singular value decomposition (SVD) based algorithms, as well as using summary parameters. + + Results + SVD-based deconvolution methods were found to considerably reduce the AIF-dependency, but a residual AIF bias remained on the calculated parameters. Summary parameters, in turn, show a lower sensitivity to noise. The residual AIF-dependency for deconvolution methods and the large AIF-sensitivity of summary parameters was greatly reduced when normalizing them relative to normal tissue. 
+ + Conclusion + Consistent with recent studies suggesting high performance of summary parameters in infarct prediction, our results suggest that DSC-MRI analysis using properly normalized summary parameters may have advantages in terms of lower noise and AIF-sensitivity as compared to commonly used deconvolution methods.}, file = {Meij15c.pdf:pdf\\Meij15c.pdf:PDF}, optnote = {DIAG}, pmid = {26519871}, @@ -15681,14 +19450,14 @@ @conference{Meij16 booktitle = RSNA, year = {2016}, abstract = {PURPOSE: Due to partial volume effects, accurate segmentation of small cerebral vessels on {CT} is a challenge. We present a novel technique that incorporates local intensity histogram information to segment the cerebral vasculature on {CT} perfusion ({CTP}) scans for suspected ischemic stroke. - - METHOD AND MATERIALS: A pattern recognition approach based on global and local image features followed by a random forest classifier is proposed. The features consist of an automatically computed brain mask denoting intracranial tissue, the first volume of the {CTP} scan, the {CTP} scan temporal average weighted according to the individual exposures to maximize signal-to-noise ratio, the weighted temporal variance ({WTV}), and local histogram features of the {WTV} calculated in a neighborhood of 9x9x9 voxels around a centered voxel. The mean, standard deviation, entropy and mode of the histogram are extracted as local feature values. In total 26 patients that underwent {CTP} for suspicion of stroke were included in this study. The {CTP} was acquired on a 320-detector row scanner. Image size was 512x512x320 voxels by 19 time points with voxel sizes of approximately 0.5 mm. Training was done on 8 patients with manually annotated data. The remaining 18 patients were used as testing set. Segmentations were visually inspected for completeness and overall quality. 3D-patches including the {M2}/{M3} segments of the middle cerebral artery were manually annotated for quantitative evaluation. The modified Hausdorff distance ({MHD}) (maximum of the median {HD}s) and the accuracy (true positive + true negative voxels divided by amount of voxels in a patch) of the segmentation were reported for the annotated patches. - - RESULTS: Overall the method was capable of segmenting the complete cerebral vasculature with inclusion of very small distal vessels. Parts of one internal carotid was missed in one patient because of clipping artefacts. In 3 patients false positive voxels were observed in the skull base region near the internal carotid artery and cavernous sinus. The {MHD} was 0.51A-A?A 1/2 0.28 mm, which is similar to the voxel spacing, and the accuracy was 0.97A-A?A 1/2 0.01. - - CONCLUSION: Our approach provides high-quality segmentation of small cerebral vessels from {CTP} data. - - CLINICAL RELEVANCE/APPLICATION: The high quality segmentation provided by our approach is an important step towards the automated localization and evaluation of vascular pathology in acute stroke patients.}, + + METHOD AND MATERIALS: A pattern recognition approach based on global and local image features followed by a random forest classifier is proposed. 
The features consist of an automatically computed brain mask denoting intracranial tissue, the first volume of the {CTP} scan, the {CTP} scan temporal average weighted according to the individual exposures to maximize signal-to-noise ratio, the weighted temporal variance ({WTV}), and local histogram features of the {WTV} calculated in a neighborhood of 9x9x9 voxels around a centered voxel. The mean, standard deviation, entropy and mode of the histogram are extracted as local feature values. In total 26 patients who underwent {CTP} for suspicion of stroke were included in this study. The {CTP} was acquired on a 320-detector row scanner. Image size was 512x512x320 voxels by 19 time points with voxel sizes of approximately 0.5 mm. Training was done on 8 patients with manually annotated data. The remaining 18 patients were used as the testing set. Segmentations were visually inspected for completeness and overall quality. 3D-patches including the {M2}/{M3} segments of the middle cerebral artery were manually annotated for quantitative evaluation. The modified Hausdorff distance ({MHD}) (maximum of the median {HD}s) and the accuracy (true positive + true negative voxels divided by the number of voxels in a patch) of the segmentation were reported for the annotated patches. + + RESULTS: Overall the method was capable of segmenting the complete cerebral vasculature with inclusion of very small distal vessels. Parts of one internal carotid artery were missed in one patient because of clipping artefacts. In 3 patients false positive voxels were observed in the skull base region near the internal carotid artery and cavernous sinus. The {MHD} was 0.51 +- 0.28 mm, which is similar to the voxel spacing, and the accuracy was 0.97 +- 0.01. + + CONCLUSION: Our approach provides high-quality segmentation of small cerebral vessels from {CTP} data. + + CLINICAL RELEVANCE/APPLICATION: The high-quality segmentation provided by our approach is an important step towards the automated localization and evaluation of vascular pathology in acute stroke patients.}, file = {Meij16.pdf:pdf\\Meij16.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, } @@ -15699,13 +19468,13 @@ @conference{Meij17 booktitle = ECR, year = {2017}, abstract = {PURPOSE - To perform a pilot study to explore the effect of a new post-processing technique for 4D-CTA on speed and accuracy of the detection of intracranial vessel occlusions in acute stroke. This technique color-codes the contrast arrival time in the cerebral vasculature in 4D-CTA so that abnormally delayed vascular territories are easily detected. - METHOD AND MATERIALS - We selected 10 patients without and 10 patients with a confirmed single vessel occlusion on CTA from our database of acute ischemic stroke patients, so that occlusions of the ICA, MCA, ACA and PCA of varying subtlety were included. Whole-brain CT perfusion was performed on a 320 detector-row scanner. Color-coded 4D-CTA images were obtained by centering the color scale of vessel time-to-peak (TTP) on the modus of the TTP histogram. Temporal MIP of 4D-CTA with and without color-coding were evaluated in random order for the presence of vessel occlusion by to two neuroradiologists. Time-to-detection and accuracy of detection of vessel occlusions were evaluated. - RESULTS - One false-positive vessel occlusion was rated on color-mapping by both observers. Overall, the average time-to-detection decreased from 37.0s to 19.4s (p<0.03) and the average accuracy of vessel occlusion detection increased from 0.825 to 0.85 with color-mapping.
- CONCLUSION - Color-mapping of cerebral vasculature in 4D-CTA improves the speed and may improve the accuracy of the detection of vessel occlusions in acute stroke patients.}, + To perform a pilot study to explore the effect of a new post-processing technique for 4D-CTA on speed and accuracy of the detection of intracranial vessel occlusions in acute stroke. This technique color-codes the contrast arrival time in the cerebral vasculature in 4D-CTA so that abnormally delayed vascular territories are easily detected. + METHOD AND MATERIALS + We selected 10 patients without and 10 patients with a confirmed single vessel occlusion on CTA from our database of acute ischemic stroke patients, so that occlusions of the ICA, MCA, ACA and PCA of varying subtlety were included. Whole-brain CT perfusion was performed on a 320 detector-row scanner. Color-coded 4D-CTA images were obtained by centering the color scale of vessel time-to-peak (TTP) on the mode of the TTP histogram. Temporal MIPs of 4D-CTA with and without color-coding were evaluated in random order for the presence of vessel occlusion by two neuroradiologists. Time-to-detection and accuracy of detection of vessel occlusions were evaluated. + RESULTS + One false-positive vessel occlusion was rated on color-mapping by both observers. Overall, the average time-to-detection decreased from 37.0s to 19.4s (p<0.03) and the average accuracy of vessel occlusion detection increased from 0.825 to 0.85 with color-mapping. + CONCLUSION + Color-mapping of cerebral vasculature in 4D-CTA improves the speed and may improve the accuracy of the detection of vessel occlusions in acute stroke patients.}, file = {Meij17.pdf:pdf\\Meij17.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, } @@ -15717,13 +19486,15 @@ @article{Meij17a year = {2017}, volume = {7}, abstract = {A robust method is presented for the segmentation of the full cerebral vasculature in 4-dimensional (4D) computed tomography (CT). The method consists of candidate vessel selection, feature extraction, random forest classification and postprocessing. Image features include, among others, the weighted temporal variance image and parameters, including entropy, of an intensity histogram in a local region at different scales.
These histogram parameters proved to be a strong feature in the detection of vessels regardless of shape and size. The method was trained and tested on a large database of 264 patients with suspicion of acute ischemic stroke who underwent 4D CT in our hospital in the period January 2014 to December 2015. In this database there is a large variety of patients observed in everyday clinical practice. The method was trained on 19 4D CT images of patients with manual annotations by two trained medical assistants. Five subvolumes representing different regions of the cerebral vasculature were annotated in each image in the training set. The evaluation of the method was done on 242 patients. One out of five subvolumes was randomly annotated in 159 patients and was used for quantitative evaluation. Segmentations were inspected visually for the entire study cohort to assess failures. A total of 16 (<8%) patients showed severe under- or over-segmentation and were reported as failures. Quantitative evaluation in comparison to the reference annotation showed a Dice coefficient of 0.91 +- 0.07 and a modified Hausdorff distance of 0.23 +- 0.22 mm, which is smaller than voxel spacing.}, doi = {10.1038/s41598-017-15617-w}, file = {Meij17a.pdf:pdf\\Meij17a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {29142240}, url = {https://www.nature.com/articles/s41598-017-15617-w}, month = {11}, + all_ss_ids = {['6119971f513cfa3a9dce2cff139f041f11bcb404', 'fb5c4bf41fd879c9c64028a817db7f7ab6aba429']}, + gscites = {38}, } @inproceedings{Meij18, @@ -15736,12 +19507,14 @@ pages = {105751Q}, doi = {10.1117/12.2292974}, abstract = {Segmentation of the arteries and veins of the cerebral vasculature is important for improved visualization and for the detection of vascular related pathologies including arterio-venous malformations. We propose a three-dimensional fully convolutional neural network (CNN), with Time-to-Signal images as input, extended with the distance to the center of gravity of the brain as a spatial feature integrated at the abstract level of the CNN. The method is trained and validated on 6 and tested on 4 4D CT patient imaging data sets. The reference standard was acquired by manual annotations by an experienced observer. Quantitative evaluation shows a mean Dice similarity coefficient of 0.936 +- 0.027 and 0.973 +- 0.012, a mean absolute volume difference of 4.36 +- 5.47 % and 1.79 +- 2.26 % for artery and vein, respectively, and an overall accuracy of 0.962 +- 0.017. Average calculation time per volume on the test set was approximately one minute.
Our method shows promising results and enables fast and accurate segmentation of arteries and veins in full 4D CT imaging data.}, file = {Meij18.pdf:pdf\\Meij18.pdf:PDF}, optnote = {DIAG, Radiology}, month = {2}, gsid = {11326400040196917956}, - gscites = {10}, + gscites = {13}, + ss_id = {d5822d3e621307d4928b2bd20643640676b09495}, + all_ss_ids = {['d5822d3e621307d4928b2bd20643640676b09495']}, } @article{Meij18a, @@ -15753,18 +19526,20 @@ @article{Meij18a pages = {421-426}, doi = {10.1016/j.wneu.2018.02.189}, abstract = {Background - In case of carotid artery occlusion, the risk and extent of ischemic cerebral damage is highly dependent on the pathways of collateral flow, including the anatomy of the circle of Willis. - In this report, cases are presented to illustrate that {4D-CTA} can be considered as a noninvasive alternative to DSA for the evaluation of circle of Willis collateral flow. - Case Description - Five patients with unilateral internal carotid artery ({ICA}) occlusion underwent {4D-CTA} for the evaluation of intracranial hemodynamics. Next to a visual evaluation of {4D-CTA}, temporal information was visualized using a normalized color scale on the cerebral vasculature, which enabled quantification of the contrast bolus arrival time. In these patients, {4D-CTA} demonstrated dominant {MCA} blood supply on the side of {ICA} occlusion originating either from the contralateral {ICA} or from the posterior circulation via the communicating arteries. - Conclusions - Temporal dynamics of collateral flow in the circle of Willis can be depicted with {4D-CTA} in patients with a unilateral carotid artery occlusion.}, + In case of carotid artery occlusion, the risk and extent of ischemic cerebral damage is highly dependent on the pathways of collateral flow, including the anatomy of the circle of Willis. + In this report, cases are presented to illustrate that {4D-CTA} can be considered as a noninvasive alternative to DSA for the evaluation of circle of Willis collateral flow. + Case Description + Five patients with unilateral internal carotid artery ({ICA}) occlusion underwent {4D-CTA} for the evaluation of intracranial hemodynamics. Next to a visual evaluation of {4D-CTA}, temporal information was visualized using a normalized color scale on the cerebral vasculature, which enabled quantification of the contrast bolus arrival time. In these patients, {4D-CTA} demonstrated dominant {MCA} blood supply on the side of {ICA} occlusion originating either from the contralateral {ICA} or from the posterior circulation via the communicating arteries. + Conclusions + Temporal dynamics of collateral flow in the circle of Willis can be depicted with {4D-CTA} in patients with a unilateral carotid artery occlusion.}, file = {Meij18a.pdf:pdf\\Meij18a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {29530689}, month = {6}, gsid = {7527352445172307553}, gscites = {5}, + ss_id = {b8f20f8de8a22406d7308e6a7e2bc9508bf78210}, + all_ss_ids = {['b8f20f8de8a22406d7308e6a7e2bc9508bf78210']}, } @conference{Meij18b, @@ -15773,13 +19548,13 @@ @conference{Meij18b booktitle = RSNA, year = {2018}, abstract = {PURPOSE: Segmentation of the complete cerebral vasculature in {4D-CTA} is important for improved visualization, automated pathology detection and assessment of the collateral flow. We present a deep learning approach to segment the complete cerebral vasculature in {4D-CTA} of patients with suspected stroke. 
- MATERIALS AND METHODS: In total 162 patients that underwent {4D-CTA} for suspicion of stroke were included in this study. The scans were acquired on a 320-detector row scanner (Canon Medical Systems Corporation, Japan). Image size was 512x512x320 voxels by 19 time points with isotropic voxel sizes of approximately 0.5 mm. A 3D fully convolutional neural network ({CNN}), U-Net, was proposed with integration of a spatial feature in the final convolutional layer of the network. The weighted temporal average and variance were derived from the 4D-CTA and used as input for the network. As spatial feature the Euclidean distance from the center of the brain to the skull was used. Training was done on 19 patients with manually annotated data. The remaining 143 patients were used as testing set. Segmentations were visually inspected for completeness and overall quality. Two observers manually annotated three dimensional sub-volumes throughout the brain to include different sized vessels for quantitative evaluation. The Dice similarity coefficient ({DSC}) and Mean Contour Distance ({MCD}) of the segmentations were reported. - RESULTS - Overall the method was capable of segmenting the complete cerebral vasculature. Smaller distal vessels (e.g. M3) showed similar segmentation results as the larger vessels (e.g. internal carotid artery). The {DSC} was 0.91+-0.08 and the {MCD} was 0.26+-0.24 mm which is below voxel spacing. Computation time was less than 90 seconds for processing a full {4D-CTA} data set. - CONCLUSION: - A 3D U-Net with spatial features provides fast, robust and accurate segmentations of the full cerebral vasculature in {4D-CTA}. - Clinical Relevance - The high quality segmentation provided by our method is an important step towards the automated localization and evaluation of vascular pathology in acute stroke patients.}, + MATERIALS AND METHODS: In total 162 patients that underwent {4D-CTA} for suspicion of stroke were included in this study. The scans were acquired on a 320-detector row scanner (Canon Medical Systems Corporation, Japan). Image size was 512x512x320 voxels by 19 time points with isotropic voxel sizes of approximately 0.5 mm. A 3D fully convolutional neural network ({CNN}), U-Net, was proposed with integration of a spatial feature in the final convolutional layer of the network. The weighted temporal average and variance were derived from the 4D-CTA and used as input for the network. As spatial feature the Euclidean distance from the center of the brain to the skull was used. Training was done on 19 patients with manually annotated data. The remaining 143 patients were used as testing set. Segmentations were visually inspected for completeness and overall quality. Two observers manually annotated three dimensional sub-volumes throughout the brain to include different sized vessels for quantitative evaluation. The Dice similarity coefficient ({DSC}) and Mean Contour Distance ({MCD}) of the segmentations were reported. + RESULTS + Overall the method was capable of segmenting the complete cerebral vasculature. Smaller distal vessels (e.g. M3) showed similar segmentation results as the larger vessels (e.g. internal carotid artery). The {DSC} was 0.91+-0.08 and the {MCD} was 0.26+-0.24 mm which is below voxel spacing. Computation time was less than 90 seconds for processing a full {4D-CTA} data set. + CONCLUSION: + A 3D U-Net with spatial features provides fast, robust and accurate segmentations of the full cerebral vasculature in {4D-CTA}. 
+ Clinical Relevance + The high-quality segmentation provided by our method is an important step towards the automated localization and evaluation of vascular pathology in acute stroke patients.}, file = {Meij18b.pdf:pdf\\Meij18b.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, } @@ -15790,14 +19565,14 @@ @conference{Meij18c booktitle = ESNR, year = {2018}, abstract = {{PURPOSE} - Nowadays {4D-CTA} is available as a non-invasive alternative to conventional angiography, with reported high diagnostic accuracy in the evaluation of different neurovascular disorders, including arteriovenous shunts and collateral flow pathways. Optimized processing of {4D-CTA} is crucial, considering the large amount of data generated. Enhanced visualization of {4D-CTA} can be achieved by applying color-mapping of temporal information in the cerebral vasculature. - {METHOD AND MATERIALS} - Color-mapping processing of {4D-CTA} is achieved in two steps. First, the full cerebral vasculature is segmented by features extraction and random forest classification. Second, the color-scale is adjusted using the histogram using the histogram of the arrival times of the segmented vessels. Early contrast bolus arrival(e.g. healthy internal carotid artery) is labeled red, intermediate arrival yellow, and delayed contrast arrival is labeled blue. - Color-mapping of {4D-CTA} was applied in patients suspected of cranial arteriovenous shunts, and in patients with unilateral carotid artery occlusion for the evaluation of circle of Willis collateral flow. The patients were scanned on a wide-row 320 slice detector {CT} (Toshiba Aquilion {ONE}), enabling whole-head coverage at high temporal resolution. - {RESULTS} - Arterialization of venous vascular structures is the hallmark of arterio-venous shunts, which is easily and accurately identified on color-mapping of {4D-CTA}. Temporal dynamics of collateral flow in the circle of Willis is adequately depicted with {4D-CTA} in patients with unilateral carotid artery occlusion. - {CONCLUSION} - Color-mapping of {4D-CTA} accurately displays temporal information of the cerebral vasculature, which can facilitate the detection of arterio-venous shunts and the evaluation of collateral flow in intracranial steno-occlusive disease.}, + Nowadays {4D-CTA} is available as a non-invasive alternative to conventional angiography, with reported high diagnostic accuracy in the evaluation of different neurovascular disorders, including arteriovenous shunts and collateral flow pathways. Optimized processing of {4D-CTA} is crucial, considering the large amount of data generated. Enhanced visualization of {4D-CTA} can be achieved by applying color-mapping of temporal information in the cerebral vasculature. + {METHOD AND MATERIALS} + Color-mapping processing of {4D-CTA} is achieved in two steps. First, the full cerebral vasculature is segmented by feature extraction and random forest classification. Second, the color scale is adjusted using the histogram of the arrival times of the segmented vessels. Early contrast bolus arrival (e.g. healthy internal carotid artery) is labeled red, intermediate arrival yellow, and delayed contrast arrival is labeled blue. + Color-mapping of {4D-CTA} was applied in patients suspected of cranial arteriovenous shunts, and in patients with unilateral carotid artery occlusion for the evaluation of circle of Willis collateral flow.
The patients were scanned on a wide-row 320-slice detector {CT} (Toshiba Aquilion {ONE}), enabling whole-head coverage at high temporal resolution. + {RESULTS} + Arterialization of venous vascular structures is the hallmark of arterio-venous shunts, which is easily and accurately identified on color-mapping of {4D-CTA}. Temporal dynamics of collateral flow in the circle of Willis are adequately depicted with {4D-CTA} in patients with unilateral carotid artery occlusion. + {CONCLUSION} + Color-mapping of {4D-CTA} accurately displays temporal information of the cerebral vasculature, which can facilitate the detection of arterio-venous shunts and the evaluation of collateral flow in intracranial steno-occlusive disease.}, file = {Meij18c.pdf:pdf\\Meij18c.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, } @article{Meij19a, @@ -15817,6 +19592,9 @@ publisher = {Am Soc Neuroradiology}, year = {2019}, month = {8}, + ss_id = {42f7d5d9b1df931226dcdbf354a07e357cdbc186}, + all_ss_ids = {['42f7d5d9b1df931226dcdbf354a07e357cdbc186']}, + gscites = {4}, } @phdthesis{Meij20, @@ -15845,6 +19623,9 @@ @article{Meij20a volume = {66}, pages = {101810}, year = {2020}, + ss_id = {3db60e34dad56e3949508d8dc1324d5331f9a2c6}, + all_ss_ids = {['3db60e34dad56e3949508d8dc1324d5331f9a2c6']}, + gscites = {14}, } @article{Meij20b, @@ -15859,6 +19640,9 @@ number = {4}, pages = {e190178}, year = {2020}, + ss_id = {ffa6b97fd94b3641ddf22af707719a42ee4db9ac}, + all_ss_ids = {['ffa6b97fd94b3641ddf22af707719a42ee4db9ac']}, + gscites = {8}, } @inproceedings{Mein19, @@ -15870,6 +19654,9 @@ abstract = {Convolutional neural networks have not only been applied for classification of voxels, objects, or images, for instance, but have also been proposed as a body part regressor. We pick up this underexplored idea and evaluate its value for registration: A CNN is trained to output the relative height within the human body in axial CT scans, and the resulting scores are used for quick alignment between different timepoints.
Preliminary results confirm that this allows both fast and robust prealignment compared with iterative approaches.}, file = {Mein19.pdf:pdf\\Mein19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {f5d48773035e68452f06310f5db78cb11299dc0a}, + all_ss_ids = {['f5d48773035e68452f06310f5db78cb11299dc0a']}, + gscites = {2}, } @article{Melb11, @@ -15905,6 +19692,8 @@ @inproceedings{Mele12 optnote = {DIAG, RADIOLOGY}, gsid = {11683397277520872641}, gscites = {1}, + ss_id = {ed8e8be2e3c9e0520090e9b2065377d9623f01f2}, + all_ss_ids = {['ed8e8be2e3c9e0520090e9b2065377d9623f01f2']}, } @conference{Mele12a, @@ -15930,7 +19719,9 @@ @inproceedings{Mele14 optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {7361253230908315066}, - gscites = {11}, + gscites = {13}, + ss_id = {1e742e102e77881837a2c8b8ada295217a7ea4ef}, + all_ss_ids = {['1e742e102e77881837a2c8b8ada295217a7ea4ef']}, } @article{Mele14a, @@ -15948,7 +19739,9 @@ pmid = {25163057}, month = {1}, gsid = {4430711056723080400}, - gscites = {52}, + gscites = {105}, + ss_id = {32eab666e2a48a743f3d278ed9974582e513553d}, + all_ss_ids = {['32eab666e2a48a743f3d278ed9974582e513553d']}, } @article{Mele14b, @@ -15968,6 +19761,8 @@ gsid = {9401041546322028279}, gscites = {7}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/132831}, + ss_id = {98b95b0af8ba3c25a87ad5755519164fcfeee2d0}, + all_ss_ids = {['98b95b0af8ba3c25a87ad5755519164fcfeee2d0']}, } @phdthesis{Mele15, @@ -16000,7 +19795,9 @@ @article{Mele16 optnote = {DIAG, RADIOLOGY}, pmid = {26660889}, gsid = {14270097264018368898}, - gscites = {28}, + gscites = {48}, + ss_id = {b0a13599be9ea703f27a90bc544511ed8c93b2b5}, + all_ss_ids = {['b0a13599be9ea703f27a90bc544511ed8c93b2b5']}, } @article{Mele16a, @@ -16018,7 +19815,9 @@ pmid = {27126741}, month = {4}, gsid = {1152274775860878952}, - gscites = {53}, + gscites = {103}, + ss_id = {3b198ccfa068ba133c3c1eae1c8beac1d2ce88e8}, + all_ss_ids = {['3b198ccfa068ba133c3c1eae1c8beac1d2ce88e8']}, } @article{Mele17, @@ -16036,8 +19835,10 @@ optnote = {DIAG, RADIOLOGY}, pmid = {28786796}, gsid = {14209453799880160478}, - gscites = {10}, + gscites = {20}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/181866}, + ss_id = {8125ba843b6e5c20524c9b549398a368e0613d88}, + all_ss_ids = {['8125ba843b6e5c20524c9b549398a368e0613d88']}, } @article{Mele18, @@ -16055,7 +19856,9 @@ pmid = {29663963}, month = {5}, gsid = {3793586673643236456}, - gscites = {10}, + gscites = {21}, + ss_id = {0831d694fa14d96916f62226c6c74d862f90772c}, + all_ss_ids = {['0831d694fa14d96916f62226c6c74d862f90772c']}, } @article{Mema05, @@ -16088,6 +19891,8 @@ optnote = {DIAG, RADIOLOGY}, gsid = {7760805046827918819}, gscites = {4}, + ss_id = {c0dcc63c3114c66cadb229b2f66e7a0b7ee3fbb5}, + all_ss_ids = {['c0dcc63c3114c66cadb229b2f66e7a0b7ee3fbb5']}, } @conference{Mend07a, @@ -16114,6 +19919,8 @@ month = {2}, gsid = {3518168224524955974}, gscites = {1}, + ss_id = {fc218a97ffea02d88a808f2706c966874c998ce5}, + all_ss_ids = {['fc218a97ffea02d88a808f2706c966874c998ce5']}, } @article{Mend09a, @@ -16133,6 +19940,8 @@ gsid = {8629946616599488980}, gscites = {104}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/81291}, + ss_id = {5fd0e531592071d01d4286f08566a86362e26df9}, + all_ss_ids = {['5fd0e531592071d01d4286f08566a86362e26df9']}, } @conference{Mend09b, @@ -16157,7 +19966,9 @@ optnote = {4DCT, DIAG, RADIOLOGY}, month = {3}, gsid =
{1760708134553287553}, - gscites = {5}, + gscites = {6}, + ss_id = {824ab3050caa860dc50cbe8859a5b2249588a5e2}, + all_ss_ids = {['824ab3050caa860dc50cbe8859a5b2249588a5e2']}, } @article{Mend10a, @@ -16175,8 +19986,10 @@ @article{Mend10a pmid = {20632608}, month = {5}, gsid = {17637183029799425094}, - gscites = {32}, + gscites = {35}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/88438}, + ss_id = {375ac3921f2fe9516b01a35a37aedd05f2093836}, + all_ss_ids = {['375ac3921f2fe9516b01a35a37aedd05f2093836', '5aa81fedd2e09d24fd1ba706782a6ce24cde5483']}, } @phdthesis{Mend10b, @@ -16209,6 +20022,8 @@ @article{Mend11 gsid = {12609398248666249823}, gscites = {84}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/98391}, + ss_id = {12e543454c9b14e372718e9618fa40685db83ad5}, + all_ss_ids = {['12e543454c9b14e372718e9618fa40685db83ad5']}, } @article{Mend12, @@ -16228,6 +20043,8 @@ @article{Mend12 gsid = {12508173963296523395}, gscites = {11}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/108266}, + ss_id = {b10df775262da5a98ec6cf85403a7bbfd26c1aeb}, + all_ss_ids = {['b10df775262da5a98ec6cf85403a7bbfd26c1aeb']}, } @inproceedings{Mend15a, @@ -16243,6 +20060,27 @@ @inproceedings{Mend15a optnote = {DIAG, RADIOLOGY}, gsid = {4332605455572363117}, gscites = {3}, + ss_id = {5e8d92d23959661851fa64fee44597df24b828b2}, + all_ss_ids = {['5e8d92d23959661851fa64fee44597df24b828b2']}, +} + +@article{Meno23, + author = {Menotti, Laura and Silvello, Gianmaria and Atzori, Manfredo and Boytcheva, Svetla and Ciompi, Francesco and Di Nunzio, Giorgio Maria and Fraggetta, Filippo and Giachelle, Fabio and Irrera, Ornella and Marchesin, Stefano and Marini, Niccol\`{o} and M\"{u}ller, Henning and Primov, Todor}, + title = {Modelling digital health data: The ExaMode ontology for computational pathology}, + doi = {10.1016/j.jpi.2023.100332}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.jpi.2023.100332}, + file = {Meno23.pdf:pdf\Meno23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Pathology Informatics}, + citation-count = {0}, + automatic = {yes}, + pages = {100332}, + volume = {14}, + ss_id = {2ca6685fcb499bee0f054df1fdc4b3ae298be614}, + all_ss_ids = {['2ca6685fcb499bee0f054df1fdc4b3ae298be614']}, + gscites = {0}, } @inproceedings{Merc19, @@ -16254,6 +20092,9 @@ @inproceedings{Merc19 abstract = {Detection of epithelial cells has powerful implications such as being an integral part of nuclear pleomorphism scoring for breast cancer grading. We exploit the point annotations inside nuclei boundaries to estimate their bounding boxes using empirical analysis on the cell bodies and the coarse instance segmentation masks obtained from an image segmentation algorithm. 
Our experiments show that training a state-of-the-art object detection network with a recently proposed optimizer on simple bounding box estimations performs promising epithelial cell detection, achieving a mean average precision (mAP) score of 71.36\% on tumor and 59.65\% on benign cells in the test set.}, file = {Merc19.pdf:pdf\\Merc19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {c3533243da19c79f7cf819315b7b9ed2e603a6c2}, + all_ss_ids = {['c3533243da19c79f7cf819315b7b9ed2e603a6c2']}, + gscites = {6}, } @inproceedings{Merc20, @@ -16267,6 +20108,9 @@ @inproceedings{Merc20 file = {:pdf/Merc20.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {4}, + ss_id = {e8abcf480f30bf5c079023c0a5bf80308dcd78ab}, + all_ss_ids = {['e8abcf480f30bf5c079023c0a5bf80308dcd78ab']}, + gscites = {22}, } @conference{Merc20a, @@ -16278,6 +20122,37 @@ @conference{Merc20a optnote = {DIAG}, } +@article{Merc20b, + author = {Mercan, Caner and Balkenhol, Maschenka and Salgado, Roberto and Sherman, Mark and Vielh, Philippe and Vreuls, Willem and Polonia, Antonio and Horlings, Hugo M. and Weichert, Wilko and Carter, Jodi M. and Bult, Peter and Christgen, Matthias and Denkert, Carsten and van de Vijver, Koen and van der Laak, Jeroen and Ciompi, Francesco}, + title = {~~Automated Scoring of Nuclear Pleomorphism Spectrum with Pathologist-level Performance in Breast Cancer}, + doi = {10.48550/ARXIV.2012.04974}, + year = {2020}, + abstract = {Nuclear pleomorphism, defined herein as the extent of abnormalities in the overall appearance of tumor nuclei, is one of the components of the three-tiered breast cancer grading. Given that nuclear pleomorphism reflects a continuous spectrum of variation, we trained a deep neural network on a large variety of tumor regions from the collective knowledge of several pathologists, without constraining the network to the traditional three-category classification. We also motivate an additional approach in which we discuss the additional benefit of normal epithelium as baseline, following the routine clinical practice where pathologists are trained to score nuclear pleomorphism in tumor, having the normal breast epithelium for comparison. In multiple experiments, our fully-automated approach could achieve top pathologist-level performance in select regions of interest as well as at whole slide images, compared to ten and four pathologists, respectively.}, + url = {https://arxiv.org/abs/2012.04974}, + file = {Merc20b.pdf:pdf\\Merc20b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2012.04974}, + automatic = {yes}, +} + +@article{Merc22, + author = {Mercan, Caner and Balkenhol, Maschenka and Salgado, Roberto and Sherman, Mark and Vielh, Philippe and Vreuls, Willem and Polonia, Antonio and Horlings, Hugo M. and Weichert, Wilko and Carter, Jodi M. and Bult, Peter and Christgen, Matthias and Denkert, Carsten and van de Vijver, Koen and Bokhorst, John-Melle and van der Laak, Jeroen and Ciompi, Francesco}, + title = {Deep learning for fully-automated nuclear pleomorphism scoring in breast cancer.}, + doi = {10.1038/s41523-022-00488-w}, + issue = {1}, + pages = {120}, + volume = {8}, + abstract = {To guide the choice of treatment, every new breast cancer is assessed for aggressiveness (i.e., graded) by an experienced histopathologist. Typically, this tumor grade consists of three components, one of which is the nuclear pleomorphism score (the extent of abnormalities in the overall appearance of tumor nuclei). 
The degree of nuclear pleomorphism is subjectively classified from 1 to 3, where a score of 1 most closely resembles epithelial cells of normal breast epithelium and 3 shows the greatest abnormalities. Establishing numerical criteria for grading nuclear pleomorphism is challenging, and inter-observer agreement is poor. Therefore, we studied the use of deep learning to develop fully automated nuclear pleomorphism scoring in breast cancer. The reference standard used for training the algorithm consisted of the collective knowledge of an international panel of 10 pathologists on a curated set of regions of interest covering the entire spectrum of tumor morphology in breast cancer. To fully exploit the information provided by the pathologists, a first-of-its-kind deep regression model was trained to yield a continuous scoring rather than limiting the pleomorphism scoring to the standard three-tiered system. Our approach preserves the continuum of nuclear pleomorphism without necessitating a large data set with explicit annotations of tumor nuclei. Once translated to the traditional system, our approach achieves top pathologist-level performance in multiple experiments on regions of interest and whole-slide images, compared to a panel of 10 and 4 pathologists, respectively.}, + file = {Merc22.pdf:pdf\\Merc22.pdf:PDF}, + journal = {NPJ breast cancer}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36347887}, + year = {2022}, + ss_id = {76f8e809b6ffac6eac0806d590c0c2519d75dac2}, + all_ss_ids = {['76f8e809b6ffac6eac0806d590c0c2519d75dac2']}, + gscites = {6}, +} + @article{Mert12, author = {Mertzanidou, Thomy and Hipwell, John and Cardoso, M Jorge and Zhang, Xiying and Tanner, Christine and Ourselin, Sebastien and Bick, Ulrich and Huisman, Henkjan and Karssemeijer, Nico and Hawkes, David}, title = {{MRI} to X-ray mammography registration using a volume-preserving affine transformation}, @@ -16293,8 +20168,10 @@ @article{Mert12 pmid = {22513136}, month = {7}, gsid = {877286232400307497}, - gscites = {26}, + gscites = {28}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/108597}, + ss_id = {a04e943e648173c7944c554bc587960765feb114}, + all_ss_ids = {['a04e943e648173c7944c554bc587960765feb114']}, } @inproceedings{Mert16, @@ -16311,7 +20188,9 @@ @inproceedings{Mert16 file = {:pdf/Mert16.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {2930968711439371239}, - gscites = {3}, + gscites = {4}, + ss_id = {3487257137f253ce44733bc433e14249a87c07ad}, + all_ss_ids = {['3487257137f253ce44733bc433e14249a87c07ad']}, } @article{Mert17, @@ -16324,23 +20203,41 @@ @article{Mert17 pages = {935-948}, doi = {10.1002/mp.12077}, abstract = {PURPOSE: - In breast imaging, radiological in vivo images, such as x-ray mammography and magnetic resonance imaging (MRI), are used for tumor detection, diagnosis, and size determination. After excision, the specimen is typically sliced into slabs and a small subset is sampled. Histopathological imaging of the stained samples is used as the gold standard for characterization of the tumor microenvironment. A 3D volume reconstruction of the whole specimen from the 2D slabs could facilitate bridging the gap between histology and in vivo radiological imaging. This task is challenging, however, due to the large deformation that the breast tissue undergoes after surgery and the significant undersampling of the specimen obtained in histology. In this work, we present a method to reconstruct a coherent 3D volume from 2D digital radiographs of the specimen slabs. 
- - METHODS: - To reconstruct a 3D breast specimen volume, we propose the use of multiple target neighboring slices, when deforming each 2D slab radiograph in the volume, rather than performing pairwise registrations. The algorithm combines neighborhood slice information with free-form deformations, which enables a flexible, nonlinear deformation to be computed subject to the constraint that a coherent 3D volume is obtained. The neighborhood information provides adequate constraints, without the need for any additional regularization terms. - - RESULTS: - The volume reconstruction algorithm is validated on clinical mastectomy samples using a quantitative assessment of the volume reconstruction smoothness and a comparison with a whole specimen 3D image acquired for validation before slicing. Additionally, a target registration error of 5 mm (comparable to the specimen slab thickness of 4 mm) was obtained for five cases. The error was computed using manual annotations from four observers as gold standard, with interobserver variability of 3.4 mm. Finally, we illustrate how the reconstructed volumes can be used to map histology images to a 3D specimen image of the whole sample (either MRI or CT). - - CONCLUSIONS: - Qualitative and quantitative assessment has illustrated the benefit of using our proposed methodology to reconstruct a coherent specimen volume from serial slab radiographs. To our knowledge, this is the first method that has been applied to clinical breast cases, with the goal of reconstructing a whole specimen sample. The algorithm can be used as part of the pipeline of mapping histology images to ex vivo and ultimately in vivo radiological images of the breast.}, + In breast imaging, radiological in vivo images, such as x-ray mammography and magnetic resonance imaging (MRI), are used for tumor detection, diagnosis, and size determination. After excision, the specimen is typically sliced into slabs and a small subset is sampled. Histopathological imaging of the stained samples is used as the gold standard for characterization of the tumor microenvironment. A 3D volume reconstruction of the whole specimen from the 2D slabs could facilitate bridging the gap between histology and in vivo radiological imaging. This task is challenging, however, due to the large deformation that the breast tissue undergoes after surgery and the significant undersampling of the specimen obtained in histology. In this work, we present a method to reconstruct a coherent 3D volume from 2D digital radiographs of the specimen slabs. + + METHODS: + To reconstruct a 3D breast specimen volume, we propose the use of multiple target neighboring slices, when deforming each 2D slab radiograph in the volume, rather than performing pairwise registrations. The algorithm combines neighborhood slice information with free-form deformations, which enables a flexible, nonlinear deformation to be computed subject to the constraint that a coherent 3D volume is obtained. The neighborhood information provides adequate constraints, without the need for any additional regularization terms. + + RESULTS: + The volume reconstruction algorithm is validated on clinical mastectomy samples using a quantitative assessment of the volume reconstruction smoothness and a comparison with a whole specimen 3D image acquired for validation before slicing. Additionally, a target registration error of 5 mm (comparable to the specimen slab thickness of 4 mm) was obtained for five cases. 
The error was computed using manual annotations from four observers as gold standard, with interobserver variability of 3.4 mm. Finally, we illustrate how the reconstructed volumes can be used to map histology images to a 3D specimen image of the whole sample (either MRI or CT). + + CONCLUSIONS: + Qualitative and quantitative assessment has illustrated the benefit of using our proposed methodology to reconstruct a coherent specimen volume from serial slab radiographs. To our knowledge, this is the first method that has been applied to clinical breast cases, with the goal of reconstructing a whole specimen sample. The algorithm can be used as part of the pipeline of mapping histology images to ex vivo and ultimately in vivo radiological images of the breast.}, file = {Mert17.pdf:pdf\\Mert17.pdf:PDF}, optnote = {DIAG}, pmid = {28064435}, month = {3}, gsid = {2215474408889452803}, - gscites = {11}, + gscites = {17}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/170535}, + ss_id = {5eaff5ff94bfb7d25ef87679e9fcf243346b30e5}, + all_ss_ids = {['5eaff5ff94bfb7d25ef87679e9fcf243346b30e5']}, +} + +@article{Mesk17, + author = {Mesker, W. and van Pelt, G. and Huijbers, A. and van der Laak, J. and Dequeker, E. and Fl\'{e}jou, J-F. and Al Dieri, R. and Kerr, D. and Van Krieken, J. and Tollenaar, R.}, + title = {~~Improving treatment decisions in colon cancer: The tumor-stroma ratio (TSR) additional to the TNM classification}, + doi = {10.1093/annonc/mdx393.087}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1093/annonc/mdx393.087}, + file = {Mesk17.pdf:pdf\\Mesk17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Annals of Oncology}, + automatic = {yes}, + citation-count = {0}, + pages = {v190-v191}, + volume = {28}, } @article{Mets11a, @@ -16358,7 +20255,9 @@ @article{Mets11a pmid = {22028353}, month = {10}, gsid = {9535176740666077921}, - gscites = {122}, + gscites = {126}, + ss_id = {ccd229b5d8ea29c484d4fa8fa9823899afb2a061}, + all_ss_ids = {['ccd229b5d8ea29c484d4fa8fa9823899afb2a061']}, } @article{Mets12, @@ -16377,6 +20276,8 @@ @article{Mets12 month = {8}, gsid = {3220250186597997230}, gscites = {70}, + ss_id = {edc0d900d33d2ee0013dbb130d4f7956d77ebba9}, + all_ss_ids = {['edc0d900d33d2ee0013dbb130d4f7956d77ebba9']}, } @article{Mets12a, @@ -16394,6 +20295,8 @@ @article{Mets12a month = {12}, gsid = {12983185550555310641}, gscites = {114}, + ss_id = {5b54d066b85be837e4896989f53d10e0b13dbecb}, + all_ss_ids = {['5b54d066b85be837e4896989f53d10e0b13dbecb']}, } @article{Mets12e, @@ -16411,7 +20314,9 @@ @article{Mets12e pmid = {22826394}, month = {8}, gsid = {9417645695025612100}, - gscites = {35}, + gscites = {51}, + ss_id = {177331ab3937bb17a77cf453ba6684d3ac028b22}, + all_ss_ids = {['177331ab3937bb17a77cf453ba6684d3ac028b22']}, } @article{Mets12h, @@ -16428,7 +20333,9 @@ @article{Mets12h pmid = {23064488}, month = {10}, gsid = {8262566518944908606}, - gscites = {52}, + gscites = {54}, + ss_id = {2012c7e5e9a96cafa9128f057db62ba372e3c702}, + all_ss_ids = {['2012c7e5e9a96cafa9128f057db62ba372e3c702']}, } @article{Mets13a, @@ -16447,6 +20354,8 @@ @article{Mets13a month = {4}, gsid = {8130074603070439298}, gscites = {13}, + ss_id = {4e79aacf36a21b972e387d566141177e549469a0}, + all_ss_ids = {['4e79aacf36a21b972e387d566141177e549469a0']}, } @article{Mets13b, @@ -16463,7 +20372,9 @@ @article{Mets13b number = {1}, pmid = {23711184}, gsid = {14303730188902322930}, - gscites = {51}, + gscites = {68}, + ss_id = {1e9cef47e4a4180ed74b89301ef2dbec30b66990}, + 
all_ss_ids = {['1e9cef47e4a4180ed74b89301ef2dbec30b66990']}, } @article{Mets13c, @@ -16498,7 +20409,9 @@ @article{Mets16 pmid = {26945759}, month = {3}, gsid = {16090325245936356049}, - gscites = {19}, + gscites = {24}, + ss_id = {e571e6bec0974bb3d8b508183075258b783c9ec7}, + all_ss_ids = {['e571e6bec0974bb3d8b508183075258b783c9ec7']}, } @article{Mets17, @@ -16518,6 +20431,8 @@ @article{Mets17 pmid = {27255399}, gsid = {6398901592421084320}, gscites = {15}, + ss_id = {943bdf087596bb850f860c4074bfff718ffc7ce8}, + all_ss_ids = {['943bdf087596bb850f860c4074bfff718ffc7ce8']}, } @article{Mets18, @@ -16533,7 +20448,9 @@ @article{Mets18 optnote = {DIAG, RADIOLOGY}, pmid = {28986629}, gsid = {8930722888609062115}, - gscites = {15}, + gscites = {27}, + ss_id = {eb383f9d03fc189c9b3c7bb2e7132d5fdb07f067}, + all_ss_ids = {['eb383f9d03fc189c9b3c7bb2e7132d5fdb07f067']}, } @article{Mets18a, @@ -16550,8 +20467,27 @@ @article{Mets18a year = {2018}, month = {4}, gsid = {13243596039614426394}, - gscites = {3}, + gscites = {11}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/191051}, + ss_id = {6a340e97fd3c7c5f9f2f5401d1a2fb07c39fb8ed}, + all_ss_ids = {['8c9301f67b46bbac884c588232ebca05dd3ba8ae', '6a340e97fd3c7c5f9f2f5401d1a2fb07c39fb8ed']}, +} + +@article{Mets18b, + author = {Mets, Onno M. and Schaefer-Prokop, Cornelia M. and de Jong, Pim A.}, + title = {~~Cyst-related primary lung malignancies: an important and relatively unknown imaging appearance of (early) lung cancer}, + doi = {10.1183/16000617.0079-2018}, + year = {2018}, + abstract = {It is well known that lung cancer can manifest itself in imaging as solid and subsolid nodules or masses. However, in this era of increased computed tomography use another morphological computed tomography appearance of lung cancer is increasingly being recognised, presenting as a malignancy in relation to cystic airspaces. Despite the fact that it seems to be a relatively common finding in daily practice, literature on this entity is scarce and presumably the overall awareness is limited. This can lead to misinterpretation and delay in diagnosis and, therefore, increased awareness is urgently needed. This review aims to illustrate the imaging appearances of cyst-related primary lung malignancies, demonstrate its mimickers and potential pitfalls, and discuss the clinical implications based on the available literature and our own experience in four different hospitals.}, + url = {http://dx.doi.org/10.1183/16000617.0079-2018}, + file = {Mets18b.pdf:pdf\\Mets18b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Respiratory Review}, + automatic = {yes}, + citation-count = {12}, + pages = {180079}, + volume = {27}, + pmid = {30385691},
} @article{Meye20, @@ -16561,6 +20514,8 @@ @article{Meye20 year = {2020}, abstract = {Background and Objective: Accurate and reliable segmentation of the prostate gland in MR images can support the clinical assessment of prostate cancer, as well as the planning and monitoring of focal and loco-regional therapeutic interventions. Despite the availability of multi-planar MR scans due to standardized protocols, the majority of segmentation approaches presented in the literature consider the axial scans only. Methods: We propose an anisotropic 3D multi-stream CNN architecture, which processes additional scan directions to produce a higher-resolution isotropic prostate segmentation. We investigate two variants of our architecture, which work on two (dual-plane) and three (triple-plane) image orientations, respectively. We compare them with the standard baseline (single-plane) used in literature, i.e., plain axial segmentation. To realize a fair comparison, we employ a hyperparameter optimization strategy to select optimal configurations for the individual approaches. Results: Training and evaluation on two datasets spanning multiple sites obtain statistical significant improvement over the plain axial segmentation ($p<0.05$ on the Dice similarity coefficient). The improvement can be observed especially at the base ($0.898$ single-plane vs. $0.906$ triple-plane) and apex ($0.888$ single-plane vs. $0.901$ dual-plane). Conclusion: This study indicates that models employing two or three scan directions are superior to plain axial segmentation. The knowledge of precise boundaries of the prostate is crucial for the conservation of risk structures. Thus, the proposed models have the potential to improve the outcome of prostate cancer diagnosis and therapies.}, file = {:http\://arxiv.org/pdf/2009.11120v1:PDF}, + all_ss_ids = {['4607ec477007deebd18b51e8ba787beae8647303']}, + gscites = {26}, } @article{Meye20a, @@ -16574,6 +20529,9 @@ @article{Meye20a abstract = {Background and Objective: Accurate and reliable segmentation of the prostate gland in MR images can support the clinical assessment of prostate cancer, as well as the planning and monitoring of focal and loco-regional therapeutic interventions. Despite the availability of multi-planar MR scans due to standardized protocols, the majority of segmentation approaches presented in the literature consider the axial scans only. In this work, we investigate whether a neural network processing anisotropic multi-planar images could work in the context of a semantic segmentation task, and if so, how this additional information would improve the segmentation quality. Methods: We propose an anisotropic 3D multi-stream CNN architecture, which processes additional scan directions to produce a high-resolution isotropic prostate segmentation.
We investigate two variants of our architecture, which work on two (dual-plane) and three (triple-plane) image orientations, respectively. The influence of additional information used by these models is evaluated by comparing them with a single-plane baseline processing only axial images. To realize a fair comparison, we employ a hyperparameter optimization strategy to select optimal configurations for the individual approaches. Results: Training and evaluation on two datasets spanning multiple sites show statistical significant improvement over the plain axial segmentation (on the Dice similarity coefficient). The improvement can be observed especially at the base (0.898 single-plane vs. 0.906 triple-plane) and apex (0.888 single-plane vs. 0.901 dual-plane). Conclusion: This study indicates that models employing two or three scan directions are superior to plain axial segmentation. The knowledge of precise boundaries of the prostate is crucial for the conservation of risk structures. Thus, the proposed models have the potential to improve the outcome of prostate cancer diagnosis and therapies.}, url = {https://arxiv.org/abs/2009.11120}, file = {:pdf\\Meye20.pdf:PDF}, + ss_id = {4607ec477007deebd18b51e8ba787beae8647303}, + all_ss_ids = {['4607ec477007deebd18b51e8ba787beae8647303']}, + gscites = {26}, } @inproceedings{Mich20, @@ -16582,6 +20540,9 @@ @inproceedings{Mich20 booktitle = {6th International Conference on Image Formation in X-Ray Computed Tomography}, year = {2020}, optnote = {DIAG, RADIOLOGY}, + ss_id = {b02dc7bfc3211dd0113ce1ccd69dc17462083eaf}, + all_ss_ids = {['b02dc7bfc3211dd0113ce1ccd69dc17462083eaf']}, + gscites = {0}, } @article{Mich21a, @@ -16593,6 +20554,9 @@ @article{Mich21a url = {https://doi.org/10.1007/s00330-021-08394-8}, abstract = {Multiparametric MRI has high diagnostic accuracy for detecting prostate cancer, but non-invasive prediction of tumor grade remains challenging. Characterizing tumor perfusion by exploiting the fractal nature of vascular anatomy might elucidate the aggressive potential of a tumor. This study introduces the concept of fractal analysis for characterizing prostate cancer perfusion and reports about its usefulness for non-invasive prediction of tumor grade.}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/252017}, + ss_id = {32fb40cb7d2abc53a1f56c94fe592b0c39870901}, + all_ss_ids = {['32fb40cb7d2abc53a1f56c94fe592b0c39870901']}, + gscites = {10}, } @article{Mich21b, @@ -16604,6 +20568,42 @@ @article{Mich21b url = {https://doi.org/10.1007/s00330-021-08358-y}, abstract = {Multiparametric MRI with Prostate Imaging Reporting and Data System (PI-RADS) assessment is sensitive but not specific for detecting clinically significant prostate cancer. This study validates the diagnostic accuracy of the recently suggested fractal dimension (FD) of perfusion for detecting clinically significant cancer.}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/249481}, + ss_id = {926e78dce5e42624d849dde4be1e8cdab215c42a}, + all_ss_ids = {['926e78dce5e42624d849dde4be1e8cdab215c42a']}, + gscites = {2}, +} + +@article{Mies22, + author = {Miesen, Laura and B\'{a}ndi, P\'{e}ter and Willemsen, Brigith and Mooren, Fieke and Strieder, Thiago and Boldrini, Eva and Drenic, Vedran and Eymael, Jennifer and Wetzels, Roy and Lotz, Johannes and Weiss, Nick and Steenbergen, Eric and van Kuppevelt, Toin H. and van Erp, Merijn and van der Laak, Jeroen and Endlich, Nicole and Moeller, Marcus J. and Wetzels, Jack F. M.
and Jansen, Jitske and Smeets, Bart}, + title = {~~Parietal epithelial cells maintain the epithelial cell continuum forming Bowman's space in focal segmental glomerulosclerosis}, + doi = {10.1242/dmm.046342}, + year = {2022}, + abstract = {In the glomerulus, Bowman's space is formed by a continuum of glomerular epithelial cells. In focal segmental glomerulosclerosis (FSGS), glomeruli show segmental scarring, a result of activated parietal epithelial cells (PECs) invading the glomerular tuft. The segmental scars interrupt the epithelial continuum. However, non-sclerotic segments seem to be preserved even in glomeruli with advanced lesions. We studied the histology of the segmental pattern in Munich Wistar Fr\"{o}mter rats, a model for secondary FSGS. Our results showed that matrix layers lined with PECs cover the sclerotic lesions. These PECs formed contacts with podocytes of the uninvolved tuft segments, restoring the epithelial continuum. Formed Bowman's spaces were still connected to the tubular system. In biopsies of patients with secondary FSGS, we also detected matrix layers formed by PECs, separating the uninvolved from the sclerotic glomerular segments. PECs have a major role in the formation of glomerulosclerosis; we show here that in FSGS they also restore the glomerular epithelial cell continuum that surrounds Bowman's space. This process may be beneficial and indispensable for glomerular filtration in the uninvolved segments of sclerotic glomeruli.}, + url = {http://dx.doi.org/10.1242/dmm.046342}, + file = {Mies22.pdf:pdf\\Mies22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Disease Models \& Mechanisms}, + automatic = {yes}, + citation-count = {3}, + volume = {15}, + pmid = {34927672}, +} + +@article{Mile17, + author = {Milenkovi\'{c}, Jana and Dalm\i\c{s}, Mehmet Ufuk and Zgajnar, Janez and Platel, Bram}, + title = {~~Textural analysis of early-phase spatiotemporal changes in contrast enhancement of breast lesions imaged with an ultrafast DCE-MRI protocol}, + doi = {10.1002/mp.12408}, + year = {2017}, + abstract = {Purpose: New ultrafast view-sharing sequences have enabled breast dynamic contrast-enhanced magnetic resonance imaging (DCE-MRI) to be performed at high spatial and temporal resolution. The aim of this study is to evaluate the diagnostic potential of textural features that quantify the spatiotemporal changes of the contrast-agent uptake in computer-aided diagnosis of malignant and benign breast lesions imaged with high spatial and temporal resolution DCE-MRI. Method: The proposed approach is based on the textural analysis quantifying the spatial variation of six dynamic features of the early-phase contrast-agent uptake of a lesion's largest cross-sectional area. The textural analysis is performed by means of the second-order gray-level co-occurrence matrix, gray-level run-length matrix and gray-level difference matrix. This yields 35 textural features to quantify the spatial variation of each of the six dynamic features, providing a feature set of 210 features in total. The proposed feature set is evaluated based on receiver operating characteristic (ROC) curve analysis in a cross-validation scheme for random forests (RF) and two support vector machine classifiers, with linear and radial basis function (RBF) kernel.
Evaluation is done on a dataset with 154 breast lesions (83 malignant and 71 benign) and compared to a previous approach based on 3D morphological features and the average and standard deviation of the same dynamic features over the entire lesion volume as well as their average for the smaller region of the strongest uptake rate. Result: The area under the ROC curve (AUC) obtained by the proposed approach with the RF classifier was 0.8997, which was significantly higher (P = 0.0198) than the performance achieved by the previous approach (AUC = 0.8704) on the same dataset. Similarly, the proposed approach obtained a significantly higher result for both SVM classifiers with RBF (P = 0.0096) and linear kernel (P = 0.0417) obtaining AUC of 0.8876 and 0.8548, respectively, compared to AUC values of previous approach of 0.8562 and 0.8311, respectively. Conclusion: The proposed approach based on 2D textural features quantifying spatiotemporal changes of the contrast-agent uptake significantly outperforms the previous approach based on 3D morphology and dynamic analysis in differentiating the malignant and benign breast lesions, showing its potential to aid clinical decision making.}, + url = {http://dx.doi.org/10.1002/mp.12408}, + file = {Mile17.pdf:pdf\\Mile17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Physics}, + automatic = {yes}, + citation-count = {14}, + pages = {4652-4664}, + volume = {44}, + pmid = {28622412}, } @article{Moha11, @@ -16620,7 +20621,9 @@ @article{Moha11 pmid = {21474499}, month = {4}, gsid = {2503222556262111335,16225817165193692000}, - gscites = {125}, + gscites = {158}, + ss_id = {11f3274c97ef62ae0fc888928ce0c939d03c79f7}, + all_ss_ids = {['11f3274c97ef62ae0fc888928ce0c939d03c79f7']}, } @article{Moha13, @@ -16637,7 +20640,9 @@ @article{Moha13 number = {1}, pmid = {23688060}, gsid = {283001529912021713}, - gscites = {24}, + gscites = {27}, + ss_id = {e0a4f8f3d89cf5b3bbf5db1be0ab370896e68498}, + all_ss_ids = {['e0a4f8f3d89cf5b3bbf5db1be0ab370896e68498']}, } @article{Moha13a, @@ -16654,7 +20659,9 @@ @article{Moha13a pmid = {24035313}, month = {1}, gsid = {5267033159797573513}, - gscites = {26}, + gscites = {33}, + ss_id = {955dc85fbe185045d897252e2c8cc45e851d95ae}, + all_ss_ids = {['955dc85fbe185045d897252e2c8cc45e851d95ae']}, } @article{Moha14, @@ -16672,7 +20679,9 @@ @article{Moha14 year = {2014}, month = {8}, gsid = {12560823050881457785}, - gscites = {43}, + gscites = {46}, + ss_id = {37152c02e2e92a6c796c062ccb749974197d4f0c}, + all_ss_ids = {['37152c02e2e92a6c796c062ccb749974197d4f0c']}, } @article{Moha15, @@ -16690,7 +20699,9 @@ @article{Moha15 pmid = {25614166}, month = {1}, gsid = {13584450798353585582}, - gscites = {24}, + gscites = {47}, + ss_id = {5d64335056fe6f14983a92010a3b6ae8c92d4b73}, + all_ss_ids = {['5d64335056fe6f14983a92010a3b6ae8c92d4b73']}, } @conference{Moha16, @@ -16721,16 +20732,18 @@ @article{Mooi18 optnote = {DIAG}, month = {6}, gsid = {11990056638194684452}, - gscites = {10}, + gscites = {14}, + ss_id = {484e166d13638c7a458fc95ce81026d30bedec10}, + all_ss_ids = {['484e166d13638c7a458fc95ce81026d30bedec10']}, } @mastersthesis{Mooi19, author = {Germonda Mooij}, title = {Using GANs to synthetically stain histopathological images to generate training data for automatic mitosis detection in breast tissue}, abstract = {Generative adversarial networks (GANs) have been proven effective at mapping medical images from one domain to another (e.g. from CT to MRI).
- In this study we investigate the effectiveness of GANs at mapping images of breast tissue between histopathological stains. - Breast cancer is the most common cancer in women worldwide. Counting mitotic figures in histological images of breast cancer tissue has been shown to be a reliable and independent prognostic marker. Most successful methods for automatic counting involve training deep neural networks on H&E stained slides. This training requires extensive manual annotations of mitotic figures in H&E stained slides, which suffers from a low inter-observer agreement. Manual counting in PHH3 stained slides has a much higher inter-observer agreement. - In this project we aimed to train GANs to map PHH3 slides to synthetic H&E slides and vice versa. A mitosis classifier is used to quantify the quality of the synthetic images, by comparing its performance after training on synthetic images with training on real images.}, + In this study we investigate the effectiveness of GANs at mapping images of breast tissue between histopathological stains. + Breast cancer is the most common cancer in women worldwide. Counting mitotic figures in histological images of breast cancer tissue has been shown to be a reliable and independent prognostic marker. Most successful methods for automatic counting involve training deep neural networks on H&E stained slides. This training requires extensive manual annotations of mitotic figures in H&E stained slides, which suffers from a low inter-observer agreement. Manual counting in PHH3 stained slides has a much higher inter-observer agreement. + In this project we aimed to train GANs to map PHH3 slides to synthetic H&E slides and vice versa. A mitosis classifier is used to quantify the quality of the synthetic images, by comparing its performance after training on synthetic images with training on real images.}, file = {Mooi19.pdf:pdf/Mooi19.pdf:PDF}, optnote = {DIAG}, school = {Radboud University Medical Center}, @@ -16765,6 +20778,8 @@ @inproceedings{Moor18a abstract = {Computer-aided detection or decision support systems aim to improve breast cancer screening programs by helping radiologists to evaluate digital mammography (DM) exams. Commonly such methods proceed in two steps: selection of candidate regions for malignancy, and later classification as either malignant or not. In this study, we present a candidate detection method based on deep learning to automatically detect and additionally segment soft tissue lesions in DM. 
A database of DM exams (mostly bilateral and two views) was collected from our institutional archive.}, file = {:pdf/Moor18a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['9fca3cab7f8cf1071094591bc13ca53de528dac6']}, + gscites = {32}, } @article{Moor2018, @@ -16789,6 +20804,9 @@ @inproceedings{Mor18 file = {:pdf/Mori19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {3}, + ss_id = {aa0eeb67cdccfa3ea49185dc9d7711c21ab29dde}, + all_ss_ids = {['aa0eeb67cdccfa3ea49185dc9d7711c21ab29dde']}, + gscites = {10}, } @conference{Mord12, @@ -16814,6 +20832,8 @@ @inproceedings{Mord13 month = {3}, gsid = {6413494452242276462}, gscites = {1}, + ss_id = {2c75e1d3844da3a4152d44ed23d2281a84051083}, + all_ss_ids = {['2c75e1d3844da3a4152d44ed23d2281a84051083']}, } @inproceedings{Mord14, @@ -16829,6 +20849,8 @@ @inproceedings{Mord14 optnote = {DIAG}, gsid = {13385385247096114761}, gscites = {2}, + ss_id = {933b9110259e74592a7208444887f79a625210f3}, + all_ss_ids = {['933b9110259e74592a7208444887f79a625210f3']}, } @inproceedings{Mord15, @@ -16846,6 +20868,8 @@ @inproceedings{Mord15 month = {3}, gsid = {16648788988505657805}, gscites = {3}, + ss_id = {8c1ea2ea519e1512cb46b28cd7814c8af900dc06}, + all_ss_ids = {['8c1ea2ea519e1512cb46b28cd7814c8af900dc06']}, } @article{Mord16, @@ -16860,19 +20884,21 @@ @article{Mord16 doi = {10.1118/1.4943376}, url = {http://dx.doi.org/10.1118/1.4943376}, abstract = {PURPOSE: - In the past decades, computer-aided detection (CADe) systems have been developed to aid screening radiologists in the detection of malignant microcalcifications. These systems are useful to avoid perceptual oversights and can increase the radiologists' detection rate. However, due to the high number of false positives marked by these CADe systems, they are not yet suitable as an independent reader. Breast arterial calcifications (BACs) are one of the most frequent false positives marked by CADe systems. In this study, a method is proposed for the elimination of BACs as positive findings. Removal of these false positives will increase the performance of the CADe system in finding malignant microcalcifications. - METHODS: - A multistage method is proposed for the removal of BAC findings. The first stage consists of a microcalcification candidate selection, segmentation and grouping of the microcalcifications, and classification to remove obvious false positives. In the second stage, a case-based selection is applied where cases are selected which contain BACs. In the final stage, BACs are removed from the selected cases. The BACs removal stage consists of a GentleBoost classifier trained on microcalcification features describing their shape, topology, and texture. Additionally, novel features are introduced to discriminate BACs from other positive findings. - RESULTS: - The CADe system was evaluated with and without BACs removal. Here, both systems were applied on a validation set containing 1088 cases of which 95 cases contained malignant microcalcifications. After bootstrapping, free-response receiver operating characteristics and receiver operating characteristics analyses were carried out. Performance between the two systems was compared at 0.98 and 0.95 specificity. At a specificity of 0.98, the sensitivity increased from 37% to 52% and the sensitivity increased from 62% up to 76% at a specificity of 0.95. 
Partial areas under the curve in the specificity range of 0.8-1.0 were significantly different between the system without BACs removal and the system with BACs removal, 0.129 A+- 0.009 versus 0.144 A+- 0.008 (p<0.05), respectively. Additionally, the sensitivity at one false positive per 50 cases and one false positive per 25 cases increased as well, 37% versus 51% (p<0.05) and 58% versus 67% (p<0.05) sensitivity, respectively. Additionally, the CADe system with BACs removal reduces the number of false positives per case by 29% on average. The same sensitivity at one false positive per 50 cases in the CADe system without BACs removal can be achieved at one false positive per 80 cases in the CADe system with BACs removal. - CONCLUSIONS: - By using dedicated algorithms to detect and remove breast arterial calcifications, the performance of CADe systems can be improved, in particular, at false positive rates representative for operating points used in screening.}, + In the past decades, computer-aided detection (CADe) systems have been developed to aid screening radiologists in the detection of malignant microcalcifications. These systems are useful to avoid perceptual oversights and can increase the radiologists' detection rate. However, due to the high number of false positives marked by these CADe systems, they are not yet suitable as an independent reader. Breast arterial calcifications (BACs) are one of the most frequent false positives marked by CADe systems. In this study, a method is proposed for the elimination of BACs as positive findings. Removal of these false positives will increase the performance of the CADe system in finding malignant microcalcifications. + METHODS: + A multistage method is proposed for the removal of BAC findings. The first stage consists of a microcalcification candidate selection, segmentation and grouping of the microcalcifications, and classification to remove obvious false positives. In the second stage, a case-based selection is applied where cases are selected which contain BACs. In the final stage, BACs are removed from the selected cases. The BACs removal stage consists of a GentleBoost classifier trained on microcalcification features describing their shape, topology, and texture. Additionally, novel features are introduced to discriminate BACs from other positive findings. + RESULTS: + The CADe system was evaluated with and without BACs removal. Here, both systems were applied on a validation set containing 1088 cases of which 95 cases contained malignant microcalcifications. After bootstrapping, free-response receiver operating characteristics and receiver operating characteristics analyses were carried out. Performance between the two systems was compared at 0.98 and 0.95 specificity. At a specificity of 0.98, the sensitivity increased from 37% to 52% and the sensitivity increased from 62% up to 76% at a specificity of 0.95. Partial areas under the curve in the specificity range of 0.8-1.0 were significantly different between the system without BACs removal and the system with BACs removal, 0.129 A+- 0.009 versus 0.144 A+- 0.008 (p<0.05), respectively. Additionally, the sensitivity at one false positive per 50 cases and one false positive per 25 cases increased as well, 37% versus 51% (p<0.05) and 58% versus 67% (p<0.05) sensitivity, respectively. Additionally, the CADe system with BACs removal reduces the number of false positives per case by 29% on average. 
The same sensitivity at one false positive per 50 cases in the CADe system without BACs removal can be achieved at one false positive per 80 cases in the CADe system with BACs removal. + CONCLUSIONS: + By using dedicated algorithms to detect and remove breast arterial calcifications, the performance of CADe systems can be improved, in particular, at false positive rates representative for operating points used in screening.}, file = {:pdf/Mord16.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {27036566}, publisher = {American Association of Physicists in Medicine ({AAPM})}, gsid = {13574103016601897640}, gscites = {12}, + ss_id = {f96c79f90cb53fb753033a0602753c114ba0e9fb}, + all_ss_ids = {['f96c79f90cb53fb753033a0602753c114ba0e9fb']}, } @inproceedings{Mord16b, @@ -16889,7 +20915,9 @@ @inproceedings{Mord16b file = {:pdf/Mord16b.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {16371627561967415273}, - gscites = {49}, + gscites = {70}, + ss_id = {2c922a216c9684d539ffaf858cace091b2a2f7c7}, + all_ss_ids = {['2c922a216c9684d539ffaf858cace091b2a2f7c7']}, } @article{Mord17, @@ -16907,7 +20935,9 @@ @article{Mord17 pmid = {28182277}, month = {3}, gsid = {4803214541066334157}, - gscites = {5}, + gscites = {16}, + ss_id = {c8a213ffbfae47c7471de82e7a6c03b5e48006f6}, + all_ss_ids = {['c8a213ffbfae47c7471de82e7a6c03b5e48006f6']}, } @phdthesis{Mord18, @@ -16940,10 +20970,10 @@ @article{Mord18a pmid = {29043464}, gsid = {16257625997408044224}, gscites = {12}, + ss_id = {d6e204d39bc7fb333edc2336187986f5e665a775}, + all_ss_ids = {['d6e204d39bc7fb333edc2336187986f5e665a775']}, } - - @article{Mori17, author = {Moriakov, Nikita}, title = {On Effective Birkhoff's Ergodic Theorem for Computable Actions of Amenable Groups}, @@ -16965,6 +20995,8 @@ @inproceedings{Mori20 abstract = {Unpaired image-to-image translation has attracted significant interest due to the invention of CycleGAN, a method which utilizes a combination of adversarial and cycle consistency losses to avoid the need for paired data. It is known that the CycleGAN problem might admit multiple solutions, and our goal in this paper is to analyze the space of exact solutions and to give perturbation bounds for approximate solutions. We show theoretically that the exact solution space is invariant with respect to automorphisms of the underlying probability spaces, and, furthermore, that the group of automorphisms acts freely and transitively on the space of exact solutions. We examine the case of zero pure CycleGAN loss first in its generality, and, subsequently, expand our analysis to approximate solutions for extended CycleGAN loss where identity loss term is included. In order to demonstrate that these results are applicable, we show that under mild conditions nontrivial smooth automorphisms exist. Furthermore, we provide empirical evidence that neural networks can learn these automorphisms with unexpected and unwanted results. We conclude that finding optimal solutions to the CycleGAN loss does not necessarily lead to the envisioned result in image-to-image translation tasks and that underlying hidden symmetries can render the result useless.}, file = {Mori20.pdf:pdf\\Mori20.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {['6d37b7ceb99c6cc36be602ae9ab2a26232d11bb5', '8f938d6b54c38e469997969380383de635e654aa']}, + gscites = {9}, } @inproceedings{Mosh12, @@ -16979,6 +21011,34 @@ @inproceedings{Mosh12 month = {8}, } +@article{Mour23, + author = {van Mourik, Tim and Koopmans, Peter J. and Bains, Lauren J. and Norris, David G. 
and Jehee, Janneke F.M.}, + title = {~~Investigation of layer-specific BOLD signal in the human visual cortex during visual attention}, + doi = {10.52294/001c.87638}, + year = {2023}, + abstract = {Directing spatial attention towards a particular stimulus location enhances cortical responses at corresponding regions in cortex. How attention modulates the laminar response profile within the attended region remains unclear, however. In this paper, we use high-field (7T) functional magnetic resonance imaging to investigate the effects of attention on laminar activity profiles in areas V1-V3 both when a stimulus was presented to the observer and in the absence of visual stimulation. Replicating previous findings, we find robust increases in the overall BOLD response for attended regions in cortex, both with and without visual stimulation. When analysing the BOLD response across the individual layers in visual cortex, we observed no evidence for laminar-specific differentiation with attention. We offer several potential explanations for these results, including theoretical, methodological and technical reasons. Additionally, we provide all data and pipelines openly in order to promote analytic consistency across layer-specific studies and improve reproducibility.}, + url = {http://dx.doi.org/10.52294/001c.87638}, + file = {Mour23.pdf:pdf\\Mour23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Aperture Neuro}, + automatic = {yes}, + citation-count = {0}, + volume = {3}, +} + +@article{Muck20, + author = {Muckley, Matthew J. and Riemenschneider, Bruno and Radmanesh, Alireza and Kim, Sunwoo and Jeong, Geunu and Ko, Jingyu and Jun, Yohan and Shin, Hyungseob and Hwang, Dosik and Mostapha, Mahmoud and Arberet, Simon and Nickel, Dominik and Ramzi, Zaccharie and Ciuciu, Philippe and Starck, Jean-Luc and Teuwen, Jonas and Karkalousos, Dimitrios and Zhang, Chaoping and Sriram, Anuroop and Huang, Zhengnan and Yakubova, Nafissa and Lui, Yvonne and Knoll, Florian}, + title = {~~Results of the 2020 fastMRI Challenge for Machine Learning MR Image Reconstruction}, + doi = {10.48550/ARXIV.2012.06318}, + year = {2020}, + abstract = {Accelerating MRI scans is one of the principal outstanding problems in the MRI research community. Towards this goal, we hosted the second fastMRI competition targeted towards reconstructing MR images with subsampled k-space data. We provided participants with data from 7,299 clinical brain scans (de-identified via a HIPAA-compliant procedure by NYU Langone Health), holding back the fully-sampled data from 894 of these scans for challenge evaluation purposes. In contrast to the 2019 challenge, we focused our radiologist evaluations on pathological assessment in brain images. We also debuted a new Transfer track that required participants to submit models evaluated on MRI scanners from outside the training set. We received 19 submissions from eight different groups. Results showed one team scoring best in both SSIM scores and qualitative radiologist evaluations. We also performed analysis on alternative metrics to mitigate the effects of background noise and collected feedback from the participants to inform future challenges. 
Lastly, we identify common failure modes across the submissions, highlighting areas of need for future research in the MRI reconstruction community.}, + url = {https://arxiv.org/abs/2012.06318}, + file = {Muck20.pdf:pdf\\Muck20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2012.06318}, + automatic = {yes}, +} + @inproceedings{Muen09, author = {S. E. A. Muenzing and K. Murphy and B. van Ginneken and J. P. W. Pluim}, title = {Automatic detection of registration errors for quality assessment in medical image registration}, @@ -16994,6 +21054,8 @@ @inproceedings{Muen09 month = {2}, gsid = {479701880220897797}, gscites = {10}, + ss_id = {5710ee8046be85e4702f546f7e18f4c90ac48d87}, + all_ss_ids = {['5710ee8046be85e4702f546f7e18f4c90ac48d87']}, } @inproceedings{Muen10, @@ -17007,6 +21069,8 @@ @inproceedings{Muen10 optnote = {DIAG, RADIOLOGY}, gsid = {13737076938483379783}, gscites = {13}, + ss_id = {123ca7207e628ef99ea5ea90da2e726797c2ed02}, + all_ss_ids = {['123ca7207e628ef99ea5ea90da2e726797c2ed02']}, } @inproceedings{Muen12, @@ -17022,6 +21086,8 @@ @inproceedings{Muen12 optnote = {DIAG}, gsid = {16342785207326025803}, gscites = {9}, + ss_id = {2afc979ce2117c36a6bc93edc575b3dbd60c7170}, + all_ss_ids = {['2afc979ce2117c36a6bc93edc575b3dbd60c7170']}, } @article{Muen12a, @@ -17038,8 +21104,10 @@ @article{Muen12a pmid = {22981428}, month = {12}, gsid = {18008631097448211397}, - gscites = {43}, + gscites = {51}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/109635}, + ss_id = {93f83b026d86c5c2217e36b6b91383d1b8afe3c4}, + all_ss_ids = {['93f83b026d86c5c2217e36b6b91383d1b8afe3c4']}, } @inproceedings{Muen12b, @@ -17054,7 +21122,9 @@ @inproceedings{Muen12b optnote = {DIAG, RADIOLOGY}, month = {5}, gsid = {380465826604345313}, - gscites = {6}, + gscites = {9}, + ss_id = {8872a136642b7c45de906e5f3350d08da8915e31}, + all_ss_ids = {['8872a136642b7c45de906e5f3350d08da8915e31']}, } @article{Muen14, @@ -17074,6 +21144,8 @@ @article{Muen14 gsid = {17113415496324263537}, gscites = {28}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/137731}, + ss_id = {c2114441f5e7ab560314ec222a9ab9a38cb9a62b}, + all_ss_ids = {['c2114441f5e7ab560314ec222a9ab9a38cb9a62b']}, } @phdthesis{Muen14a, @@ -17104,8 +21176,9 @@ @article{Mull19 pmid = {31754628}, month = {11}, gsid = {7420919522018227404}, - gscites = {1}, + gscites = {13}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/212635}, + all_ss_ids = {['18d0a58647fbdaaf7cd278cb66ae548a1e5289e5', '981fb8239e2e32cd742059aa270015871aa64b61']}, } @article{Mulle20, @@ -17118,6 +21191,9 @@ @article{Mulle20 abstract = {Purpose: To investigate the inter-reader agreement for grading of retinal alterations in age-related macular degeneration (AMD) using a reading center setting. Methods: In this cross-sectional case series, spectral domain optical coherence tomography (OCT, Topcon 3D OCT, Tokyo, Japan) scans of 112 eyes of 112 patients with neovascular AMD (56 treatment-naive, 56 after three anti-vascular endothelial growth factor injections) were analyzed by four independent readers. Imaging features specific for AMD were annotated using a novel custom-built annotation platform. Dice score, Bland-Altman plots, coefficients of repeatability (CR), coefficients of variation (CV), and intraclass correlation coefficients (ICC) were assessed. Results: Loss of ellipsoid zone, pigment epithelium detachment, subretinal fluid, and Drusen were the most abundant features in our cohort. 
The features subretinal fluid, intraretinal fluid, hypertransmission, descent of the outer plexiform layer, and pigment epithelium detachment showed highest inter-reader agreement, while detection and measures of loss of ellipsoid zone and retinal pigment epithelium were more variable. The agreement on the size and location of the respective annotation was more consistent throughout all features. Conclusions: The inter-reader agreement depended on the respective OCT-based feature. A selection of reliable features might provide suitable surrogate markers for disease progression and possible treatment effects focusing on different disease stages. This might give opportunities to a more time- and cost-effective patient assessment and improved decision-making as well as have implications for clinical trials and training machine learning algorithms.}, optnote = {DIAG}, month = {10}, + ss_id = {67e43291243b436029c050ac11f62541bc3e2909}, + all_ss_ids = {['67e43291243b436029c050ac11f62541bc3e2909']}, + gscites = {12}, } @article{Muna21, @@ -17133,6 +21209,43 @@ @article{Muna21 pmid = {34122444}, year = {2021}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/234187}, + ss_id = {0759829c3d1484b5ba08e60ffb8282207ab68a8b}, + all_ss_ids = {['0759829c3d1484b5ba08e60ffb8282207ab68a8b']}, + gscites = {15}, +} + +@article{Muna21a, + author = {Munari, Enrico and Mariotti, Francesca R. and Quatrini, Linda and Bertoglio, Pietro and Tumino, Nicola and Vacca, Paola and Eccher, Albino and Ciompi, Francesco and Brunelli, Matteo and Martignoni, Guido and Bogina, Giuseppe and Moretta, Lorenzo}, + title = {PD-1/PD-L1 in Cancer: Pathophysiological, Diagnostic and Therapeutic Aspects.}, + doi = {10.3390/ijms22105123}, + issue = {10}, + volume = {22}, + abstract = {Immune evasion is a key strategy adopted by tumor cells to escape the immune system while promoting their survival and metastatic spreading. Indeed, several mechanisms have been developed by tumors to inhibit immune responses. PD-1 is a cell surface inhibitory receptor, which plays a major physiological role in the maintenance of peripheral tolerance. In pathological conditions, activation of the PD-1/PD-Ls signaling pathway may block immune cell activation, a mechanism exploited by tumor cells to evade the antitumor immune control. Targeting the PD-1/PD-L1 axis has represented a major breakthrough in cancer treatment. Indeed, the success of PD-1 blockade immunotherapies represents an unprecedented success in the treatment of different cancer types. To improve the therapeutic efficacy, a deeper understanding of the mechanisms regulating PD-1 expression and signaling in the tumor context is required. We provide an overview of the current knowledge of PD-1 expression on both tumor-infiltrating T and NK cells, summarizing the recent evidence on the stimuli regulating its expression. 
We also highlight perspectives and limitations of the role of PD-L1 expression as a predictive marker, discuss well-established and novel potential approaches to improve patient selection and clinical outcome and summarize current indications for anti-PD1/PD-L1 immunotherapy.}, + file = {Muna21a.pdf:pdf\\Muna21a.pdf:PDF}, + journal = {International journal of molecular sciences}, + optnote = {DIAG, RADIOLOGY}, + pmid = {34066087}, + year = {2021}, + ss_id = {7e43e8a75e61c350cc95862bddae395e662a76cd}, + all_ss_ids = {['7e43e8a75e61c350cc95862bddae395e662a76cd']}, + gscites = {60}, +} + +@article{Muna22, + author = {Munari, Enrico and Querzoli, Giulia and Brunelli, Matteo and Marconi, Marcella and Sommaggio, Marco and Cocchi, Marco A. and Martignoni, Guido and Netto, George J. and Calio, Anna and Quatrini, Linda and Mariotti, Francesca R. and Luchini, Claudio and Girolami, Ilaria and Eccher, Albino and Segala, Diego and Ciompi, Francesco and Zamboni, Giuseppe and Moretta, Lorenzo and Bogina, Giuseppe}, + title = {Comparison of three validated PD-L1 immunohistochemical assays in urothelial carcinoma of the bladder: interchangeability and issues related to patient selection.}, + doi = {10.3389/fimmu.2022.954910}, + pages = {954910}, + volume = {13}, + abstract = {Different programmed cell death-ligand 1 (PD-L1) assays and scoring algorithms are being used in the evaluation of PD-L1 expression for the selection of patients for immunotherapy in specific settings of advanced urothelial carcinoma (UC). In this paper, we sought to investigate three approved assays (Ventana SP142 and SP263, and Dako 22C3) in UC with emphasis on implications for patient selection for atezolizumab/pembrolizumab as the first line of treatment. Tumors from 124 patients with invasive UC of the bladder were analyzed using tissue microarrays (TMA). Serial sections were stained with SP263 and SP142 on Ventana Benchmark Ultra and with 22C3 on Dako Autostainer Link 48. Stains were evaluated independently by two observers and scored using the combined positive score (CPS) and tumor infiltrating immune cells (IC) algorithms. Differences in proportions (DP), overall percent agreement (OPA), positive percent agreement (PPA), negative percent agreement (NPA), and Cohen k were calculated for all comparable cases. Good overall concordance in analytic performance was observed for 22C3 and SP263 with both scoring algorithms; specifically, the highest OPA was observed between 22C3 and SP263 (89.6%) when using CPS. On the other hand, SP142 consistently showed lower positivity rates with high differences in proportions (DP) compared with 22C3 and SP263 with both CPS and IC, and with a low PPA, especially when using the CPS algorithm. 
In conclusion, 22C3 and SP263 assays show comparable analytical performance while SP142 shows divergent staining results, with important implications for the selection of patients for both pembrolizumab and atezolizumab.}, + file = {Muna22.pdf:pdf\\Muna22.pdf:PDF}, + journal = {Frontiers in immunology}, + optnote = {DIAG, RADIOLOGY}, + pmid = {35967344}, + year = {2022}, + ss_id = {e7158d01e00c44011785e31e48d9082c3b3e08ce}, + all_ss_ids = {['e7158d01e00c44011785e31e48d9082c3b3e08ce']}, + gscites = {3}, } @article{Muns19a, @@ -17148,7 +21261,9 @@ @article{Muns19a publisher = {Wiley Online Library}, month = {6}, gsid = {9651421720990871193}, - gscites = {6}, + gscites = {27}, + ss_id = {aa4959c07dab73932a118ba1d75fdde23e55ffc6}, + all_ss_ids = {['aa4959c07dab73932a118ba1d75fdde23e55ffc6']}, } @inproceedings{Murp07, @@ -17166,6 +21281,8 @@ @inproceedings{Murp07 month = {3}, gsid = {11443400652096701431}, gscites = {22}, + ss_id = {46ad5997668ef588b9eae850299e642b90262060}, + all_ss_ids = {['46ad5997668ef588b9eae850299e642b90262060']}, } @inproceedings{Murp08, @@ -17182,6 +21299,8 @@ @inproceedings{Murp08 pmid = {18982703}, gsid = {12888240555944431806}, gscites = {86}, + ss_id = {42051c5c9cfd535a32221211dd2fec5ab3d2a12f}, + all_ss_ids = {['42051c5c9cfd535a32221211dd2fec5ab3d2a12f']}, } @inproceedings{Murp08a, @@ -17232,8 +21351,10 @@ @article{Murp09 pmid = {19646913}, month = {10}, gsid = {13680973010063298300}, - gscites = {275}, + gscites = {320}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/81262}, + ss_id = {1b28c67c8c23bb85dd86a56e661ece107dd4c98f}, + all_ss_ids = {['1b28c67c8c23bb85dd86a56e661ece107dd4c98f']}, } @conference{Murp09a, @@ -17260,6 +21381,8 @@ @article{Murp10 month = {2}, gsid = {15970567742966563807}, gscites = {135}, + ss_id = {11270fdc633003eec7df21d1d9a8d807d8cf1d0c}, + all_ss_ids = {['11270fdc633003eec7df21d1d9a8d807d8cf1d0c']}, } @inproceedings{Murp10a, @@ -17273,6 +21396,8 @@ @inproceedings{Murp10a optnote = {DIAG, RADIOLOGY}, gsid = {13824588902399310721}, gscites = {45}, + ss_id = {ce5780f841ac6b5576199b0c22d2e0ba374c30b4}, + all_ss_ids = {['ce5780f841ac6b5576199b0c22d2e0ba374c30b4']}, } @phdthesis{Murp11, @@ -17304,8 +21429,10 @@ @article{Murp11a pmid = {21632295}, month = {11}, gsid = {11653185328679959383}, - gscites = {391}, + gscites = {450}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/96888}, + ss_id = {31f676889affcf561b077e661380e5fd945aabba}, + all_ss_ids = {['31f676889affcf561b077e661380e5fd945aabba']}, } @article{Murp12, @@ -17325,6 +21452,8 @@ @article{Murp12 gsid = {4159560996296390373}, gscites = {49}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/110012}, + ss_id = {9ca3badc4daf9fd3da93b868e84d06476393fc78}, + all_ss_ids = {['9ca3badc4daf9fd3da93b868e84d06476393fc78']}, } @article{Murp19, @@ -17334,9 +21463,11 @@ @article{Murp19 year = {2019}, url = {https://arxiv.org/abs/1903.03349}, abstract = {There is a growing interest in the automated analysis of chest X-Ray (CXR) as a sensitive and inexpensive means of screening susceptible populations for pulmonary tuberculosis. In this work we evaluate the latest version of CAD4TB, a software platform designed for this purpose. Version 6 of CAD4TB was released in 2018 and is here tested on an independent dataset of 5565 CXR images with GeneXpert (Xpert) sputum test results available (854 Xpert positive subjects). A subset of 500 subjects (50% Xpert positive) was reviewed and annotated by 5 expert observers independently to obtain a radiological reference standard. 
The latest version of CAD4TB is found to outperform all previous versions in terms of area under receiver operating curve (ROC) with respect to both Xpert and radiological reference standards. Improvements with respect to Xpert are most apparent at high sensitivity levels with a specificity of 76% obtained at 90% sensitivity. When compared with the radiological reference standard, CAD4TB v6 also outperformed previous versions by a considerable margin and achieved 98% specificity at 90% sensitivity. No substantial difference was found between the performance of CAD4TB v6 and any of the various expert observers against the Xpert reference standard. A cost and efficiency analysis on this dataset demonstrates that in a standard clinical situation, operating at 90% sensitivity, users of CAD4TB v6 can process 132 subjects per day at an average cost per screen of \$5.95 per sub - ject, while users of version 3 process only 85 subjects per day at a cost of \$8.41 per subject. At all tested operating points version 6 is shown to be more efficient and cost effective than any other version.}, + ject, while users of version 3 process only 85 subjects per day at a cost of \$8.41 per subject. At all tested operating points version 6 is shown to be more efficient and cost effective than any other version.}, optnote = {DIAG, RADIOLOGY}, month = {3}, + all_ss_ids = {['61d1507dec6ef557cba474c4f67b1d0ad3631fb0']}, + gscites = {103}, } @article{Murp20, @@ -17354,7 +21485,9 @@ @article{Murp20 year = {2020}, month = {3}, gsid = {5738262576738254867}, - gscites = {8}, + gscites = {103}, + ss_id = {61d1507dec6ef557cba474c4f67b1d0ad3631fb0}, + all_ss_ids = {['61d1507dec6ef557cba474c4f67b1d0ad3631fb0']}, } @article{Murp20a, @@ -17371,7 +21504,9 @@ @article{Murp20a pmid = {32384019}, month = {5}, gsid = {15067361865056080572}, - gscites = {12}, + gscites = {147}, + ss_id = {1f77425465d8a5dd20e949f9e80aa4da590efbb5}, + all_ss_ids = {['1f77425465d8a5dd20e949f9e80aa4da590efbb5']}, } @conference{Mus12, @@ -17383,6 +21518,23 @@ @conference{Mus12 optnote = {DIAG, RADIOLOGY}, } +@article{Mus17, + author = {Mus, Roel D. and Borelli, Cristina and Bult, Peter and Weiland, Elisabeth and Karssemeijer, Nico and Barentsz, Jelle O. and Gubern-M\'{e}rida, Albert and Platel, Bram and Mann, Ritse M.}, + title = {~~Time to enhancement derived from ultrafast breast MRI as a novel parameter to discriminate benign from malignant breast lesions}, + doi = {10.1016/j.ejrad.2017.01.020}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejrad.2017.01.020}, + file = {Mus17.pdf:pdf\\Mus17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Radiology}, + automatic = {yes}, + citation-count = {61}, + pages = {90-96}, + volume = {89}, + pmid = {28267555}, +} + @article{Muyo14, author = {Muyoyeta, M. and Maduskar, P. and Moyo, M. and Kasese, N. and Milimo, D. and Spooner, R. and Kapata, N. and Hogeweg, L. and van Ginneken, B. 
and Ayles, H.}, title = {The Sensitivity and Specificity of Using a Computer Aided Diagnosis Program for Automatically Scoring Chest {X}-Rays of Presumptive {TB} Patients Compared with {X}pert {MTB/RIF} in {L}usaka {Z}ambia}, @@ -17398,7 +21550,9 @@ @article{Muyo14 pmid = {24705629}, month = {4}, gsid = {4954392116893371357}, - gscites = {63}, + gscites = {82}, + ss_id = {ba7ca751b20644a47eceb276948d676bf42609fa}, + all_ss_ids = {['ba7ca751b20644a47eceb276948d676bf42609fa']}, } @article{Nab90, @@ -17415,6 +21569,8 @@ @article{Nab90 pmid = {2263263}, gsid = {8785684717863376362}, gscites = {3}, + ss_id = {fd4a0f80717b0dc6a8157fd6148aec44788aa903}, + all_ss_ids = {['fd4a0f80717b0dc6a8157fd6148aec44788aa903']}, } @article{Nab92, @@ -17431,6 +21587,8 @@ @article{Nab92 month = {1}, gsid = {14419021407581029067}, gscites = {45}, + ss_id = {5aca0ca5805abfa265b540ba865c83a0cc96d23a}, + all_ss_ids = {['5aca0ca5805abfa265b540ba865c83a0cc96d23a']}, } @article{Nage13, @@ -17463,6 +21621,22 @@ @article{Naid13 number = {1}, pmid = {23070270}, month = {1}, + all_ss_ids = {c37197020c3e86415f814d66e49de6f11d7cdbf1}, + gscites = {1000}, +} + +@article{Naik22, + author = {Naik, Nithesh and Hameed, B. M. Zeeshan and Sooriyaperakasam, Nilakshman and Vinayahalingam, Shankeeth and Patil, Vathsala and Smriti, Komal and Saxena, Janhavi and Shah, Milap and Ibrahim, Sufyan and Singh, Anshuman and Karimi, Hadis and Naganathan, Karthickeyan and Shetty, Dasharathraj K. and Rai, Bhavan Prasad and Chlosta, Piotr and Somani, Bhaskar K.}, + title = {Transforming healthcare through a digital revolution: A review of digital healthcare technologies and solutions.}, + doi = {10.3389/fdgth.2022.919985}, + pages = {919985}, + volume = {4}, + abstract = {The COVID-19 pandemic has put a strain on the entire global healthcare infrastructure. The pandemic has necessitated the re-invention, re-organization, and transformation of the healthcare system. The resurgence of new COVID-19 virus variants in several countries and the infection of a larger group of communities necessitate a rapid strategic shift. Governments, non-profit, and other healthcare organizations have all proposed various digital solutions. It's not clear whether these digital solutions are adaptable, functional, effective, or reliable. With the disease becoming more and more prevalent, many countries are looking for assistance and implementation of digital technologies to combat COVID-19. Digital health technologies for COVID-19 pandemic management, surveillance, contact tracing, diagnosis, treatment, and prevention will be discussed in this paper to ensure that healthcare is delivered effectively. Artificial Intelligence (AI), big data, telemedicine, robotic solutions, Internet of Things (IoT), digital platforms for communication (DC), computer vision, computer audition (CA), digital data management solutions (blockchain), digital imaging are premiering to assist healthcare workers (HCW's) with solutions that include case base surveillance, information dissemination, disinfection, and remote consultations, along with many other such interventions.}, + file = {Naik22.pdf:pdf\\Naik22.pdf:PDF}, + journal = {Frontiers in digital health}, + optnote = {DIAG}, + pmid = {35990014}, + year = {2022}, } @conference{Nair12a, @@ -17474,6 +21648,23 @@ @conference{Nair12a optnote = {DIAG, RADIOLOGY}, } +@article{Nair18, + author = {Nair, Arjun and Bartlett, Emily C. and Walsh, Simon L.F. and Wells, Athol U. 
and Navani, Neal and Hardavella, Georgia and Bhalla, Sanjeev and Calandriello, Lucio and Devaraj, Anand and Goo, Jin Mo and Klein, Jeffrey S. and MacMahon, Heber and Schaefer-Prokop, C.M. and Seo, Joon-Beom and Sverzellati, Nicola and Desai, Sujal R.}, + title = {~~Variable radiological lung nodule evaluation leads to divergent management recommendations}, + doi = {10.1183/13993003.01359-2018}, + year = {2018}, + abstract = {Radiological evaluation of incidentally detected lung nodules on computed tomography (CT) influences management. We assessed international radiological variation in 1) pulmonary nodule characterisation; 2) hypothetical guideline-derived management; and 3) radiologists' management recommendations.107 radiologists from 25 countries evaluated 69 CT-detected nodules, recording: 1) first-choice composition (solid, part-solid or ground-glass, with percentage confidence); 2) morphological features; 3) dimensions; 4) recommended management; and 5) decision-influencing factors. We modelled hypothetical management decisions on the 2005 and updated 2017 Fleischner Society, and both liberal and parsimonious interpretations of the British Thoracic Society 2015 guidelines.Overall agreement for first-choice nodule composition was good (Fleiss' k=0.65), but poorest for part-solid nodules (weighted k 0.62, interquartile range 0.50-0.71). Morphological variables, including spiculation (k=0.35), showed poor-to-moderate agreement (k=0.23-0.53). Variation in diameter was greatest at key thresholds (5 mm and 6 mm). Agreement for radiologists' recommendations was poor (k=0.30); 21% disagreed with the majority. Although agreement within the four guideline-modelled management strategies was good (k=0.63-0.73), 5-10% of radiologists would disagree with majority decisions if they applied guidelines strictly.Agreement was lowest for part-solid nodules, while significant measurement variation exists at important size thresholds. 
These variations resulted in generally good agreement for guideline-modelled management, but poor agreement for radiologists' actual recommendations.}, + url = {http://dx.doi.org/10.1183/13993003.01359-2018}, + file = {Nair18.pdf:pdf\\Nair18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Respiratory Journal}, + automatic = {yes}, + citation-count = {24}, + pages = {1801359}, + volume = {52}, + pmid = {30409817}, +} + @article{Napo19, author = {Napolitano, George and Lynge, Elsebeth and Lillholm, Martin and Vejborg, Ilse and van Gils, Carla H and Nielsen, Mads and Karssemeijer, Nico}, title = {Change in mammographic density across birth cohorts of {{D}}utch breast cancer screening participants}, @@ -17489,7 +21680,26 @@ @article{Napo19 optnote = {DIAG, RADIOLOGY}, pmid = {30762225}, gsid = {1389657668429886587}, - gscites = {2}, + gscites = {4}, + ss_id = {c94e50909368cf1112536dd7224fe141c2799b87}, + all_ss_ids = {['c94e50909368cf1112536dd7224fe141c2799b87']}, +} + +@article{Nas22, + author = {Nas, J and Thannhauser, J and Vart, P and van Geuns, RJM and Muijsers, HEC and Mol, JHQ and Aarts, GWA and Konijnenberg, LSF and Gommans, DHF and Ahoud-Schoenmakers, SGAM and Vos, JL and van Royen, N and Bonnes, JL and Brouwer, MA}, + title = {~~The impact of alcohol use on the quality of cardiopulmonary resuscitation among festival attendees: A prespecified analysis of a randomised trial}, + doi = {10.1016/j.resuscitation.2022.10.002}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.resuscitation.2022.10.002}, + file = {Nas22.pdf:pdf\\Nas22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Resuscitation}, + automatic = {yes}, + citation-count = {1}, + pages = {12-19}, + volume = {181}, + pmid = {35587346}, } @article{Niek09, @@ -17509,6 +21736,8 @@ @article{Niek09 gsid = {10214499820404046656}, gscites = {40}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/81446}, + ss_id = {75b311285b30601afcb96dcb4150b382f9266aec}, + all_ss_ids = {['75b311285b30601afcb96dcb4150b382f9266aec']}, } @article{Niel11, @@ -17544,6 +21773,8 @@ @inproceedings{Niem03a month = {5}, gsid = {10281073666494131994}, gscites = {77}, + ss_id = {7c6eef6134210ac70172c73bfd341809aac67b9b}, + all_ss_ids = {['7c6eef6134210ac70172c73bfd341809aac67b9b']}, } @conference{Niem03b, @@ -17571,7 +21802,9 @@ @inproceedings{Niem04a optnote = {DIAG, RADIOLOGY}, month = {5}, gsid = {4847858369065721334}, - gscites = {849}, + gscites = {883}, + ss_id = {798e312dd67798024da74f9a8f92946af88c7cd4}, + all_ss_ids = {['798e312dd67798024da74f9a8f92946af88c7cd4']}, } @patent{Niem05, @@ -17594,6 +21827,8 @@ @inproceedings{Niem05a optnote = {DIAG, RADIOLOGY}, gsid = {9478136858074324990}, gscites = {26}, +
ss_id = {e09a88838edfa47d94493c9aa186b4d48ff63945}, + all_ss_ids = {['e09a88838edfa47d94493c9aa186b4d48ff63945']}, } @article{Niem05b, @@ -17611,7 +21846,9 @@ @article{Niem05b pmid = {15889546}, month = {5}, gsid = {13743541034647495598}, - gscites = {586}, + gscites = {595}, + ss_id = {444deb4d61347150f98d2095192bf0c9a75c99bf}, + all_ss_ids = {['444deb4d61347150f98d2095192bf0c9a75c99bf']}, } @inproceedings{Niem05c, @@ -17643,6 +21880,8 @@ @article{Niem06a month = {12}, gsid = {9152679895729904090}, gscites = {144}, + ss_id = {2ee501f792ae098305ec14a721755a387cb01309}, + all_ss_ids = {['2ee501f792ae098305ec14a721755a387cb01309']}, } @phdthesis{Niem06b, @@ -17677,6 +21916,8 @@ @article{Niem07a month = {1}, gsid = {446255861867709018}, gscites = {247}, + ss_id = {aa6b6a54343f1c9bba9b1dd2579b1132bc775d79}, + all_ss_ids = {['aa6b6a54343f1c9bba9b1dd2579b1132bc775d79']}, } @article{Niem07b, @@ -17694,7 +21935,9 @@ @article{Niem07b pmid = {17460289}, month = {5}, gsid = {13784025486209249363}, - gscites = {388}, + gscites = {417}, + ss_id = {6a1e5f1ff1b3106d160cb1e486fb9611252c393d}, + all_ss_ids = {['6a1e5f1ff1b3106d160cb1e486fb9611252c393d']}, } @inproceedings{Niem08, @@ -17710,7 +21953,9 @@ @inproceedings{Niem08 pmid = {19163472}, month = {8}, gsid = {17667278035436647511}, - gscites = {32}, + gscites = {42}, + ss_id = {9d9799545cca4d430a3e3edcce2fc0b6c8ba99c3}, + all_ss_ids = {['9d9799545cca4d430a3e3edcce2fc0b6c8ba99c3']}, } @inproceedings{Niem08a, @@ -17727,6 +21972,8 @@ @inproceedings{Niem08a month = {3}, gsid = {7748171457790516188}, gscites = {88}, + ss_id = {b2c5f8d2a2ad3759ab3844e90de416857145fc89}, + all_ss_ids = {['b2c5f8d2a2ad3759ab3844e90de416857145fc89']}, } @article{Niem09, @@ -17744,8 +21991,10 @@ @article{Niem09 pmid = {19782633}, month = {12}, gsid = {3777725572675616469}, - gscites = {247}, + gscites = {255}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/80726}, + ss_id = {9c948dbc3d1c7fc86c90f1b0b98e5aea37bb7c88}, + all_ss_ids = {['9c948dbc3d1c7fc86c90f1b0b98e5aea37bb7c88']}, } @article{Niem09a, @@ -17763,7 +22012,9 @@ @article{Niem09a pmid = {19150786}, month = {5}, gsid = {14795703699235766374}, - gscites = {124}, + gscites = {136}, + ss_id = {852e0bb4efbd7906c4c1eb35a69fc2c0eb51f9d6}, + all_ss_ids = {['852e0bb4efbd7906c4c1eb35a69fc2c0eb51f9d6']}, } @inproceedings{Niem09b, @@ -17780,7 +22031,9 @@ @inproceedings{Niem09b optnote = {DIAG, RADIOLOGY}, month = {2}, gsid = {11931099815327808952}, - gscites = {58}, + gscites = {65}, + ss_id = {df038b6c7e9eb4a67f30f4752a478110f5b1d1b1}, + all_ss_ids = {['df038b6c7e9eb4a67f30f4752a478110f5b1d1b1']}, } @inproceedings{Niem09c, @@ -17798,6 +22051,8 @@ @inproceedings{Niem09c month = {2}, gsid = {17967959879140071576}, gscites = {74}, + ss_id = {3dee47df1e52968b02ac5ca0ce2790e7d4c56ac2}, + all_ss_ids = {['3dee47df1e52968b02ac5ca0ce2790e7d4c56ac2']}, } @inproceedings{Niem09d, @@ -17815,6 +22070,8 @@ @inproceedings{Niem09d month = {2}, gsid = {14163524361307936981}, gscites = {14}, + ss_id = {f3cac15257a142e4137eb93feb1ba1cf1127221a}, + all_ss_ids = {['f3cac15257a142e4137eb93feb1ba1cf1127221a']}, } @conference{Niem09e, @@ -17841,8 +22098,10 @@ @article{Niem10 pmid = {19822469}, month = {1}, gsid = {9532443460643942339}, - gscites = {446}, + gscites = {512}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/89373}, + ss_id = {378dddb8472d1dd8e8bf18fc670916cdda4d4c1e}, + all_ss_ids = {['378dddb8472d1dd8e8bf18fc670916cdda4d4c1e']}, } @inproceedings{Niem10a, @@ -17860,6 +22119,8 @@ @inproceedings{Niem10a month = {3}, gsid = 
{4396585503730942977}, gscites = {23}, + ss_id = {38a508f72094aa3810427c53f1a54ebbf15b7294}, + all_ss_ids = {['38a508f72094aa3810427c53f1a54ebbf15b7294']}, } @inproceedings{Niem11, @@ -17877,6 +22138,8 @@ @inproceedings{Niem11 month = {3}, gsid = {15284653557100524466}, gscites = {1}, + ss_id = {d3fdf1be1b16875985db2121c467e852b2e6e319}, + all_ss_ids = {['d3fdf1be1b16875985db2121c467e852b2e6e319']}, } @article{Niem11a, @@ -17894,7 +22157,9 @@ @article{Niem11a pmid = {20813633}, month = {2}, gsid = {17448530076208906101}, - gscites = {98}, + gscites = {130}, + ss_id = {e2b916c12347faafc35876900a78ff9efe136791}, + all_ss_ids = {['e2b916c12347faafc35876900a78ff9efe136791']}, } @article{Niem11b, @@ -17912,8 +22177,27 @@ @article{Niem11b pmid = {21690008}, month = {11}, gsid = {14355423160597618268}, - gscites = {149}, + gscites = {183}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/97661}, + ss_id = {05abf67f5320dd744e9edf76d5b04d4258bd49bd}, + all_ss_ids = {['05abf67f5320dd744e9edf76d5b04d4258bd49bd']}, +} + +@article{Nijh16, + author = {Nijhof, Bonnie and Castells-Nobau, Anna and Wolf, Louis and Scheffer-de Gooyert, Jolanda M. and Monedero, Ignacio and Torroja, Laura and Coromina, Lluis and van der Laak, Jeroen A. W. M. and Schenck, Annette}, + title = {~~A New Fiji-Based Algorithm That Systematically Quantifies Nine Synaptic Parameters Provides Insights into Drosophila NMJ Morphometry}, + doi = {10.1371/journal.pcbi.1004823}, + year = {2016}, + abstract = {Author Summary Altered synapse function underlies cognitive disorders such as intellectual disability, autism and schizophrenia. The morphology of synapses is crucial for their function but is often described using only a small number of parameters or categories. As a consequence, it is still unknown how different aspects of synapse morphology relate to each other and whether they respond in a coordinated or independent manner. Here, we report a sensitive and multiparametric method for systematic synapse morphometry at the Drosophila Neuromuscular Junction (NMJ), a popular model for mammalian synapse biology. Surveying a large NMJ image repository, we provide insights in the natural variation of NMJ morphology as a result of differences in gender, genetic background and abdominal body segment. We show which synapse parameters correlate and find that parameters fall into five groups. Based on our findings, we propose that two of them, NMJ size and geometry, are controlled by different molecular mechanisms. 
Our study provides insights into the design principles of a model synapse and tools that can be applied in future studies to identify genes that modulate or co-orchestrate different aspects of synapse morphology.}, + url = {http://dx.doi.org/10.1371/journal.pcbi.1004823}, + file = {Nijh16.pdf:pdf\\Nijh16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {PLOS Computational Biology}, + automatic = {yes}, + citation-count = {27}, + pages = {e1004823}, + volume = {12}, + pmid = {26998933}, } @article{Nill07, @@ -17933,6 +22217,8 @@ @article{Nill07 gsid = {6175723420112201828}, gscites = {41}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/52135}, + ss_id = {25e214b3c90dc8afcc8418ea9d5001f50a223fa4}, + all_ss_ids = {['25e214b3c90dc8afcc8418ea9d5001f50a223fa4']}, } @article{Nill09, @@ -17969,6 +22255,8 @@ @inproceedings{Nill09a pmid = {20426200}, gsid = {12873469043739776735}, gscites = {13}, + ss_id = {336b5a32e73a31cbc276d6dd0750e5ca42cfe073}, + all_ss_ids = {['336b5a32e73a31cbc276d6dd0750e5ca42cfe073']}, } @article{Nill11, @@ -17988,6 +22276,8 @@ @article{Nill11 gsid = {1538861246327733103}, gscites = {19}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/95767}, + ss_id = {1b2583976cd39108af8f0b7d9f1900b4e61e4c95}, + all_ss_ids = {['1b2583976cd39108af8f0b7d9f1900b4e61e4c95']}, } @article{Noot22, @@ -17995,17 +22285,20 @@ @article{Noot22 title = {Knowledge distillation with ensembles of convolutional neural networks for medical image segmentation}, doi = {10.1117/1.JMI.9.5.052407}, abstract = {Purpose: Ensembles of convolutional neural networks (CNNs) often outperform a single CNN in medical image segmentation tasks, but inference is computationally more expensive and makes ensembles unattractive for some applications. We compared the performance of differently constructed ensembles with the performance of CNNs derived from these ensembles using knowledge distillation, a technique for reducing the footprint of large models such as ensembles. - - Approach: We investigated two different types of ensembles, namely, diverse ensembles of networks with three different architectures and two different loss-functions, and uniform ensembles of networks with the same architecture but initialized with different random seeds. For each ensemble, additionally, a single student network was trained to mimic the class probabilities predicted by the teacher model, the ensemble. We evaluated the performance of each network, the ensembles, and the corresponding distilled networks across three different publicly available datasets. These included chest computed tomography scans with four annotated organs of interest, brain magnetic resonance imaging (MRI) with six annotated brain structures, and cardiac cine-MRI with three annotated heart structures. - - Results: Both uniform and diverse ensembles obtained better results than any of the individual networks in the ensemble. Furthermore, applying knowledge distillation resulted in a single network that was smaller and faster without compromising performance compared with the ensemble it learned from. The distilled networks significantly outperformed the same network trained with reference segmentation instead of knowledge distillation.
- - Conclusion: Knowledge distillation can compress segmentation ensembles of uniform or diverse composition into a single CNN while maintaining the performance of the ensemble.}, + + Approach: We investigated two different types of ensembles, namely, diverse ensembles of networks with three different architectures and two different loss-functions, and uniform ensembles of networks with the same architecture but initialized with different random seeds. For each ensemble, additionally, a single student network was trained to mimic the class probabilities predicted by the teacher model, the ensemble. We evaluated the performance of each network, the ensembles, and the corresponding distilled networks across three different publicly available datasets. These included chest computed tomography scans with four annotated organs of interest, brain magnetic resonance imaging (MRI) with six annotated brain structures, and cardiac cine-MRI with three annotated heart structures. + + Results: Both uniform and diverse ensembles obtained better results than any of the individual networks in the ensemble. Furthermore, applying knowledge distillation resulted in a single network that was smaller and faster without compromising performance compared with the ensemble it learned from. The distilled networks significantly outperformed the same network trained with reference segmentation instead of knowledge distillation. + + Conclusion: Knowledge distillation can compress segmentation ensembles of uniform or diverse composition into a single CNN while maintaining the performance of the ensemble.}, file = {Noot22.pdf:pdf\\Noot22.pdf:PDF}, journal = {Journal of Medical Imaging}, month = {05}, optnote = {DIAG, RADIOLOGY}, year = {2022}, + ss_id = {0aabb1a4831d724bf28ef9a9a0d7bde6a1bb2ea7}, + all_ss_ids = {['0aabb1a4831d724bf28ef9a9a0d7bde6a1bb2ea7']}, + gscites = {5}, } @conference{Oei11a, @@ -18068,7 +22361,9 @@ @article{Oei17 optnote = {DIAG}, pmid = {27651144}, gsid = {3697066219334813948}, - gscites = {10}, + gscites = {11}, + ss_id = {242895b9618a7225acb805571956c6843dae0858}, + all_ss_ids = {['c21f83954bab86611134478ab918399813c10313', '242895b9618a7225acb805571956c6843dae0858']}, } @article{Oei17a, @@ -18087,6 +22382,8 @@ @article{Oei17a pmid = {27718078}, gsid = {17733791166970944449}, gscites = {7}, + ss_id = {6cc4abfa6775f257c9231648a24ac9d825b232dd}, + all_ss_ids = {['6cc4abfa6775f257c9231648a24ac9d825b232dd']}, } @article{Oei18, @@ -18099,18 +22396,20 @@ @article{Oei18 pages = {3902-3911}, doi = {10.1007/s00330-018-5353-y}, abstract = {Objectives: To assess observer variability of different reference tissues used for relative CBV (rCBV) measurements in DSC-MRI of glioma patients. - - Methods: In this retrospective study, three observers measured rCBVin DSC-MRimages of 44 glioma patients on two occasions. rCBVis calculated by the CBVin the tumour hotspot/the CBVof a reference tissue at the contralateral side for normalization. One observer annotated the tumour hotspot that was kept constant for all measurements. All observers annotated eight reference tissues of normal white and grey matter. Observer variability was evaluated using the intraclass correlation coefficient (ICC), coefficient of variation (CV) and Bland-Altman analyses. - - Results: For intra-observer, the ICC ranged from 0.50-0.97 (fair-excellent) for all reference tissues. The CV ranged from 5.1-22.1 % for all reference tissues and observers. 
For inter-observer, the ICC for all pairwise observer combinations ranged from 0.44-0.92 (poor-excellent). The CV ranged from 8.1-31.1 %. Centrum semiovale was the only reference tissue that showed excellent intra- and inter-observer agreement (ICC>0.85) and lowest CVs (<12.5 %). Bland-Altman analyses showed that mean differences for centrum semiovale were close to zero. - - Conclusion: Selecting contralateral centrum semiovale as reference tissue for rCBV provides the lowest observer variability.}, + + Methods: In this retrospective study, three observers measured rCBVin DSC-MRimages of 44 glioma patients on two occasions. rCBVis calculated by the CBVin the tumour hotspot/the CBVof a reference tissue at the contralateral side for normalization. One observer annotated the tumour hotspot that was kept constant for all measurements. All observers annotated eight reference tissues of normal white and grey matter. Observer variability was evaluated using the intraclass correlation coefficient (ICC), coefficient of variation (CV) and Bland-Altman analyses. + + Results: For intra-observer, the ICC ranged from 0.50-0.97 (fair-excellent) for all reference tissues. The CV ranged from 5.1-22.1 % for all reference tissues and observers. For inter-observer, the ICC for all pairwise observer combinations ranged from 0.44-0.92 (poor-excellent). The CV ranged from 8.1-31.1 %. Centrum semiovale was the only reference tissue that showed excellent intra- and inter-observer agreement (ICC>0.85) and lowest CVs (<12.5 %). Bland-Altman analyses showed that mean differences for centrum semiovale were close to zero. + + Conclusion: Selecting contralateral centrum semiovale as reference tissue for rCBV provides the lowest observer variability.}, file = {:Oei18 - Observer Variability of Reference Tissue Selection for Relative Cerebral Blood Volume Measurements in Glioma Patients.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {29572637}, month = {3}, gsid = {6208890709619164011}, - gscites = {5}, + gscites = {10}, + ss_id = {3c92977e98bbb2991e1843f3c3a2941e651d019d}, + all_ss_ids = {['3c92977e98bbb2991e1843f3c3a2941e651d019d']}, } @article{Oest91, @@ -18145,6 +22444,33 @@ @article{Oest91a gscites = {4}, } +@article{Ogon22, + author = {Ogony, Joshua and de Bel, Thomas and Radisky, Derek C. and Kachergus, Jennifer and Thompson, E. Aubrey and Degnim, Amy C. and Ruddy, Kathryn J. and Hilton, Tracy and Stallings-Mann, Melody and Vachon, Celine and Hoskin, Tanya L. and Heckman, Michael G. and Vierkant, Robert A. and White, Launia J. and Moore, Raymond M. and Carter, Jodi and Jensen, Matthew and Pacheco-Spann, Laura and Henry, Jill E. and Storniolo, Anna Maria and Winham, Stacey J. and van der Laak, Jeroen and Sherman, Mark E.}, + title = {~~Towards defining morphologic parameters of normal parous and nulliparous breast tissues by artificial intelligence}, + doi = {10.1186/s13058-022-01541-z}, + year = {2022}, + abstract = {Abstract + Background + Breast terminal duct lobular units (TDLUs), the source of most breast cancer (BC) precursors, are shaped by age-related involution, a gradual process, and postpartum involution (PPI), a dramatic inflammatory process that restores baseline microanatomy after weaning. Dysregulated PPI is implicated in the pathogenesis of postpartum BCs. We propose that assessment of TDLUs in the postpartum period may have value in risk estimation, but characteristics of these tissues in relation to epidemiological factors are incompletely described. 
+ + Methods + Using validated Artificial Intelligence and morphometric methods, we analyzed digitized images of tissue sections of normal breast tissues stained with hematoxylin and eosin from donors <= 45 years from the Komen Tissue Bank (180 parous and 545 nulliparous). Metrics assessed by AI, included: TDLU count; adipose tissue fraction; mean acini count/TDLU; mean dilated acini; mean average acini area; mean "capillary" area; mean epithelial area; mean ratio of epithelial area versus intralobular stroma; mean mononuclear cell count (surrogate of immune cells); mean fat area proximate to TDLUs and TDLU area. We compared epidemiologic characteristics collected via questionnaire by parity status and race, using a Wilcoxon rank sum test or Fisher's exact test. Histologic features were compared between nulliparous and parous women (overall and by time between last birth and donation [recent birth: <= 5 years versus remote birth: > 5 years]) using multivariable regression models. + + Results + Normal breast tissues of parous women contained significantly higher TDLU counts and acini counts, more frequent dilated acini, higher mononuclear cell counts in TDLUs and smaller acini area per TDLU than nulliparas (all multivariable analyses p < 0.001). Differences in TDLU counts and average acini size persisted for > 5 years postpartum, whereas increases in immune cells were most marked <= 5 years of a birth. Relationships were suggestively modified by several other factors, including demographic and reproductive characteristics, ethanol consumption and breastfeeding duration. + + Conclusions + Our study identified sustained expansion of TDLU numbers and reduced average acini area among parous versus nulliparous women and notable increases in immune responses within five years following childbirth. Further, we show that quantitative characteristics of normal breast samples vary with demographic features and BC risk factors. + }, + url = {http://dx.doi.org/10.1186/s13058-022-01541-z}, + file = {Ogon22.pdf:pdf\\Ogon22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Breast Cancer Research}, + automatic = {yes}, + citation-count = {1}, + volume = {24}, +} + @article{Olac20, author = {Olaciregui-Ruiz, Igor and Torres-Xirau, Iban and Teuwen, Jonas and van der Heide, Uulke A. and Mans, Anton}, title = {A Deep Learning-based correction to {EPID} dosimetry for attenuation and scatter in the {Unity MR-Linac} system}, @@ -18152,20 +22478,23 @@ @article{Olac20 pages = {124-131}, volume = {71}, abstract = {Purpose: - EPID dosimetry in the Unity MR-Linac system allows for reconstruction of absolute dose distributions within the patient geometry. Dose reconstruction is accurate for the parts of the beam arriving at the EPID through the MRI central unattenuated region, free of gradient coils, resulting in a maximum field size of ~10x22 cm2 at isocentre. The purpose of this study is to develop a Deep Learning-based method to improve the accuracy of 2D EPID reconstructed dose distributions outside this central region, accounting for the effects of the extra attenuation and scatter. - - Methods: - A U-Net was trained to correct EPID dose images calculated at the isocenter inside a cylindrical phantom using the corresponding TPS dose images as ground truth for training. The model was evaluated using a 5-fold cross validation procedure. The clinical validity of the U-Net corrected dose images (the so-called DEEPID dose images) was assessed with in vivo verification data of 45 large rectum IMRT fields. 
The sensitivity of DEEPID to leaf bank position errors (+-1.5 mm) and +-5% MU delivery errors was also tested. - - Results: - Compared to the TPS, in vivo 2D DEEPID dose images showed an average g-pass rate of 90.2% (72.6%-99.4%) outside the central unattenuated region. Without DEEPID correction, this number was 44.5% (4.0%-78.4%). DEEPID correctly detected the introduced delivery errors . - Conclusions: DEEPID allows for accurate dose reconstruction using the entire EPID image, thus enabling dosimetric verification for field sizes up to ~19x22 cm2 at isocentre. The method can be used to detect clinically relevant errors.}, + EPID dosimetry in the Unity MR-Linac system allows for reconstruction of absolute dose distributions within the patient geometry. Dose reconstruction is accurate for the parts of the beam arriving at the EPID through the MRI central unattenuated region, free of gradient coils, resulting in a maximum field size of ~10x22 cm2 at isocentre. The purpose of this study is to develop a Deep Learning-based method to improve the accuracy of 2D EPID reconstructed dose distributions outside this central region, accounting for the effects of the extra attenuation and scatter. + + Methods: + A U-Net was trained to correct EPID dose images calculated at the isocenter inside a cylindrical phantom using the corresponding TPS dose images as ground truth for training. The model was evaluated using a 5-fold cross validation procedure. The clinical validity of the U-Net corrected dose images (the so-called DEEPID dose images) was assessed with in vivo verification data of 45 large rectum IMRT fields. The sensitivity of DEEPID to leaf bank position errors (+-1.5 mm) and +-5% MU delivery errors was also tested. + + Results: + Compared to the TPS, in vivo 2D DEEPID dose images showed an average g-pass rate of 90.2% (72.6%-99.4%) outside the central unattenuated region. Without DEEPID correction, this number was 44.5% (4.0%-78.4%). DEEPID correctly detected the introduced delivery errors . + Conclusions: DEEPID allows for accurate dose reconstruction using the entire EPID image, thus enabling dosimetric verification for field sizes up to ~19x22 cm2 at isocentre. The method can be used to detect clinically relevant errors.}, file = {Olac20.pdf:pdf\\Olac20.pdf:PDF}, journal = PHYSMED, optnote = {DIAG, RADIOLOGY}, pmid = {32135486}, year = {2020}, month = {3}, + ss_id = {98c18814c12cabb5b4b453141f28e6755527efbe}, + all_ss_ids = {['98c18814c12cabb5b4b453141f28e6755527efbe']}, + gscites = {10}, } @article{Olbr95, @@ -18184,6 +22513,57 @@ @article{Olbr95 gscites = {135}, } +@article{Omar20, + author = {Omar, Muhammad Imran and Roobol, Monique J. and Ribal, Maria J. 
and Abbott, Thomas and Agapow, Paul-Michael and Araujo, Sonia and Asiimwe, Alex and Auffray, Charles and Balaur, Irina and Beyer, Katharina and Bernini, Chiara and Bjartell, Anders and Briganti, Alberto and Butler-Ransohoff, John-Edward and Campi, Riccardo and Cavelaars, Marinel and De Meulder, Bertrand and Devecseri, Zsuzsanna and Voss, Marc Dietrich and Dimitropoulos, Konstantinos and Evans-Axelsson, Susan and Franks, Billy and Fullwood, Louise and Horgan, Denis and Smith, Emma Jane and Kiran, Amit and Kivinummi, Kati and Lambrecht, Mark and Lancet, Doron and Lindgren, Peter and MacLennan, Sara and MacLennan, Steven and Nogueira, Maria Manuela and Moen, Fredrik and Moinat, Maxim and Papineni, Kishore and Reich, Christian and Reiche, Kristin and Rogiers, Stijn and Sartini, Claudio and van Bochove, Kees and van Diggelen, Femke and Van Hemelrijck, Mieke and Van Poppel, Hein and Zong, Jihong and N'Dow, James and Andersson, Emelie and Arala, Heidi and Auvinen, Anssi and Bangma, Chris and Burke, Danny and Cardone, Antonella and Casariego, Joaquin and Cuperus, Guido and Dabestani, Saeed and Esperto, Francesco and Fossati, Nicola and Fridhammar, Adam and Gandaglia, Giorgio and Tandefelt, Delila Gasi and Horn, Friedemann and Huber, Johannes and Hugosson, Jonas and Huisman, Henkjan and Josefsson, Andreas and Kilkku, Olavi and Kreuz, Markus and Lardas, Michael and Lawson, Joe and Lefresne, Florence and Lejeune, Stephane and Longden-Chapman, Elaine and McVie, Gordon and Moris, Lisa and Mottet, Nicolas and Murtola, Teemu and Nicholls, Charlie and Pang, Karl H. and Pascoe, Katie and Picozzi, Marta and Plass, Karin and Pohjanjousi, Pasi and Reaney, Matthew and Remmers, Sebastiaan and Robinson, Paul and Schalken, Jack and Schravendeel, Max and Seisen, Thomas and Servan, Angela and Shiranov, Kirill and Snijder, Robert and Steinbeisser, Carl and Taibi, Nesrine and Talala, Kirsi and Tilki, Derya and Van den Broeck, Thomas and Vassilev, Zdravko and Voima, Olli and Vradi, Eleni and Waldeck, Reg and Weistra, Ward and Willemse, Peter-Paul and Wirth, Manfred and Wolfinger, Russ and Kermani, Nazanin Zounemat and The PIONEER Consortium}, + title = {~~Introducing PIONEER: a project to harness big data in prostate cancer research}, + doi = {10.1038/s41585-020-0324-x}, + year = {2020}, + abstract = {Prostate Cancer Diagnosis and Treatment Enhancement Through the Power of Big Data in Europe (PIONEER) is a European network of excellence for big data in prostate cancer, consisting of 32 private and public stakeholders from 9 countries across Europe. Launched by the Innovative Medicines Initiative 2 and part of the Big Data for Better Outcomes Programme (BD4BO), the overarching goal of PIONEER is to provide high-quality evidence on prostate cancer management by unlocking the potential of big data. The project has identified critical evidence gaps in prostate cancer care, via a detailed prioritization exercise including all key stakeholders. By standardizing and integrating existing high-quality and multidisciplinary data sources from patients with prostate cancer across different stages of the disease, the resulting big data will be assembled into a single innovative data platform for research. 
Based on a unique set of methodologies, PIONEER aims to advance the field of prostate cancer care with a particular focus on improving prostate-cancer-related outcomes, health system efficiency by streamlining patient management, and the quality of health and social care delivered to all men with prostate cancer and their families worldwide. Prostate Cancer Diagnosis and Treatment Enhancement Through the Power of Big Data in Europe (PIONEER) is a European network of excellence for big data in prostate cancer, consisting of 32 private and public stakeholders from 9 countries across Europe. In this Perspectives article, the authors introduce the PIONEER project and describe its aims and plans for ultimately improving prostate cancer care through the use of big data.}, + url = {http://dx.doi.org/10.1038/s41585-020-0324-x}, + file = {Omar20.pdf:pdf\\Omar20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Nature Reviews Urology}, + automatic = {yes}, + citation-count = {12}, + pages = {351-362}, + volume = {17}, + pmid = {32461687}, +} + +@article{Omar23, + author = {Omar, Muhammad Imran and MacLennan, Steven and Ribal, Maria J. and Roobol, Monique J. and Dimitropoulos, Konstantinos and van den Broeck, Thomas and MacLennan, Sara J. and Axelsson, Susan Evans and Gandaglia, Giorgio and Willemse, Peter-Paul and Mastris, Ken and Ransohoff, John Butler and Devecseri, Zsuzsanna and Abbott, Thomas and De Meulder, Bertrand and Bjartell, Anders and Asiimwe, Alex and N'Dow, James and Smith, Emma and Plass, Karin and Mottet, Nicolas and Shepherd, Robert and Moris, Lisa and Lardas, Michael and Fossati, Nicola and Pang, Karl and Campi, Riccardo and Greco, Isabella and Gacci, Mauro and Serni, Sergio and Lonnerbro, Ragnar and Briganti, Alberto and Crosti, Daniele and Garzonio, Roberto and Faticoni, Martina and Bangma, Chris and Roest, Eliza and Breederland, Arjan and Remmers, Sebastiaan and Tilki, Derya and Auvinen, Anssi and Murtola, Teemu and Visakorpi, Tapio and Talala, Kirsi and Tammela, Teuvo and Siltari, Aino and Van Hemelrijck, Mieke and Beyer, Katharina and Lejeune, Stephane and Colette, Laurence and Caputova, Simona and Poli, Delielena and van Dorp, Sigrid and Byrne, Sophie and Fialho, Luz and Rowland, Ashley and Tapela, Neo and Ugolini, Francesco and Auffray, Charles and Taibi, Nesrine and Hijazy, Ayman and Saporta, Albert and Sun, Kai and Power, Shaun and Kermani, Nazanin Zounemat and van Bochove, Kees and Moinat, Maxim and Kalafati, Mirella and Tafreshiha, Azadeh and Bernini, Chiara and Hlavati, Kristina and Horgan, Denis and Fullwood, Louise and Holtorf, Marc and Lancet, Doron and Bernstein, Gabi and Tripathee, Sheela and Wirth, Manfred and Froehner, Michael and Brenner, Beate and Borkowetz, Angelika and Thomas, Christian and Horn, Friedemann and Reiche, Kristin and Kreuz, Markus and Josefsson, Andreas and Tandefelt, Delila Gasi and Hugosson, Jonas and Schalken, Jack and Huisman, Henkjan and Hofmarcher, Thomas and Lindgren, Peter and Andersson, Emelie and Fridhammar, Adam and Grijalva, Monica Tames and Verholen, Frank and Zong, Jihong and Williamson, Todd and Chandrawansa, Kumari and Waldeck, Reg and Bruno, Amanda and Herrera, Ronald and Nevedomskaya, Ekaterina and Fatoba, Samuel and Constantinovici, Niculae and Mohamed, Ateesha and Steinbeisser, Carl and Maass, Monika and Torremante, Patrizia and Dochy, Emmanuelle and Pisa, Federica and Voss, Marc Dietrich and Kiran, Amit and Papineni, Kishore and Wang-silvanto, Jing and Snijder, Robert and Wang, Xuewei and Lambrecht, Mark and Wolfinger, 
Russ and Antoni, Laurent and Servan, Angela and Pascoe, Katie and Robinson, Paul and Jaton, Bertrand and Bakkard, Daniel and Turunen, Heidi and Kilkku, Olavi and Pohjanjousi, Pasi and Voima, Olli and Nevalaita, Liina and Punakivi, Keijo and Reich, Christian and Seager, Sarah and Ratwani, Shilpa and Longden-Chapman, Elaine and Burke, Danny and Licour, Muriel and Payne, Sarah and Yong, Alan and Lujan, Flavia and Le Mare, Sophia and Hendrich, Jan and Bussmann, Michael and Juckeland, Guido and Kotik, Daniel and The PIONEER Consortium}, + title = {~~Unanswered questions in prostate cancer -- findings of an international multi-stakeholder consensus by the PIONEER consortium}, + doi = {10.1038/s41585-023-00748-9}, + year = {2023}, + abstract = {PIONEER is a European network of excellence for big data in prostate cancer consisting of 37 private and public stakeholders from 9 countries across Europe. Many progresses have been done in prostate cancer management, but unanswered questions in the field still exist, and big data could help to answer these questions. The PIONEER consortium conducted a two-round modified Delphi survey aiming at building consensus between two stakeholder groups -- health-care professionals and patients with prostate cancer -- about the most important questions in the field of prostate cancer to be answered using big data. Respondents were asked to consider what would be the effect of answering the proposed questions on improving diagnosis and treatment outcomes for patients with prostate cancer and to score these questions on a scale of 1 (not important) to 9 (critically important). The mean percentage of participants who scored each of the proposed questions as critically important was calculated across the two stakeholder groups and used to rank the questions and identify the highest scoring questions in the critically important category. The identification of questions in prostate cancer that are important to various stakeholders will help the PIONEER consortium to provide answers to these questions to improve the clinical care of patients with prostate cancer. In this Consensus Statement, the authors present results from an international multi-stakeholder consensus conducted by the PIONEER consortium to identify the most important questions in the field of prostate cancer that could be addressed using big data.}, + url = {http://dx.doi.org/10.1038/s41585-023-00748-9}, + file = {Omar23.pdf:pdf\\Omar23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Nature Reviews Urology}, + automatic = {yes}, + citation-count = {1}, + pages = {494-501}, + volume = {20}, + pmid = {37012441}, +} + +@article{Oost15, + author = {Oosterwijk-Wakka, Jeannette C. and de Weijert, Mirjam C.A. and Franssen, Gerben M. and Leenders, William P.J. and van der Laak, Jeroen A.W.M. and Boerman, Otto C. and Mulders, Peter F.A. 
and Oosterwijk, Egbert}, + title = {~~Successful Combination of Sunitinib and Girentuximab in Two Renal Cell Carcinoma Animal Models: A Rationale for Combination Treatment of Patients with Advanced RCC}, + doi = {10.1016/j.neo.2014.12.011}, + year = {2015}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.neo.2014.12.011}, + file = {Oost15.pdf:pdf\\Oost15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Neoplasia}, + automatic = {yes}, + citation-count = {10}, + pages = {215-224}, + volume = {17}, + pmid = {25748241}, +} + @article{Osch05, author = {Elisabeth Oschatz and Mathias Prokop and Martina Scharitzer and Michael Weber and Csilla Balassy and Cornelia {Schaefer-Prokop}}, title = {Comparison of liquid crystal versus cathode ray tube display for the detection of simulated chest lesions}, @@ -18214,6 +22594,22 @@ @article{Otte05 month = {5}, gsid = {12384820780576153322}, gscites = {115}, + ss_id = {395ad39f7459433b4b7a43aa100b5e681ee28a7d}, + all_ss_ids = {['395ad39f7459433b4b7a43aa100b5e681ee28a7d']}, +} + +@article{Otál22, + author = {Ot\'{a}lora, Sebastian and Marini, Niccol\'{o} and Podareanu, Damian and Hekster, Ruben and Tellez, David and Van Der Laak, Jeroen and M\"{u}ller, Henning and Atzori, Manfredo}, + title = {~~stainlib: a python library for augmentation and normalization of histopathology H&E images}, + doi = {10.1101/2022.05.17.492245}, + year = {2022}, + abstract = {AbstractComputational pathology is a domain of increasing scientific and social interest. The automatic analysis of histopathology images stained with Hematoxylin and Eosin (H&E) can help clinicians diagnose and quantify diseases. Computer vision methods based on deep learning can perform on par or better than pathologists in specific tasks [1, 2, 15]. Nevertheless, the visual heterogeneity in histopathology images due to batch effects, differences in preparation in different pathology laboratories, and the scanner can produce tissue appearance changes in the digitized whole-slide images. Such changes impede the application of the trained models in clinical scenarios where there is high variability in the images. We introduce stainlib, an easy-to-use and expandable python3 library that collects and unifies state-of-the-art methods for color augmentation and normalization of histopathology H&E images. stainlib also contains recent deep learning-based approaches that perform a robust stain-invariant training of CNN models. stainlib can help researchers build models robust to color domain shift by augmenting and harmonizing the training data, allowing the deployment of better models in the digital pathology practice.}, + url = {http://dx.doi.org/10.1101/2022.05.17.492245}, + file = {Otal22.pdf:pdf\\Otal22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Preprint}, + automatic = {yes}, + citation-count = {4}, } @mastersthesis{Oude19, @@ -18221,12 +22617,14 @@ @mastersthesis{Oude19 title = {Reversible Networks for Memory-efficient Image-to-Image Translation in 3D Medical Imaging}, year = {2019}, abstract = {The Pix2pix and CycleGAN losses have vastly improved the qualitative and quantitative visual quality of results in image-to-image translation tasks. We extend this framework by exploring approximately invertible architectures which are well suited to these losses. These architectures are approximately invertible by design and thus partially satisfy cycle-consistency before training even begins. 
Furthermore, since invertible architectures have constant memory complexity in depth, these models can be built arbitrarily deep. We are able to demonstrate superior quantitative output on the Cityscapes and Maps datasets. - - Additionally, we show that the model allows us to perform several memory-intensive medical imaging tasks, including a super-resolution problem on 3D MRI brain volumes. We also demonstrate that our model can perform a 3D domain-adaptation and 3D super-resolution task on chest CT volumes. By doing this, we provide a proof-of-principle for using reversible networks to create a model capable of pre-processing 3D CT scans to high resolution with a standardized appearance.}, + + Additionally, we show that the model allows us to perform several memory-intensive medical imaging tasks, including a super-resolution problem on 3D MRI brain volumes. We also demonstrate that our model can perform a 3D domain-adaptation and 3D super-resolution task on chest CT volumes. By doing this, we provide a proof-of-principle for using reversible networks to create a model capable of pre-processing 3D CT scans to high resolution with a standardized appearance.}, file = {Oude19.pdf:pdf/Oude19.pdf:PDF}, optnote = {DIAG}, school = {University of Amsterdam}, journal = {Master thesis}, + all_ss_ids = {03ef312b3d3e616fd7f0a2f2260c82ad62ed7ef1}, + gscites = {0}, } @inproceedings{Oude19a, @@ -18238,6 +22636,47 @@ @inproceedings{Oude19a abstract = {Medical imaging data are typically large in size. As a result, it can be difficult to train deep neural models on them. The activations of invertible neural networks do not have to be stored to perform backpropagation, therefore such networks can be used to save memory when handling large data volumes. We use a technique called additive coupling to obtain a memory-efficient partially-reversible image-to-image translation model. With this model, we perform a 3D super-resolution and 3D domain-adaptation task, on both paired and unpaired CT scan data. Additionally, we demonstrate experimentally that the model requires significantly less GPU memory than models without reversibility. 
}, file = {:pdf/Oude19a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {0ad54bc84119cf0a8ff04d939cc0b4575d7e9b5a}, + all_ss_ids = {['0ad54bc84119cf0a8ff04d939cc0b4575d7e9b5a']}, + gscites = {5}, } +@article{Page23, + author = {Page, David B and Broeckx, Glenn and Jahangir, Chowdhury Arif and Verbandt, Sara and Gupta, Rajarsi R and Thagaard, Jeppe and Khiroya, Reena and Kos, Zuzana and Abduljabbar, Khalid and Acosta Haab, Gabriela and Acs, Balazs and Akturk, Guray and Almeida, Jonas S and Alvarado-Cabrero, Isabel and Azmoudeh-Ardalan, Farid and Badve, Sunil and Baharun, Nurkhairul Bariyah and Bellolio, Enrique R and Bheemaraju, Vydehi and Blenman, Kim RM and Botinelly Mendon\c{c}a Fujimoto, Luciana and Bouchmaa, Najat and Burgues, Octavio and Cheang, Maggie Chon U and Ciompi, Francesco and Cooper, Lee AD and Coosemans, An and Corredor, Germ\'{a}n and Dantas Portela, Flavio Luis and Deman, Frederik and Demaria, Sandra and Dudgeon, Sarah N and Elghazawy, Mahmoud and Ely, Scott and Fernandez-Mart\'{i}n, Claudio and Fineberg, Susan and Fox, Stephen B and Gallagher, William M and Giltnane, Jennifer M and Gnjatic, Sacha and Gonzalez-Ericsson, Paula I and Grigoriadis, Anita and Halama, Niels and Hanna, Matthew G and Harbhajanka, Aparna and Hardas, Alexandros and Hart, Steven N and Hartman, Johan and Hewitt, Stephen and Hida, Akira I and Horlings, Hugo M and Husain, Zaheed and Hytopoulos, Evangelos and Irshad, Sheeba and Janssen, Emiel AM and Kahila, Mohamed and Kataoka, Tatsuki R and Kawaguchi, Kosuke and Kharidehal, Durga and Khramtsov, Andrey I and Kiraz, Umay and Kirtani, Pawan and Kodach, Liudmila L and Korski, Konstanty and Kov\'{a}cs, Anik\'{o} and Laenkholm, Anne-Vibeke and Lang-Schwarz, Corinna and Larsimont, Denis and Lennerz, Jochen K and Lerousseau, Marvin and Li, Xiaoxian and Ly, Amy and Madabhushi, Anant and Maley, Sai K and Manur Narasimhamurthy, Vidya and Marks, Douglas K and McDonald, Elizabeth S and Mehrotra, Ravi and Michiels, Stefan and Minhas, Fayyaz ul Amir Afsar and Mittal, Shachi and Moore, David A and Mushtaq, Shamim and Nighat, Hussain and Papathomas, Thomas and Penault-Llorca, Frederique and Perera, Rashindrie D and Pinard, Christopher J and Pinto-Cardenas, Juan Carlos and Pruneri, Giancarlo and Pusztai, Lajos and Rahman, Arman and Rajpoot, Nasir Mahmood and Rapoport, Bernardo Leon and Rau, Tilman T and Reis-Filho, Jorge S and Ribeiro, Joana M and Rimm, David and Vincent-Salomon, Anne and Salto-Tellez, Manuel and Saltz, Joel and Sayed, Shahin and Siziopikou, Kalliopi P and Sotiriou, Christos and Stenzinger, Albrecht and Sughayer, Maher A and Sur, Daniel and Symmans, Fraser and Tanaka, Sunao and Taxter, Timothy and Tejpar, Sabine and Teuwen, Jonas and Thompson, E Aubrey and Tramm, Trine and Tran, William T and van der Laak, Jeroen and van Diest, Paul J and Verghese, Gregory E and Viale, Giuseppe and Vieth, Michael and Wahab, Noorul and Walter, Thomas and Waumans, Yannick and Wen, Hannah Y and Yang, Wentao and Yuan, Yinyin and Adams, Sylvia and Bartlett, John Mark Seaverns and Loibl, Sibylle and Denkert, Carsten and Savas, Peter and Loi, Sherene and Salgado, Roberto and Specht Stovgaard, Elisabeth}, + title = {Spatial analyses of immune cell infiltration in cancer: current methods and future directions: A report of the International Immuno-Oncology Biomarker Working Group on Breast Cancer}, + doi = {10.1002/path.6165}, + year = {2023}, + abstract = {Modern histologic imaging platforms
coupled with machine learning methods have provided new opportunities to map the spatial distribution of immune cells in the tumor microenvironment. However, there exists no standardized method for describing or analyzing spatial immune cell data, and most reported spatial analyses are rudimentary. In this review, we provide an overview of two approaches for reporting and analyzing spatial data (raster versus vector-based). We then provide a compendium of spatial immune cell metrics that have been reported in the literature, summarizing prognostic associations in the context of a variety of cancers. We conclude by discussing two well-described clinical biomarkers, the breast cancer stromal tumor infiltrating lymphocytes score and the colon cancer Immunoscore, and describe investigative opportunities to improve clinical utility of these spatial biomarkers. (c) 2023 The Pathological Society of Great Britain and Ireland.}, + url = {http://dx.doi.org/10.1002/path.6165}, + file = {Page23.pdf:pdf\Page23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {The Journal of Pathology}, + citation-count = {1}, + automatic = {yes}, + pages = {514-532}, + volume = {260}, + ss_id = {1ad43eea9d49d403c8155ebd4a85bfc87f0d05ca}, + all_ss_ids = {['1ad43eea9d49d403c8155ebd4a85bfc87f0d05ca']}, + gscites = {9}, +} + +@article{Palm23, + author = {Palmer, Megan and Seddon, James A. and van der Zalm, Marieke M. and Hesseling, Anneke C. and Goussard, Pierre and Schaaf, H. Simon and Morrison, Julie and van Ginneken, Bram and Melendez, Jaime and Walters, Elisabetta and Murphy, Keelin}, + title = {Optimising computer aided detection to identify intra-thoracic tuberculosis on chest x-ray in South African children}, + doi = {10.1371/journal.pgph.0001799}, + year = {2023}, + abstract = {Diagnostic tools for paediatric tuberculosis remain limited, with heavy reliance on clinical algorithms which include chest x-ray. Computer aided detection (CAD) for tuberculosis on chest x-ray has shown promise in adults. We aimed to measure and optimise the performance of an adult CAD system, CAD4TB, to identify tuberculosis on chest x-rays from children with presumptive tuberculosis. Chest x-rays from 620 children <13 years enrolled in a prospective observational diagnostic study in South Africa, were evaluated. All chest x-rays were read by a panel of expert readers who attributed each with a radiological reference of either 'tuberculosis' or 'not tuberculosis'. Of the 525 chest x-rays included in this analysis, 80 (40 with a reference of 'tuberculosis' and 40 with 'not tuberculosis') were allocated to an independent test set. The remainder made up the training set. The performance of CAD4TB to identify 'tuberculosis' versus 'not tuberculosis' on chest x-ray against the radiological reference read was calculated. The CAD4TB software was then fine-tuned using the paediatric training set. We compared the performance of the fine-tuned model to the original model. Our findings were that the area under the receiver operating characteristic curve (AUC) of the original CAD4TB model, prior to fine-tuning, was 0.58. After fine-tuning there was an improvement in the AUC to 0.72 (p = 0.0016). In this first-ever description of the use of CAD to identify tuberculosis on chest x-ray in children, we demonstrate a significant improvement in the performance of CAD4TB after fine-tuning with a set of well-characterised paediatric chest x-rays. CAD has the potential to be a useful additional diagnostic tool for paediatric tuberculosis. 
We recommend replicating the methods we describe using a larger chest x-ray dataset from a more diverse population and evaluating the potential role of CAD to replace a human-read chest x-ray within treatment-decision algorithms for paediatric tuberculosis.}, + url = {http://dx.doi.org/10.1371/journal.pgph.0001799}, + file = {Palm23.pdf:pdf\Palm23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {PLOS Global Public Health}, + citation-count = {1}, + automatic = {yes}, + pages = {e0001799}, + volume = {3}, + all_ss_ids = {8a039fe22daf6f65a32ba02035f45a8c67b48339}, + pmid = {37192175}, + gscites = {0}, } @article{Pate10, @@ -18262,13 +22701,13 @@ @conference{Pate17 booktitle = ECR, year = {2017}, abstract = {PURPOSE - Cranial cavity segmentation in CT is the essential first step for subsequent image processing and automated detection of cerebral pathology. This becomes complicated in the presence of skull fractures, metallic foreign objects or due to connected soft tissues such as the orbit. A robust and accurate method is presented to segment the cranial cavity in CT images. - METHOD AND MATERIALS - We propose a multi-atlas based method that uses atlas selection based on anterior skull variations, followed by a two-stage levelset refinement. The method was developed using a set of 99 non-contrast CT and 18 CT perfusion (CTP) scans obtained for emergency indications on a 320-row detector CT scanner. It was evaluated on a different set of 200 non-contrast CT and 100 CTP scans obtained for the same indications. Quality of segmentations was visually assessed. The reference standard consisted of three randomly selected orthogonal slices per patient that were manually annotated by trained observers. The corresponding slices were extracted and compared to the reference standard. Dice similarity coefficient (DSC) and 95th percentile Hausdorff distance (95% HD) were reported. - RESULTS - The segmentation results were evaluated as very good to excellent. The method achieved a mean DSC of 0.98 +- 0.03 and mean 95% HD of 0.60 +- 2.15 mm in comparison to the reference standard. - CONCLUSION - The proposed method is capable of accurate segmentation of the cranial cavity in non-contrast CT and CTP independent of gross pathology or foreign objects. The method provides a fundamental first step towards automated evaluation of cranial CT.}, + Cranial cavity segmentation in CT is the essential first step for subsequent image processing and automated detection of cerebral pathology. This becomes complicated in the presence of skull fractures, metallic foreign objects or due to connected soft tissues such as the orbit. A robust and accurate method is presented to segment the cranial cavity in CT images. + METHOD AND MATERIALS + We propose a multi-atlas based method that uses atlas selection based on anterior skull variations, followed by a two-stage levelset refinement. The method was developed using a set of 99 non-contrast CT and 18 CT perfusion (CTP) scans obtained for emergency indications on a 320-row detector CT scanner. It was evaluated on a different set of 200 non-contrast CT and 100 CTP scans obtained for the same indications. Quality of segmentations was visually assessed. The reference standard consisted of three randomly selected orthogonal slices per patient that were manually annotated by trained observers. The corresponding slices were extracted and compared to the reference standard. Dice similarity coefficient (DSC) and 95th percentile Hausdorff distance (95% HD) were reported. 
+ RESULTS + The segmentation results were evaluated as very good to excellent. The method achieved a mean DSC of 0.98 +- 0.03 and mean 95% HD of 0.60 +- 2.15 mm in comparison to the reference standard. + CONCLUSION + The proposed method is capable of accurate segmentation of the cranial cavity in non-contrast CT and CTP independent of gross pathology or foreign objects. The method provides a fundamental first step towards automated evaluation of cranial CT.}, optnote = {DIAG}, } @@ -18285,7 +22724,9 @@ @inproceedings{Pate17a optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {465482443091202827}, - gscites = {6}, + gscites = {8}, + ss_id = {be15a442d2cf19f1bbbd779273055a2de727380c}, + all_ss_ids = {['be15a442d2cf19f1bbbd779273055a2de727380c']}, } @article{Pate17b, @@ -18304,8 +22745,10 @@ @article{Pate17b pmid = {28011374}, publisher = {Elsevier {BV}}, gsid = {5343166311394276117}, - gscites = {23}, + gscites = {26}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/170408}, + ss_id = {3f054da4c1d6606941c18c65ae6e435a49758b7b}, + all_ss_ids = {['3f054da4c1d6606941c18c65ae6e435a49758b7b']}, } @inproceedings{Pate18, @@ -18321,7 +22764,9 @@ @inproceedings{Pate18 optnote = {DIAG}, month = {2}, gsid = {18260974248343960899}, - gscites = {3}, + gscites = {4}, + ss_id = {3a0848c228f2bb19ce1afd4871105eb6a317a3dd}, + all_ss_ids = {['3a0848c228f2bb19ce1afd4871105eb6a317a3dd']}, } @article{Pate19, @@ -18338,7 +22783,9 @@ @article{Pate19 file = {:pdf/Pate19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {6029967581749349651}, - gscites = {3}, + gscites = {41}, + ss_id = {59399693fa81c3142552dac2e4456eb066e62c60}, + all_ss_ids = {['59399693fa81c3142552dac2e4456eb066e62c60']}, } @article{Pate19a, @@ -18351,21 +22798,50 @@ @article{Pate19a pages = {17858}, doi = {10.1038/s41598-019-54491-6}, abstract = {A 3-dimensional (3D) convolutional neural network is presented for the segmentation and quantification of spontaneous - intracerebral haemorrhage (ICH) in non-contrast computed tomography (NCCT). The method utilises a combination of - contextual information on multiple scales for fast and fully automatic dense predictions. To handle a large class imbalance - present in the data, a weight map is introduced during training. The method was evaluated on two datasets of 25 and 50 - patients respectively. The reference standard consisted of manual annotations for each ICH in the dataset. Quantitative - analysis showed a median Dice similarity coefficient of 0.91 [0.87 - 0.94] and 0.90 [0.85 - 0.92] for the two test datasets in - comparison to the reference standards. Evaluation of a separate dataset of 5 patients for the assessment of the observer - variability produced a mean Dice similarity coefficient of 0.95 +/- 0.02 for the inter-observer variability and 0.97 +/- 0.01 for the - intra-observer variability. The average prediction time for an entire volume was 104 +/- 15 seconds. The results demonstrate - that the method is accurate and approaches the performance of expert manual annotation.}, + intracerebral haemorrhage (ICH) in non-contrast computed tomography (NCCT). The method utilises a combination of + contextual information on multiple scales for fast and fully automatic dense predictions. To handle a large class imbalance + present in the data, a weight map is introduced during training. The method was evaluated on two datasets of 25 and 50 + patients respectively. The reference standard consisted of manual annotations for each ICH in the dataset. 
Quantitative + analysis showed a median Dice similarity coefficient of 0.91 [0.87 - 0.94] and 0.90 [0.85 - 0.92] for the two test datasets in + comparison to the reference standards. Evaluation of a separate dataset of 5 patients for the assessment of the observer + variability produced a mean Dice similarity coefficient of 0.95 +/- 0.02 for the inter-observer variability and 0.97 +/- 0.01 for the + intra-observer variability. The average prediction time for an entire volume was 104 +/- 15 seconds. The results demonstrate + that the method is accurate and approaches the performance of expert manual annotation.}, file = {:pdf/Pate19a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {31780815}, month = {11}, gsid = {102660831423117160}, - gscites = {1}, + gscites = {36}, + ss_id = {03254edc77c0459ad9fd9ea458c3ae171c0a4247}, + all_ss_ids = {['03254edc77c0459ad9fd9ea458c3ae171c0a4247']}, +} + +@phdthesis{Pate23, + author = {Ajay Patel}, + title = {Automated Image Analysis of Cranial Non-Contrast CT}, + url = {https://repository.ubn.ru.nl/handle/2066/292990}, + abstract = {Stroke and intracranial hemorrhage (a brain bleed) are serious medical conditions that require fast and accurate diagnosis to aid clinicians in making treatment decisions. Computed tomography (CT) is a widely available and fast imaging technique used for diagnosis, but it relies on interpretation by a clinician. To aid in the diagnosis of stroke and intracranial hemorrhage, artificial intelligence algorithms for computer-aided diagnosis (CAD) have been developed to automatically analyze CT images. This research presents different methods that demonstrate accurate segmentations of large cerebral anatomy and the ability to quantify and segment intracerebral hemorrhages for further analysis. Whole 3D non-contrast CT images were also analyzed automatically to identify the presence of intracranial hemorrhage with high sensitivity. With further development, CAD systems will be able to assist physicians in diagnosing and predicting outcomes of stroke and intracranial hemorrhage in clinical settings.}, + copromotor = {R. Manniesing}, + file = {:pdf/Pate23.pdf:PDF}, + journal = {PhD thesis}, + optnote = {DIAG, RADIOLOGY}, + promotor = {B. van Ginneken and M. Prokop}, + school = {Radboud University, Nijmegen}, + year = {2023}, +} + +@article{Pawl19, + author = {Pawlowski, Nick and Bhooshan, Suvrat and Ballas, Nicolas and Ciompi, Francesco and Glocker, Ben and Drozdzal, Michal}, + title = {~~Needles in Haystacks: On Classifying Tiny Objects in Large Images}, + doi = {10.48550/ARXIV.1908.06037}, + year = {2019}, + abstract = {In some important computer vision domains, such as medical or hyperspectral imaging, we care about the classification of tiny objects in large images. However, most Convolutional Neural Networks (CNNs) for image classification were developed using biased datasets that contain large objects, in mostly central image positions. To assess whether classical CNN architectures work well for tiny object classification we build a comprehensive testbed containing two datasets: one derived from MNIST digits and one from histopathology images. This testbed allows controlled experiments to stress-test CNN architectures with a broad spectrum of signal-to-noise ratios. 
Our observations indicate that: (1) There exists a limit to signal-to-noise below which CNNs fail to generalize and that this limit is affected by dataset size - more data leading to better performances; however, the amount of training data required for the model to generalize scales rapidly with the inverse of the object-to-image ratio (2) in general, higher capacity models exhibit better generalization; (3) when knowing the approximate object sizes, adapting receptive field is beneficial; and (4) for very small signal-to-noise ratio the choice of global pooling operation affects optimization, whereas for relatively large signal-to-noise values, all tested global pooling operations exhibit similar performance.}, + url = {https://arxiv.org/abs/1908.06037}, + file = {Pawl19.pdf:pdf\\Pawl19.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:1908.06037}, + automatic = {yes}, } @mastersthesis{Paye20, @@ -18377,6 +22853,8 @@ @mastersthesis{Paye20 optnote = {DIAG}, school = {Radboud University}, journal = {Master thesis}, + all_ss_ids = {ed8cbf0b3373cf1dcadd2718dfd3daf6fbe068f3}, + gscites = {0}, } @inproceedings{Peem13, @@ -18392,6 +22870,19 @@ @inproceedings{Peem13 month = {10}, } +@conference{Peet23, + author = {Peeters, Dr\'{e} and Alves, Nat\'{a}lia and Venkadesh, K and Dinnessen, R and Saghir, Z and Scholten, E and Huisman, H and Schaefer-Prokop, C and Vliegenthart, R and Prokop, M and Jacobs, C}, + booktitle = ECR, + title = {The effect of applying an uncertainty estimation method on the performance of a deep learning model for nodule malignancy risk estimation}, + abstract = {Purpose: Artificial Intelligence (AI) algorithms often lack uncertainty estimation for classification tasks. Uncertainty estimation may however be an important requirement for clinical adoption of AI algorithms. In this study, we integrate a method for uncertainty estimation into a previously developed AI algorithm and investigate the performance when applying different uncertainty thresholds. + Methods and materials: We used a retrospective external validation dataset from the Danish Lung Cancer Screening Trial, containing 818 benign and 65 malignant nodules. Our previously developed AI algorithm for nodule malignancy risk estimation was extended with a method for measuring the prediction uncertainty. The uncertainty score (UnS) was calculated by measuring the standard deviation over 20 different predictions of an ensemble of AI models. Two UnS thresholds at the 90th and 95th percentile were applied to retain 90% and 95% of all cases as certain, respectively. For these scenarios, we calculated the area under the ROC curve (AUC) for certain and uncertain cases, and for the full set of nodules. + Results: On the full set of 883 nodules, the AUC of the AI risk score was 0.932. For the 90th and 95th percentile, the AUC of the AI risk score for certain cases was 0.934 and 0.935, respectively, and for the uncertain cases was 0.710 and 0.688, respectively. + Conclusion: In this retrospective data set, we demonstrate that integrating an uncertainty estimation method into a deep learning-based nodule malignancy risk estimation algorithm slightly increased the performance on certain cases. The AI performance is substantially worse on uncertain cases and therefore in need of human visual review. + Limitations: This study is a retrospective analysis on data from one single lung cancer screening trial. 
More external validation is needed.}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, +} + @conference{Pegg17a, author = {Sjoert AH Pegge and Midas Meijs and Mathias Prokop and Rashindra Manniesing and Frederick JA Meijer}, title = {Color-mapping of {4D-CTA} for the detection and classification of cranial arteriovenous fistulas}, @@ -18402,6 +22893,39 @@ @conference{Pegg17a optnote = {DIAG, RADIOLOGY}, } +@article{Peis22, + author = {Peisen, Felix and H\"{a}nsch, Annika and Hering, Alessa and Brendlin, Andreas S. and Afat, Saif and Nikolaou, Konstantin and Gatidis, Sergios and Eigentler, Thomas and Amaral, Teresa and Moltz, Jan H. and Othman, Ahmed E.}, + title = {~~Combination of Whole-Body Baseline CT Radiomics and Clinical Parameters to Predict Response and Survival in a Stage-IV Melanoma Cohort Undergoing Immunotherapy}, + doi = {10.3390/cancers14122992}, + year = {2022}, + abstract = {Background: This study investigated whether a machine-learning-based combination of radiomics and clinical parameters was superior to the use of clinical parameters alone in predicting therapy response after three months, and overall survival after six and twelve months, in stage-IV malignant melanoma patients undergoing immunotherapy with PD-1 checkpoint inhibitors and CTLA-4 checkpoint inhibitors. Methods: A random forest model using clinical parameters (demographic variables and tumor markers = baseline model) was compared to a random forest model using clinical parameters and radiomics (extended model) via repeated 5-fold cross-validation. For this purpose, the baseline computed tomographies of 262 stage-IV malignant melanoma patients treated at a tertiary referral center were identified in the Central Malignant Melanoma Registry, and all visible metastases were three-dimensionally segmented (n = 6404). Results: The extended model was not significantly superior compared to the baseline model for survival prediction after six and twelve months (AUC (95% CI): 0.664 (0.598, 0.729) vs. 0.620 (0.545, 0.692) and AUC (95% CI): 0.600 (0.526, 0.667) vs. 0.588 (0.481, 0.629), respectively). The extended model was not significantly superior compared to the baseline model for response prediction after three months (AUC (95% CI): 0.641 (0.581, 0.700) vs. 0.656 (0.587, 0.719)). Conclusions: The study indicated a potential, but non-significant, added value of radiomics for six-month and twelve-month survival prediction of stage-IV melanoma patients undergoing immunotherapy.}, + url = {http://dx.doi.org/10.3390/cancers14122992}, + file = {Peis22.pdf:pdf\\Peis22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Cancers}, + automatic = {yes}, + citation-count = {9}, + pages = {2992}, + volume = {14}, + pmid = {35740659}, +} + +@article{Peis23, + author = {Peisen, Felix and Gerken, Annika and Hering, Alessa and Dahm, Isabel and Nikolaou, Konstantin and Gatidis, Sergios and Eigentler, Thomas K. and Amaral, Teresa and Moltz, Jan H. 
and Othman, Ahmed E.}, + title = {~~Can Whole-Body Baseline CT Radiomics Add Information to the Prediction of Best Response, Progression-Free Survival, and Overall Survival of Stage IV Melanoma Patients Receiving First-Line Targeted Therapy: A Retrospective Register Study}, + doi = {10.3390/diagnostics13203210}, + year = {2023}, + abstract = {Background: The aim of this study was to investigate whether the combination of radiomics and clinical parameters in a machine-learning model offers additive information compared with the use of only clinical parameters in predicting the best response, progression-free survival after six months, as well as overall survival after six and twelve months in patients with stage IV malignant melanoma undergoing first-line targeted therapy. Methods: A baseline machine-learning model using clinical variables (demographic parameters and tumor markers) was compared with an extended model using clinical variables and radiomic features of the whole tumor burden, utilizing repeated five-fold cross-validation. Baseline CTs of 91 stage IV malignant melanoma patients, all treated in the same university hospital, were identified in the Central Malignant Melanoma Registry and all metastases were volumetrically segmented (n = 4727). Results: Compared with the baseline model, the extended radiomics model did not add significantly more information to the best-response prediction (AUC [95% CI] 0.548 (0.188, 0.808) vs. 0.487 (0.139, 0.743)), the prediction of PFS after six months (AUC [95% CI] 0.699 (0.436, 0.958) vs. 0.604 (0.373, 0.867)), or the overall survival prediction after six and twelve months (AUC [95% CI] 0.685 (0.188, 0.967) vs. 0.766 (0.433, 1.000) and AUC [95% CI] 0.554 (0.163, 0.781) vs. 0.616 (0.271, 1.000), respectively). Conclusions: The results showed no additional value of baseline whole-body CT radiomics for best-response prediction, progression-free survival prediction for six months, or six-month and twelve-month overall survival prediction for stage IV melanoma patients receiving first-line targeted therapy. These results need to be validated in a larger cohort.}, + url = {http://dx.doi.org/10.3390/diagnostics13203210}, + file = {Peis23.pdf:pdf\\Peis23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Diagnostics}, + automatic = {yes}, + citation-count = {0}, + pages = {3210}, + volume = {13}, +} + @article{Pelo07, author = {P. Peloschek and J. Sailer and M. Weber and C. J. Herold and M. Prokop and C. M. Schaefer-Prokop}, title = {Pulmonary nodules: sensitivity of maximum intensity projection versus that of volume rendering of {3D} multidetector {CT} data}, @@ -18418,6 +22942,21 @@ @article{Pelo07 gscites = {71}, } +@article{Penz21, + author = {Penzkofer, Tobias and Padhani, Anwar R and Turkbey, Baris and Haider, Masoom A and Huisman, Henkjan and Walz, Jochen and Salomon, Georg and Schoots, Ivo G and Richenberg, Jonathan and Villeirs, Geert and Panebianco, Valeria and Rouviere, Olivier and Logager, Vibeke Berg and Barentsz, Jelle}, + title = {ESUR/ESUI position paper: developing artificial intelligence for precision diagnosis of prostate cancer using magnetic resonance imaging.}, + doi = {10.1007/s00330-021-08021-6}, + abstract = {Artificial intelligence developments are essential to the successful deployment of community-wide, MRI-driven prostate cancer diagnosis. AI systems should ensure that the main benefits of biopsy avoidance are delivered while maintaining consistent high specificities, at a range of disease prevalences. 
Since all current artificial intelligence / computer-aided detection systems for prostate cancer detection are experimental, multiple developmental efforts are still needed to bring the vision to fruition. Initial work needs to focus on developing systems as diagnostic supporting aids so their results can be integrated into the radiologists' workflow including gland and target outlining tasks for fusion biopsies. Developing AI systems as clinical decision-making tools will require greater efforts. The latter encompass larger multicentric, multivendor datasets where the different needs of patients stratified by diagnostic settings, disease prevalence, patient preference, and clinical setting are considered. AI-based, robust, standard operating procedures will increase the confidence of patients and payers, thus enabling the wider adoption of the MRI-directed approach for prostate cancer diagnosis. KEY POINTS: * AI systems need to ensure that the benefits of biopsy avoidance are delivered with consistent high specificities, at a range of disease prevalence. * Initial work has focused on developing systems as diagnostic supporting aids for outlining tasks, so they can be integrated into the radiologists' workflow to support MRI-directed biopsies. * Decision support tools require a larger body of work including multicentric, multivendor studies where the clinical needs, disease prevalence, patient preferences, and clinical setting are additionally defined.}, + journal = ER, + month = may, + pmid = {33991226}, + year = {2021}, + taverne_url = {https://repository.ubn.ru.nl/handle/2066/245173}, + ss_id = {b7839ad75c2d6ee26bd37e47accf1cc4389e297f}, + all_ss_ids = {['b7839ad75c2d6ee26bd37e47accf1cc4389e297f']}, + gscites = {30}, +} + @article{Peri21, author = {Perik, T. H. and van Genugten, E. A. J. and Aarntzen, E. H. J. G. and Smit, E. J. and Huisman, H. J. and Hermans, J. J.}, title = {Quantitative CT perfusion imaging in patients with pancreatic cancer: a systematic review}, @@ -18426,6 +22965,9 @@ @article{Peri21 file = {:pdf/Peri21.pdf:PDF}, journal = {Abdominal Radiology}, year = {2021}, + ss_id = {90b8115c01d3e9da20b6a5ac11139dc9d9946127}, + all_ss_ids = {['90b8115c01d3e9da20b6a5ac11139dc9d9946127']}, + gscites = {7}, } @book{Pete20, @@ -18440,6 +22982,91 @@ @book{Pete20 abstract = {This book constitutes the proceedings of the Second International Workshop on Thoracic Image Analysis, TIA 2020, held in Lima, Peru, in October 2020. Due to COVID-19 pandemic the conference was held virtually. COVID-19 infection has brought a lot of attention to lung imaging and the role of CT imaging in the diagnostic workflow of COVID-19 suspects is an important topic. 
The 14 full papers presented deal with all aspects of image analysis of thoracic data, including: image acquisition and reconstruction, segmentation, registration, quantification, visualization, validation, population-based modeling, biophysical modeling (computational anatomy), deep learning, image analysis in small animals, outcome-based research and novel infectious disease applications.}, file = {Pete20.pdf:pdf/Pete20.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {2a7dafe1287670068300ff77401923f7e151b9f4}, + gscites = {0}, +} + +@article{Pfaf22, + author = {Pfaffenrot, Viktor and Koopmans, Peter J.}, + title = {~~Magnetization transfer weighted laminar fMRI with multi-echo FLASH}, + doi = {10.1016/j.neuroimage.2022.119725}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.neuroimage.2022.119725}, + file = {Pfaf22.pdf:pdf\\Pfaf22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {NeuroImage}, + automatic = {yes}, + citation-count = {1}, + pages = {119725}, + volume = {264}, + pmid = {36328273}, +} + +@article{Pfob22, + author = {Pfob, Andr\'{e} and Sidey-Gibbons, Chris and Barr, Richard G. and Duda, Volker and Alwafai, Zaher and Balleyguier, Corinne and Clevert, Dirk-Andr\'{e} and Fastner, Sarah and Gomez, Christina and Goncalo, Manuela and Gruber, Ines and Hahn, Markus and Hennigs, Andr\'{e} and Kapetas, Panagiotis and Lu, Sheng-Chieh and Nees, Juliane and Ohlinger, Ralf and Riedel, Fabian and Rutten, Matthieu and Schaefgen, Benedikt and Schuessler, Maximilian and Stieber, Anne and Togawa, Riku and Tozaki, Mitsuhiro and Wojcinski, Sebastian and Xu, Cai and Rauch, Geraldine and Heil, Joerg and Golatta, Michael}, + title = {~~The importance of multi-modal imaging and clinical information for humans and AI-based algorithms to classify breast masses (INSPiRED 003): an international, multicenter analysis}, + doi = {10.1007/s00330-021-08519-z}, + year = {2022}, + abstract = {Abstract Objectives + AI-based algorithms for medical image analysis showed comparable performance to human image readers. However, in practice, diagnoses are made using multiple imaging modalities alongside other data sources. We determined the importance of this multi-modal information and compared the diagnostic performance of routine breast cancer diagnosis to breast ultrasound interpretations by humans or AI-based algorithms. + + Methods + Patients were recruited as part of a multicenter trial (NCT02638935). The trial enrolled 1288 women undergoing routine breast cancer diagnosis (multi-modal imaging, demographic, and clinical information). Three physicians specialized in ultrasound diagnosis performed a second read of all ultrasound images. We used data from 11 of 12 study sites to develop two machine learning (ML) algorithms using unimodal information (ultrasound features generated by the ultrasound experts) to classify breast masses which were validated on the remaining study site. The same ML algorithms were subsequently developed and validated on multi-modal information (clinical and demographic information plus ultrasound features). We assessed performance using area under the curve (AUC). + + Results + Of 1288 breast masses, 368 (28.6%) were histopathologically malignant. In the external validation set (n = 373), the performance of the two unimodal ultrasound ML algorithms (AUC 0.83 and 0.82) was commensurate with performance of the human ultrasound experts (AUC 0.82 to 0.84; p for all comparisons > 0.05). 
The multi-modal ultrasound ML algorithms performed significantly better (AUC 0.90 and 0.89) but were statistically inferior to routine breast cancer diagnosis (AUC 0.95, p for all comparisons <= 0.05).
+ + Conclusions + The performance of humans and AI-based algorithms improves with multi-modal information. + + Key Points + * The performance of humans and AI-based algorithms improves with multi-modal information. + * Multimodal AI-based algorithms do not necessarily outperform expert humans. + * Unimodal AI-based algorithms do not represent optimal performance to classify breast masses. + }, + url = {http://dx.doi.org/10.1007/s00330-021-08519-z}, + file = {Pfob22.pdf:pdf\\Pfob22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + automatic = {yes}, + citation-count = {7}, + pages = {4101-4115}, + volume = {32}, + pmid = {36283244}, } @inproceedings{Phil13, @@ -18457,6 +23084,8 @@ @inproceedings{Phil13 month = {2}, gsid = {7051742070683067485}, gscites = {10}, + ss_id = {7841c4cf59be69cbba55ff31f302d0e1d6323bff}, + all_ss_ids = {['7841c4cf59be69cbba55ff31f302d0e1d6323bff']}, } @conference{Phil14a, @@ -18483,8 +23112,10 @@ @article{Phil15 pmid = {25838517}, month = {9}, gsid = {15559638283935030636}, - gscites = {27}, + gscites = {35}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/153295}, + ss_id = {31be3cce5c1e9ae715c5fa2d97d1c0ea5b23223a}, + all_ss_ids = {['31be3cce5c1e9ae715c5fa2d97d1c0ea5b23223a']}, } @conference{Phil15a, @@ -18509,7 +23140,9 @@ @article{Phil15b pmid = {26212560}, month = {7}, gsid = {6367627082351532761}, - gscites = {36}, + gscites = {63}, + ss_id = {1e265722259ad5880226c71ac082a6c8d6f11d84}, + all_ss_ids = {['1e265722259ad5880226c71ac082a6c8d6f11d84']}, } @phdthesis{Phil19, @@ -18542,6 +23175,20 @@ @article{Phil19a pmid = {31439111}, year = {2019}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/207373}, + ss_id = {b5a66ad408861f79a1e6606da4ed23f772a7eb02}, + all_ss_ids = {['b5a66ad408861f79a1e6606da4ed23f772a7eb02']}, + gscites = {10}, +} + +@mastersthesis{Phil23, + author = {Philipp, Lena}, + title = {Body Composition Assessment in 3D CT Images}, + abstract = {Body composition as a diagnostic and prognostic biomarker is gaining importance in various medical fields such as oncology. Therefore, accurate quantification methods are necessary, such as analyzing CT images. While several studies introduced deep learning approaches to automatically segment a single slice, quantifying body composition in 3D remains understudied due to the high required annotation effort. This thesis explores the use of annotation-efficient strategies in developing a body composition segmentation model for the abdomen and pelvis. To address this, a fine-tuning strategy was proposed to optimize the annotation process and extend the model's generalization performance trained with L3 slices to the entire abdomen and pelvis. Moreover, a self-supervised pre-training using a contrastive loss was employed to leverage unlabeled data. The goal was to efficiently use the annotated data in developing the segmentation model. The results showed a significant acceleration of the annotation process. However, the pre-training added only limited benefits. The final model achieved excellent results with dice scores of SM: 0.96, VAT: 0.93, and SAT: 0.97. 
The insights gained from this study are useful to improve annotation procedures, and the developed body composition segmentation model can be used for further evaluation.}, + file = {Phil23.pdf:pdf\\Phil23.pdf:PDF}, + journal = {Master thesis}, + optnote = {DIAG}, + school = {Radboud University Medical Center}, + year = {2023}, } @conference{Pinc18, @@ -18553,6 +23200,8 @@ @conference{Pinc18 file = {Pinc18.pdf:PDF\\Pinc18.pdf:PDF}, optnote = {DIAG}, year = {2018}, + all_ss_ids = {ab07173f4e352d07f48eddd67136e8e33573aecf}, + gscites = {12}, } @inproceedings{Pinc19, @@ -18568,7 +23217,22 @@ @inproceedings{Pinc19 number = {1}, month = {3}, gsid = {6317852457997732629}, - gscites = {1}, + gscites = {3}, + ss_id = {c14d3cf3f27b22bb8e6a3be573095173e7a917f1}, + all_ss_ids = {['c14d3cf3f27b22bb8e6a3be573095173e7a917f1']}, +} + +@article{Pinc19a, + author = {Pinckaers, Hans and Litjens, Geert}, + title = {~~Neural Ordinary Differential Equations for Semantic Segmentation of Individual Colon Glands}, + doi = {10.48550/ARXIV.1910.10470}, + year = {2019}, + abstract = {Automated medical image segmentation plays a key role in quantitative research and diagnostics. Convolutional neural networks based on the U-Net architecture are the state-of-the-art. A key disadvantage is the hard-coding of the receptive field size, which requires architecture optimization for each segmentation task. Furthermore, increasing the receptive field results in an increasing number of weights. Recently, Neural Ordinary Differential Equations (NODE) have been proposed, a new type of continuous depth deep neural network. This framework allows for a dynamic receptive field at a fixed memory cost and a smaller amount of parameters. We show on a colon gland segmentation dataset (GlaS) that these NODEs can be used within the U-Net framework to improve segmentation results while reducing memory load and parameter counts.}, + url = {https://arxiv.org/abs/1910.10470}, + file = {Pinc19a.pdf:pdf\\Pinc19a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:1910.10470}, + automatic = {yes}, } @article{Pinc20, @@ -18583,6 +23247,9 @@ @article{Pinc20 optnote = {DIAG, INPRESS}, pmid = {32845835}, year = {2020}, + ss_id = {499846b295c4a0926f18bcc484c0db00bfbf1300}, + all_ss_ids = {['499846b295c4a0926f18bcc484c0db00bfbf1300']}, + gscites = {59}, } @article{Pinc21, @@ -18596,22 +23263,45 @@ @article{Pinc21 pmid = {33729928}, year = {2021}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/235135}, + ss_id = {05fc7e2ebc4b7b0efc9bc79d4c5119c1a4d8ae38}, + all_ss_ids = {['05fc7e2ebc4b7b0efc9bc79d4c5119c1a4d8ae38']}, + gscites = {41}, } -@Article{Pinc22, - author = {Pinckaers, Hans and van Ipenburg, Jolique and Melamed, Jonathan and De Marzo, Angelo and Platz, Elizabeth A. and van Ginneken, Bram and van der Laak, Jeroen and Litjens, Geert}, - title = {Predicting biochemical recurrence of prostate cancer with artificial intelligence}, - doi = {10.1038/s43856-022-00126-3}, - pages = {64}, - volume = {2}, +@article{Pinc22, + author = {Pinckaers, Hans and van Ipenburg, Jolique and Melamed, Jonathan and De Marzo, Angelo and Platz, Elizabeth A. and van Ginneken, Bram and van der Laak, Jeroen and Litjens, Geert}, + title = {Predicting biochemical recurrence of prostate cancer with artificial intelligence}, + doi = {10.1038/s43856-022-00126-3}, + pages = {64}, + volume = {2}, abstract = {Background: The first sign of metastatic prostate cancer after radical prostatectomy is rising PSA levels in the blood, termed biochemical recurrence. 
The prediction of recurrence relies mainly on the morphological assessment of prostate cancer using the Gleason grading system. However, in this system, within-grade morphological patterns and subtle histopathological features are currently omitted, leaving a significant amount of prognostic potential unexplored. - Methods: To discover additional prognostic information using artificial intelligence, we trained a deep learning system to predict biochemical recurrence from tissue in H\&E-stained microarray cores directly. We developed a morphological biomarker using convolutional neural networks leveraging a nested case-control study of 685 patients and validated on an independent cohort of 204 patients. We use concept-based explainability methods to interpret the learned tissue patterns. - Results: The biomarker provides a strong correlation with biochemical recurrence in two sets (n = 182 and n = 204) from separate institutions. Concept-based explanations provided tissue patterns interpretable by pathologists. - Conclusions: These results show that the model finds predictive power in the tissue beyond the morphological ISUP grading.}, - file = {:pdf/Pinc22.pdf:PDF}, - journal = COMMMED, - pmid = {35693032}, - year = {2022}, + Methods: To discover additional prognostic information using artificial intelligence, we trained a deep learning system to predict biochemical recurrence from tissue in H\&E-stained microarray cores directly. We developed a morphological biomarker using convolutional neural networks leveraging a nested case-control study of 685 patients and validated on an independent cohort of 204 patients. We use concept-based explainability methods to interpret the learned tissue patterns. + Results: The biomarker provides a strong correlation with biochemical recurrence in two sets (n = 182 and n = 204) from separate institutions. Concept-based explanations provided tissue patterns interpretable by pathologists. + Conclusions: These results show that the model finds predictive power in the tissue beyond the morphological ISUP grading.}, + file = {:pdf/Pinc22.pdf:PDF}, + journal = COMMMED, + pmid = {35693032}, + year = {2022}, + ss_id = {7201377df1b1bcfa9f4f57d797f561842e848c9f}, + all_ss_ids = {['7201377df1b1bcfa9f4f57d797f561842e848c9f']}, + gscites = {7}, +} + +@article{Pist21, + author = {Pistenmaa, Carrie L. and Nardelli, P. and Ash, S.Y. and Come, C.E. and Diaz, A.A. and Rahaghi, F.N. and Barr, R.G. and Young, K.A. and Kinney, G.L. and Simmons, J.P. and Wade, R.C. and Wells, J.M. and Hokanson, J.E. and Washko, G.R. and San Jos\'{e} Est\'{e}par, R. and Crapo, James D. and Silverman, Edwin K. and Make, Barry J. and Regan, Elizabeth A. and Beaty, Terri H. and Castaldi, Peter J. and Cho, Michael H. and DeMeo, Dawn L. and El Boueiz, Adel and Foreman, Marilyn G. and Ghosh, Auyon and Hayden, Lystra P. and Hersh, Craig P. and Hetmanski, Jacqueline and Hobbs, Brian D. and Hokanson, John E. and Kim, Wonji and Laird, Nan and Lange, Christoph and Lutz, Sharon M. and McDonald, Merry-Lynn and Prokopenko, Dmitry and Moll, Matthew and Morrow, Jarrett and Qiao, Dandi and Regan, Elizabeth A. and Saferali, Aabida and Sakornsakolpat, Phuwanat and Silverman, Edwin K. and Wan, Emily S. and Yun, Jeong and Centeno, Juan Pablo and Charbonnier, Jean-Paul and Coxson, Harvey O. and Galban, Craig J. and Han, MeiLan K. and Hoffman, Eric A. and Humphries, Stephen and Jacobson, Francine L. and Judy, Philip F. and Kazerooni, Ella A. and Kluiber, Alex and Lynch, David A. 
and Nardelli, Pietro and Newell, John D. and Notary, Aleena and Oh, Andrea and Regan, Elizabeth A. and Ross, James C. and San Jose Estepar, Raul and Schroeder, Joyce and Sieren, Jered and Stoel, Berend C. and Tschirren, Juerg and Van Beek, Edwin and Ginneken, Bramvan and van Rikxoort, Eva and Sanchez- Ferrero, Gonzalo Vegas and Veitel, Lucas and Washko, George R. and Wilson, Carla G. and Jensen, Robert and Everett, Douglas and Crooks, Jim and Pratte, Katherine and Strand, Matt and Wilson, Carla G. and Hokanson, John E. and Austin, Erin and Kinney, Gregory and Lutz, Sharon M. and Young, Kendra A. and Bhatt, Surya P. and Bon, Jessica and Diaz, Alejandro A. and Han, MeiLan K. and Make, Barry and Murray, Susan and Regan, Elizabeth and Soler, Xavier and Wilson, Carla G. and Bowler, Russell P. and Kechris, Katerina and Banaei-Kashani, Farnoush and Curtis, Jeffrey L. and Pernicano, Perry G. and Hanania, Nicola and Atik, Mustafa and Boriek, Aladin and Guntupalli, Kalpatha and Guy, Elizabeth and Parulekar, Amit and DeMeo, Dawn L. and Hersh, Craig and Jacobson, Francine L. and Washko, George and Barr, R. Graham and Austin, John and D'Souza, Belinda and Thomashow, Byron and MacIntyre, Neil and McAdams, H. Page and Washington, Lacey and McEvoy, Charlene and Tashjian, Joseph and Wise, Robert and Brown, Robert and Hansel, Nadia N. and Horton, Karen and Lambert, Allison and Putcha, Nirupama and Casaburi, Richard and Adami, Alessandra and Budoff, Matthew and Fischer, Hans and Porszasz, Janos and Rossiter, Harry and Stringer, William and Sharafkhaneh, Amir and Lan, Charlie and Wendt, Christine and Bell, Brian and Kunisaki, Ken M. and Flenaugh, Eric L. and Gebrekristos, Hirut and Ponce, Mario and Terpenning, Silanath and Westney, Gloria and Bowler, Russell and Lynch, David A. and Rosiello, Richard and Pace, David and Criner, Gerard and Ciccolella, David and Cordova, Francis and Dass, Chandra and D'Alonzo, Gilbert and Desai, Parag and Jacobs, Michael and Kelsen, Steven and Kim, Victor and Mamary, A. James and Marchetti, Nathaniel and Satti, Aditi and Shenoy, Kartik and Steiner, Robert M. and Swift, Alex and Swift, Irene and Vega-Sanchez, Maria Elena and Dransfield, Mark and Bailey, William and Bhatt, Surya P. and Iyer, Anand and Nath, Hrudaya and Wells, J. Michael and Conrad, Douglas and Soler, Xavier and Yen, Andrew and Comellas, Alejandro P. and Hoth, Karin F. and Newell, John and Thompson, Brad and Han, MeiLan K. and Kazerooni, Ella and Labaki, Wassim and Galban, Craig and Vummidi, Dharshan and Billings, Joanne and Begnaud, Abbie and Allen, Tadashi and Sciurba, Frank and Bon, Jessica and Chandra, Divay and Weissfeld, Joel}, + title = {~~Pulmonary Arterial Pruning and Longitudinal Change in Percent Emphysema and Lung Function}, + doi = {10.1016/j.chest.2021.01.084}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.chest.2021.01.084}, + file = {Pist21.pdf:pdf\\Pist21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Chest}, + automatic = {yes}, + citation-count = {14}, + pages = {470-480}, + volume = {160}, + pmid = {33607083}, } @phdthesis{Plat07, @@ -18671,7 +23361,9 @@ @article{Plat14 pmid = {24058020}, month = {2}, gsid = {15091674449478966998}, - gscites = {49}, + gscites = {61}, + ss_id = {30562d0cc6cfc7526d678103b46d02e446af2df8}, + all_ss_ids = {['30562d0cc6cfc7526d678103b46d02e446af2df8']}, } @article{Poko15, @@ -18689,6 +23381,22 @@ @article{Poko15 month = {3}, } +@article{Pola23, + author = {Polack, Meaghan and Smit, Marloes A. 
and Crobach, Stijn A.L.P. and Terpstra, Valeska and Roodvoets, Annet G.H. and Meershoek-Klein Kranenbarg, Elma M. and Dequeker, Els M.C. and van der Laak, Jeroen A. and Tollenaar, Rob A.E.M. and van Krieken, Han J.H.J.M. and Mesker, Wilma E.}, + title = {~~Uniform Noting for International application of the Tumor-stroma ratio as Easy Diagnostic tool: The UNITED study - An update}, + doi = {10.1016/j.ejso.2022.11.378}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejso.2022.11.378}, + file = {Pola23.pdf:pdf\\Pola23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Surgical Oncology}, + automatic = {yes}, + citation-count = {0}, + pages = {e132-e133}, + volume = {49}, +} + @article{Pomp15, author = {Pompe, Esther and van Rikxoort, Eva M. and Schmidt, Michael and R\"uhaak, Jan and Gallardo-Estrella, L. and Vliegenthart, Rozemarijn and Oudkerk, Matthijs and de Koning, Harry J. and van Ginneken, Bram and de Jong, Pim A. and Lammers, Jan-Willem J. and Mohamed Hoesein, Firdaus A A.}, title = {Parametric response mapping adds value to current computed tomography biomarkers in diagnosing chronic obstructive pulmonary disease}, @@ -18703,8 +23411,10 @@ @article{Pomp15 pmid = {25932766}, month = {5}, gsid = {2861070141397780388}, - gscites = {16}, + gscites = {27}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/153586}, + ss_id = {fd9e39eec2414dfe630f8fbd82b093ef18f2f6b4}, + all_ss_ids = {['fd9e39eec2414dfe630f8fbd82b093ef18f2f6b4']}, } @conference{Pomp15a, @@ -18736,7 +23446,9 @@ @article{Pomp16 pmid = {27354779}, month = {6}, gsid = {2929627178945368618}, - gscites = {8}, + gscites = {14}, + ss_id = {b9c37550bf40f47dd0ce71ac59490144fd7707e8}, + all_ss_ids = {['b9c37550bf40f47dd0ce71ac59490144fd7707e8']}, } @article{Pomp16a, @@ -18749,23 +23461,25 @@ @article{Pomp16a pages = {2008-2013}, doi = {10.1016/j.ejrad.2016.09.009}, abstract = {Objectives - Airway wall thickness (AWT) is affected by changes in lung volume. This study evaluated whether correcting AWT on computed tomography (CT) for differences in inspiration level improves measurement agreement, reliability, and power to detect changes over time. - - Methods - Participants of the Dutch-Belgian lung cancer screening trial who underwent 3-month repeat CT for an indeterminate pulmonary nodule were included. AWT on CT was calculated by the square root of the wall area at a theoretical airway with an internal perimeter of 10?mm (Pi10). The scan with the highest lung volume was labelled as the reference scan and the scan with the lowest lung volume was labelled as the comparison scan. Pi10 derived from the comparison scan was corrected by multiplying it with the ratio of CT lung volume of the comparison scan to CT lung volume on the reference scan. Agreement of uncorrected and corrected Pi10 was studied with the Bland-Altman method, reliability with intra-class correlation coefficients (ICC), and power to detect changes over time was calculated. - - Results - 315 male participants were included. Limit of agreement and reliability for Pi10 was ?0.61 to 0.57?mm (ICC?=?0.87), which improved to ?0.38 to 0.37?mm (ICC?=?0.94) after correction for inspiration level. To detect a 15% change over 3 months, 71 subjects are needed for Pi10 and 26 subjects for Pi10 adjusted for inspiration level. 
- - Conclusions - Correcting Pi10 for differences in inspiration level improves reliability, agreement, and power to detect changes over time.}, + Airway wall thickness (AWT) is affected by changes in lung volume. This study evaluated whether correcting AWT on computed tomography (CT) for differences in inspiration level improves measurement agreement, reliability, and power to detect changes over time. + + Methods + Participants of the Dutch-Belgian lung cancer screening trial who underwent 3-month repeat CT for an indeterminate pulmonary nodule were included. AWT on CT was calculated by the square root of the wall area at a theoretical airway with an internal perimeter of 10?mm (Pi10). The scan with the highest lung volume was labelled as the reference scan and the scan with the lowest lung volume was labelled as the comparison scan. Pi10 derived from the comparison scan was corrected by multiplying it with the ratio of CT lung volume of the comparison scan to CT lung volume on the reference scan. Agreement of uncorrected and corrected Pi10 was studied with the Bland-Altman method, reliability with intra-class correlation coefficients (ICC), and power to detect changes over time was calculated. + + Results + 315 male participants were included. Limit of agreement and reliability for Pi10 was ?0.61 to 0.57?mm (ICC?=?0.87), which improved to ?0.38 to 0.37?mm (ICC?=?0.94) after correction for inspiration level. To detect a 15% change over 3 months, 71 subjects are needed for Pi10 and 26 subjects for Pi10 adjusted for inspiration level. + + Conclusions + Correcting Pi10 for differences in inspiration level improves reliability, agreement, and power to detect changes over time.}, file = {Pomp16a.pdf:pdf\\Pomp16a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {27776653}, month = {11}, gsid = {13729606176859777849}, - gscites = {4}, + gscites = {8}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/173155}, + ss_id = {2de6f1a75703272b9140f19cfe83c0a42527ef72}, + all_ss_ids = {['2de6f1a75703272b9140f19cfe83c0a42527ef72']}, } @article{Pomp17, @@ -18783,8 +23497,10 @@ @article{Pomp17 optnote = {DIAG}, pmid = {28424361}, gsid = {3304458654067065492}, - gscites = {10}, + gscites = {17}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/174975}, + ss_id = {bdba224a4595a84c0850c4bbc3d13b2582e4f08a}, + all_ss_ids = {['bdba224a4595a84c0850c4bbc3d13b2582e4f08a']}, } @article{Ponc14, @@ -18807,7 +23523,7 @@ @article{Post13a pages = {114-121}, volume = {63}, abstract = {Aims: Virtual microscopy offers major advantages for pathology practice, separating slide evaluation from slide production. The aim of this study was to investigate the reliability of using whole slide images as compared with routine glass slides for diagnostic purposes. Methods and results: Colon biopsies (n = 295) were assessed using both glass slides and whole slide images by four pathologists and two residents. Two pathologists scored the digital images of biopsies in a primary diagnostic setting. For each case, the consensus diagnosis was de?ned as the majority diagnosis on the study's glass slides. All diagnoses were grouped into seven main diagnostic categories, and further divided into subgroups. The overall concor - dance rates were 89.6% for whole slide images and 91.6% for light microscopy. The concordance rates of the subgroups 'adenoma' and 'adenocarcinoma' between whole slide images and conventional microscopy showed only small variability. 
The intraobserver (whole slide images versus glass slide) agreement, including subgroups, was substantial, with a mean j-value of 0.78, and was higher than the interobserver agreement for glass slides (interobserver j-value of 0.69). Conclusions: This study shows good diagnostic accuracy and reproducibility for virtual microscopy, indicating that this technology can reliably be used for pathological evaluation of colon biopsies in a primary clinical setting.}, + dance rates were 89.6% for whole slide images and 91.6% for light microscopy. The concordance rates of the subgroups 'adenoma' and 'adenocarcinoma' between whole slide images and conventional microscopy showed only small variability. The intraobserver (whole slide images versus glass slide) agreement, including subgroups, was substantial, with a mean j-value of 0.78, and was higher than the interobserver agreement for glass slides (interobserver j-value of 0.69). Conclusions: This study shows good diagnostic accuracy and reproducibility for virtual microscopy, indicating that this technology can reliably be used for pathological evaluation of colon biopsies in a primary clinical setting.}, file = {Post13.pdf:pdf\\post13.pdf:PDF}, journal = Histopathology, month = {3}, @@ -18885,7 +23601,9 @@ @article{Prok20 optnote = {DIAG, RADIOLOGY}, pmid = {32339082}, gsid = {16652056044070556446}, - gscites = {47}, + gscites = {576}, + ss_id = {12e5057823a452673ae0aa4f8a64d4a88fdeaf27}, + all_ss_ids = {['12e5057823a452673ae0aa4f8a64d4a88fdeaf27']}, } @article{Prok93, @@ -18975,6 +23693,9 @@ @article{Prok97b number = {11}, pmid = {9169105}, month = {2}, + ss_id = {7b442acc3a6bedd75701c15484e7e0c817111122}, + all_ss_ids = {['7b442acc3a6bedd75701c15484e7e0c817111122']}, + gscites = {0}, } @article{Pros12, @@ -18990,6 +23711,9 @@ @article{Pros12 optnote = {DIAG, RADIOLOGY}, pmid = {23239063}, month = {12}, + ss_id = {629276b035ec77dbc9af62d747d2a923079ec12f}, + all_ss_ids = {['629276b035ec77dbc9af62d747d2a923079ec12f']}, + gscites = {52}, } @article{Pros13, @@ -19006,6 +23730,9 @@ @article{Pros13 number = {7}, pmid = {23873184}, month = {7}, + ss_id = {fcb8b0a49b42ea6834e69133f752db45836ba1f8}, + all_ss_ids = {['fcb8b0a49b42ea6834e69133f752db45836ba1f8']}, + gscites = {0}, } @article{Puig02a, @@ -19034,6 +23761,26 @@ @article{Putz19 file = {Putz19.pdf:pdf\\Putz19.pdf:PDF}, optnote = {DIAG}, month = {10}, + ss_id = {646755572428ec82a125c19d371b5c853ce3c050}, + all_ss_ids = {['646755572428ec82a125c19d371b5c853ce3c050']}, + gscites = {27}, +} + +@article{Qin22, + author = {Qin, Qin and Alsop, David C. and Bolar, Divya S. and Hernandez-Garcia, Luis and Meakin, James and Liu, Dapeng and Nayak, Krishna S. and Schmid, Sophie and van Osch, Matthias J. P. and Wong, Eric C. and Woods, Joseph G. and Zaharchuk, Greg and Zhao, Moss Y. and Zun, Zungho and Guo, Jia and the ISMRMPerfusion Study Group}, + title = {~~Velocity-selective arterial spin labeling perfusion MRI: A review of the state of the art and recommendations for clinical implementation}, + doi = {10.1002/mrm.29371}, + year = {2022}, + abstract = {AbstractThis review article provides an overview of the current status of velocity-selective arterial spin labeling (VSASL) perfusion MRI and is part of a wider effort arising from the International Society for Magnetic Resonance in Medicine (ISMRM) Perfusion Study Group. Since publication of the 2015 consensus paper on arterial spin labeling (ASL) for cerebral perfusion imaging, important advancements have been made in the field. 
The ASL community has, therefore, decided to provide an extended perspective on various aspects of technical development and application. Because VSASL has the potential to become a principal ASL method because of its unique advantages over traditional approaches, an in-depth discussion was warranted. VSASL labels blood based on its velocity and creates a magnetic bolus immediately proximal to the microvasculature within the imaging volume. VSASL is, therefore, insensitive to transit delay effects, in contrast to spatially selective pulsed and (pseudo-) continuous ASL approaches. Recent technical developments have improved the robustness and the labeling efficiency of VSASL, making it a potentially more favorable ASL approach in a wide range of applications where transit delay effects are of concern. In this review article, we (1) describe the concepts and theoretical basis of VSASL; (2) describe different variants of VSASL and their implementation; (3) provide recommended parameters and practices for clinical adoption; (4) describe challenges in developing and implementing VSASL; and (5) describe its current applications. As VSASL continues to undergo rapid development, the focus of this review is to summarize the fundamental concepts of VSASL, describe existing VSASL techniques and applications, and provide recommendations to help the clinical community adopt VSASL.}, + url = {http://dx.doi.org/10.1002/mrm.29371}, + file = {Qin22.pdf:pdf\\Qin22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Magnetic Resonance in Medicine}, + automatic = {yes}, + citation-count = {15}, + pages = {1528-1547}, + volume = {88}, + pmid = {35819184}, } @inproceedings{Rabb14, @@ -19051,6 +23798,8 @@ @inproceedings{Rabb14 month = {3}, gsid = {17944010023072660192}, gscites = {1}, + ss_id = {03582bc12f4bb32136d028e3d09fa07b98b25bb9}, + all_ss_ids = {['03582bc12f4bb32136d028e3d09fa07b98b25bb9']}, } @inproceedings{Rads11, @@ -19079,6 +23828,8 @@ @inproceedings{Ramo12 optnote = {DIAG}, gsid = {274380764387987398}, gscites = {3}, + ss_id = {1df43a13035c094cc8d3eafd058507f4f3e4c57c}, + all_ss_ids = {['1df43a13035c094cc8d3eafd058507f4f3e4c57c']}, } @inproceedings{Ramo13, @@ -19091,7 +23842,9 @@ @inproceedings{Ramo13 file = {Ramo13.pdf:pdf\\Ramo13.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {10439674960459647694}, - gscites = {1}, + gscites = {2}, + ss_id = {c286748728235f6b06e91e1e0ddb4fb3e2b97412}, + all_ss_ids = {['c286748728235f6b06e91e1e0ddb4fb3e2b97412']}, } @article{Ramo15, @@ -19109,7 +23862,24 @@ @article{Ramo15 pmid = {25438332}, month = {1}, gsid = {11929678439974614784}, - gscites = {26}, + gscites = {34}, + ss_id = {2d49c0641c15bb0d0d00c11fbc0155c9942a9ba3}, + all_ss_ids = {['2d49c0641c15bb0d0d00c11fbc0155c9942a9ba3']}, +} + +@book{Rao21, + author = {Rao, Chinmay and Pai, Suraj and Hadzic, Ibrahim and Zhovannik, Ivan and Bontempi, Dennis and Dekker, Andre and Teuwen, Jonas and Traverso, Alberto}, + title = {~~Oropharyngeal Tumour Segmentation Using Ensemble 3D PET-CT Fusion Networks for the HECKTOR Challenge}, + doi = {10.1007/978-3-030-67194-5_8}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-030-67194-5_8}, + file = {Rao21.pdf:pdf\\Rao21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Head and Neck Tumor Segmentation}, + automatic = {yes}, + citation-count = {3}, + pages = {65-77}, } @inproceedings{Raza15, @@ -19123,6 +23893,23 @@ @inproceedings{Raza15 optnote = {DIAG}, gsid = {8743979472382399251}, gscites = {11}, + ss_id = 
{ba17f21a7df9005f18a97e1385b83d90709c8ebe}, + all_ss_ids = {['ba17f21a7df9005f18a97e1385b83d90709c8ebe']}, +} + +@book{Raza16, + author = {Razavi, Mohammad and Wang, Lei and Tan, Tao and Karssemeijer, Nico and Linsen, Lars and Frese, Udo and Hahn, Horst K. and Zachmann, Gabriel}, + title = {~~Novel Morphological Features for Non-mass-like Breast Lesion Classification on DCE-MRI}, + doi = {10.1007/978-3-319-47157-0_37}, + year = {2016}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/978-3-319-47157-0_37}, + file = {Raza16.pdf:pdf\\Raza16.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Machine Learning in Medical Imaging}, + automatic = {yes}, + citation-count = {2}, + pages = {305-312}, } @article{Reij18, @@ -19136,6 +23923,9 @@ @article{Reij18 file = {Reij18.pdf:pdf\\Reij18.pdf:PDF}, optnote = {DIAG}, pmid = {30428390}, + ss_id = {d646accbc0394ecfa0da14066a825ca1695c8dac}, + all_ss_ids = {['d646accbc0394ecfa0da14066a825ca1695c8dac']}, + gscites = {3}, } @article{Rein06, @@ -19178,10 +23968,13 @@ @inproceedings{Rein21 title = {Common limitations of performance metrics in biomedical image analysis}, doi = {10.1117/12.2549650}, abstract = {Diffuse large B-cell lymphoma (DLBCL) is the most common type of B-cell lymphoma. It is characterized by a heterogeneous morphology, genetic changes and clinical behavior. A small specific subgroup of DLBCL, harbouring a MYC gene translocation is associated with worse patient prognosis and outcome. Typically, the MYC translocation is assessed with a molecular test (FISH), that is expensive and time-consuming. Our hypothesis is that genetic changes, such as translocations could be visible as changes in the morphology of an HE-stained specimen. However, it has not proven possible to use morphological criteria for the detection of a MYC translocation in the diagnostic setting due to lack of specificity. - In this paper, we apply a deep learning model to automate detection of the MYC translocations in DLBCL based on HE-stained specimens. The proposed method works at the whole-slide level and was developed based on a multicenter data cohort of 91 patients. All specimens were stained with HE, and the MYC translocation was confirmed using fluorescence in situ hybridization (FISH). The system was evaluated on an additional 66 patients, and obtained AUROC of 0.83 and accuracy of 0.77. The proposed method presents proof of a concept giving insights in the applicability of deep learning methods for detection of a genetic changes in DLBCL. In future work we will evaluate our algorithm for automatic pre-screen of DLBCL specimens to obviate FISH analysis in a large number of patients.}, + In this paper, we apply a deep learning model to automate detection of the MYC translocations in DLBCL based on HE-stained specimens. The proposed method works at the whole-slide level and was developed based on a multicenter data cohort of 91 patients. All specimens were stained with HE, and the MYC translocation was confirmed using fluorescence in situ hybridization (FISH). The system was evaluated on an additional 66 patients, and obtained AUROC of 0.83 and accuracy of 0.77. The proposed method presents proof of a concept giving insights in the applicability of deep learning methods for detection of a genetic changes in DLBCL. 
In future work we will evaluate our algorithm for automatic pre-screen of DLBCL specimens to obviate FISH analysis in a large number of patients.}, file = {:pdf/Rein21.pdf:PDF}, optnote = {DIAG, PATHOLOGY, RADIOLOGY}, year = {2021}, + ss_id = {a9dc7e9f174dbd624d5a8294ca6dc9b671e1ae97}, + all_ss_ids = {['a9dc7e9f174dbd624d5a8294ca6dc9b671e1ae97']}, + gscites = {2}, } @article{Rein21a, @@ -19194,6 +23987,53 @@ @article{Rein21a month = apr, optnote = {DIAG, PATHOLOGY, RADIOLOGY}, year = {2021}, + ss_id = {787ef2981e94e481432cb2a1296903676ec80fa9}, + all_ss_ids = {['787ef2981e94e481432cb2a1296903676ec80fa9']}, + gscites = {94}, +} + +@article{Rein23, + author = {Reinke, Annika and Tizabi, Minu D. and Baumgartner, Michael and Eisenmann, Matthias and Heckmann-Notzel, Doreen and Kavur, A. Emre and Radsch, Tim and Sudre, Carole H. and Acion, Laura and Antonelli, Michela and Arbel, Tal and Bakas, Spyridon and Benis, Arriel and Blaschko, Matthew and Buttner, Florian and Cardoso, M. Jorge and Cheplygina, Veronika and Chen, Jianxu and Christodoulou, Evangelia and Cimini, Beth A. and Collins, Gary S. and Farahani, Keyvan and Ferrer, Luciana and Galdran, Adrian and van Ginneken, Bram and Glocker, Ben and Godau, Patrick and Haase, Robert and Hashimoto, Daniel A. and Hoffman, Michael M. and Huisman, Merel and Isensee, Fabian and Jannin, Pierre and Kahn, Charles E. and Kainmueller, Dagmar and Kainz, Bernhard and Karargyris, Alexandros and Karthikesalingam, Alan and Kenngott, Hannes and Kleesiek, Jens and Kofler, Florian and Kooi, Thijs and Kopp-Schneider, Annette and Kozubek, Michal and Kreshuk, Anna and Kurc, Tahsin and Landman, Bennett A. and Litjens, Geert and Madani, Amin and Maier-Hein, Klaus and Martel, Anne L. and Mattson, Peter and Meijering, Erik and Menze, Bjoern and Moons, Karel G. M. and Muller, Henning and Nichyporuk, Brennan and Nickel, Felix and Petersen, Jens and Rafelski, Susanne M. and Rajpoot, Nasir and Reyes, Mauricio and Riegler, Michael A. and Rieke, Nicola and Saez-Rodriguez, Julio and Sanchez, Clara I. and Shetty, Shravya and van Smeden, Maarten and Summers, Ronald M. and Taha, Abdel A. and Tiulpin, Aleksei and Tsaftaris, Sotirios A. and Van Calster, Ben and Varoquaux, Gael and Wiesenfarth, Manuel and Yaniv, Ziv R. and Jager, Paul F. and Maier-Hein, Lena}, + year = {2023}, + month = {2}, + title = {Understanding metric-related pitfalls in image analysis validation}, + abstract = {Validation metrics are key for the reliable tracking of scientific progress and for bridging the current chasm between artificial intelligence (AI) research and its translation into practice. However, increasing evidence shows that particularly in image analysis, metrics are often chosen inadequately in relation to the underlying research problem. This could be attributed to a lack of accessibility of metric-related knowledge: While taking into account the individual strengths, weaknesses, and limitations of validation metrics is a critical prerequisite to making educated choices, the relevant knowledge is currently scattered and poorly accessible to individual researchers. Based on a multi-stage Delphi process conducted by a multidisciplinary expert consortium as well as extensive community feedback, the present work provides the first reliable and comprehensive common point of access to information on pitfalls related to validation metrics in image analysis. 
Focusing on biomedical image analysis but with the potential of transfer to other fields, the addressed pitfalls generalize across application domains and are categorized according to a newly created, domain-agnostic taxonomy. To facilitate comprehension, illustrations and specific examples accompany each pitfall. As a structured body of information accessible to researchers of all levels of expertise, this work enhances global comprehension of a key topic in image analysis validation.}, + file = {:pdf/Rein23.pdf:PDF}, + journal = {arXiv:2302.01790}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + ss_id = {1898998f27ce0750a42f8f3ca2ed7e292f05aee2}, + all_ss_ids = {['1898998f27ce0750a42f8f3ca2ed7e292f05aee2']}, + gscites = {9}, +} + +@inproceedings{Reis15, + author = {Reis, Sara and Eiben, Bjoern and Mertzanidou, Thomy and Hipwell, John and Hermsen, Meyke and van der Laak, Jeroen and Pinder, Sarah and Bult, Peter and Hawkes, David}, + title = {~~Minimum slice spacing required to reconstruct 3D shape for serial sections of breast tissue for comparison with medical imaging}, + doi = {10.1117/12.2081909}, + year = {2015}, + abstract = {There is currently an increasing interest in combining the information obtained from radiology and histology with the intent of gaining a better understanding of how different tumour morphologies can lead to distinctive radiological signs which might predict overall treatment outcome. Relating information at different resolution scales is challenging. Reconstructing 3D volumes from histology images could be the key to interpreting and relating the radiological image signal to tissue microstructure. The goal of this study is to determine the minimum sampling (maximum spacing between histological sections through a fixed surgical specimen) required to create a 3D reconstruction of the specimen to a specific tolerance. We present initial results for one lumpectomy specimen case where 33 consecutive histology slides were acquired.}, + url = {http://dx.doi.org/10.1117/12.2081909}, + file = {Reis15.pdf:pdf\\Reis15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Imaging 2015: Digital Pathology}, + automatic = {yes}, + citation-count = {2}, + pmid = {24819231},
} @article{Rema16, @@ -19213,7 +24053,9 @@ @article{Rema16 pmid = {28783673}, publisher = {American Association for the Advancement of Science ({AAAS})}, gsid = {15567873193529139666}, - gscites = {63}, + gscites = {131}, + ss_id = {69cc46b96b0e8b0926de51d7a5bf83e3c29897f9}, + all_ss_ids = {['69cc46b96b0e8b0926de51d7a5bf83e3c29897f9']}, } @inproceedings{Riad13, @@ -19229,7 +24071,9 @@ @inproceedings{Riad13 optnote = {DIAG, RADIOLOGY}, month = {2}, gsid = {8090823468847966154}, - gscites = {4}, + gscites = {5}, + ss_id = {171511e9d4b9a921232d02464304ee443c23f897}, + all_ss_ids = {['171511e9d4b9a921232d02464304ee443c23f897']}, } @article{Ridg15, @@ -19244,6 +24088,8 @@ @article{Ridg15 optnote = {DIAG}, pmid = {26458208}, month = {3}, + all_ss_ids = {['75bc9847c96d8f319f82d1b9ba01b5a396247152', 'a6a2fed1ee525c0d2a35ae6454660eca53ce03fc', 'fdf663cc9dda29d55f2e2f44f9876f3b08f854f8']}, + gscites = {70}, } @conference{Riel13, @@ -19279,7 +24125,9 @@ @article{Riel15 pmid = {26020438}, month = {12}, gsid = {13650233394438006732}, - gscites = {113}, + gscites = {152}, + ss_id = {a920a54da22d5e6114588d1ad78fadba534c1ad4}, + all_ss_ids = {['a920a54da22d5e6114588d1ad78fadba534c1ad4']}, } @conference{Riel15b, @@ -19333,8 +24181,10 @@ @article{Riel17 pmid = {28293773}, month = {3}, gsid = {3463244640968070105}, - gscites = {28}, + gscites = {45}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/181941}, + ss_id = {e80b85ea45f4af6ed7278be42553ead2ee5b991d}, + all_ss_ids = {['e80b85ea45f4af6ed7278be42553ead2ee5b991d']}, } @article{Riel17a, @@ -19352,7 +24202,9 @@ @article{Riel17a pmid = {29121063}, month = {11}, gsid = {11335791003669055242}, - gscites = {18}, + gscites = {30}, + ss_id = {af7a0c17980d8692f50adb16c63d02deed0d54ad}, + all_ss_ids = {['af7a0c17980d8692f50adb16c63d02deed0d54ad']}, } @article{Riel19, @@ -19370,8 +24222,10 @@ @article{Riel19 pmid = {30066248}, month = {7}, gsid = {9823201264187338687}, - gscites = {9}, + gscites = {40}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/201229}, + ss_id = {19495049936b938f43434c03cb18ea0702d6f09d}, + all_ss_ids = {['19495049936b938f43434c03cb18ea0702d6f09d']}, } @phdthesis{Riel20, @@ -19407,14 +24261,16 @@ @inproceedings{Rijt18 file = {:pdf/Rijt18.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {4926044385152081806}, - gscites = {2}, + gscites = {20}, + ss_id = {c406706c31d311ba4a32cd29d58b2cc12a432ad3}, + all_ss_ids = {['c406706c31d311ba4a32cd29d58b2cc12a432ad3']}, } @mastersthesis{Rijt19, author = {Mart van Rijthoven}, title = {Cancer research in digital pathology using convolutional neural networks}, abstract = {Understanding the progression of cancer is at the core of cancer research. In this thesis we combine high resolution features with low resolution contextual features to automatic segment cancerous associated tissue in gigapixel histopathology whole slide images (WSIs) stained with hematoxylin and eosin. We take advantage of the multi-resolution data structure of WSIs and obtain contextual features through the use of dilated convolutions. Our proposed multi-resolution method has a comparable F1-score performance compared to a single low resolution method.
Furthermore, the proposed method increases the F1-score by 34% for ductal Carcinoma In Situ (DCIS) and 12% for invasive ductal carcinoma (IDC) in comparison with a single high resolution method. - Lymphocytes play an important role in the progression of cancer. In the second part of this thesis, we boost the potential of the You Only Look Once (YOLO) architecture applied to automatic detection of lymphocytes in WSIs stained with immunohistochemistry by (1) tailoring the YOLO architecture to lymphocyte detection in WSI; (2) guiding training data sampling by exploiting prior knowledge on hard negative samples; (3) pairing the proposed sampling strategy with the focal loss technique. The combination of the proposed improvements increases the F1-score of YOLO by 3% with a speed-up of 4.3X.}, + Lymphocytes play an important role in the progression of cancer. In the second part of this thesis, we boost the potential of the You Only Look Once (YOLO) architecture applied to automatic detection of lymphocytes in WSIs stained with immunohistochemistry by (1) tailoring the YOLO architecture to lymphocyte detection in WSI; (2) guiding training data sampling by exploiting prior knowledge on hard negative samples; (3) pairing the proposed sampling strategy with the focal loss technique. The combination of the proposed improvements increases the F1-score of YOLO by 3% with a speed-up of 4.3X.}, file = {Rijt19.pdf:pdf\\Rijt19.pdf:PDF}, optnote = {DIAG}, school = {University of Utrecht}, @@ -19438,6 +24294,9 @@ @article{Rijt21a optnote = {DIAG, PATHOLOGY}, algorithm = {https://grand-challenge.org/algorithms/hooknet-breast/}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/231089}, + ss_id = {455e92c1f629cd8065bc0c8287d3da52b451b98c}, + all_ss_ids = {['455e92c1f629cd8065bc0c8287d3da52b451b98c']}, + gscites = {85}, } @inproceedings{Rijt21b, @@ -19451,6 +24310,9 @@ @inproceedings{Rijt21b abstract = {In this work, we propose a deep learning system for weakly supervised object detection in digital pathology whole slide images. We designed the system to be organ- and object-agnostic, and to be adapted on-the-fly to detect novel objects based on a few examples provided by the user. 
We tested our method on detection of healthy glands in colon biopsies and ductal carcinoma in situ (DCIS) of the breast, showing that (1) the same system is capable of adapting to detect requested objects with high accuracy, namely 87% accuracy assessed on 582 detections in colon tissue, and 93% accuracy assessed on 163 DCIS detections in breast tissue; (2) in some settings, the system is capable of retrieving similar cases with little to none false positives (i.e., precision equal to 1.00); (3) the performance of the system can benefit from previously detected objects with high confidence that can be reused in new searches in an iterative fashion.}, year = {2021}, doi = {10.1117/12.2582132}, + ss_id = {19afccfe66de8733d799d45d816ed0ad8a8665f9}, + all_ss_ids = {['19afccfe66de8733d799d45d816ed0ad8a8665f9']}, + gscites = {1}, } @inproceedings{Rikx05, @@ -19516,6 +24378,8 @@ @inproceedings{Rikx07a optnote = {DIAG, RADIOLOGY}, gsid = {4292505718482622984}, gscites = {12}, + ss_id = {6e610abd4a920dcf0db83433fa903f7586603544}, + all_ss_ids = {['6e610abd4a920dcf0db83433fa903f7586603544']}, } @conference{Rikx07b, @@ -19542,6 +24406,8 @@ @inproceedings{Rikx08 month = {3}, gsid = {403647541700125074}, gscites = {24}, + ss_id = {024bb3acac8036e8a335b909f91c78391dcd4868}, + all_ss_ids = {['024bb3acac8036e8a335b909f91c78391dcd4868']}, } @article{Rikx08a, @@ -19559,6 +24425,8 @@ @article{Rikx08a pmid = {18270056}, gsid = {2240117688941041987}, gscites = {95}, + ss_id = {728b2b4783db4b9b2fe53835deea9bab95410ae8}, + all_ss_ids = {['728b2b4783db4b9b2fe53835deea9bab95410ae8']}, } @conference{Rikx08b, @@ -19602,6 +24470,8 @@ @article{Rikx09 gsid = {6723869456775488402}, gscites = {116}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/80377}, + ss_id = {35ed683f966071aa4d9e428ebdc7b245a310f9ea}, + all_ss_ids = {['35ed683f966071aa4d9e428ebdc7b245a310f9ea']}, } @inproceedings{Rikx09a, @@ -19619,6 +24489,8 @@ @inproceedings{Rikx09a pmid = {20425996}, gsid = {15092302850885680539}, gscites = {26}, + ss_id = {8a4482179ae334ba0dbd76060c30332a65f9861a}, + all_ss_ids = {['8a4482179ae334ba0dbd76060c30332a65f9861a']}, } @article{Rikx09b, @@ -19636,8 +24508,10 @@ @article{Rikx09b pmid = {19673192}, month = {6}, gsid = {14169941223089065550}, - gscites = {244}, + gscites = {256}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/80370}, + ss_id = {a4b778f8e002b34fd337ada732d5257c846495a9}, + all_ss_ids = {['a4b778f8e002b34fd337ada732d5257c846495a9']}, } @inproceedings{Rikx09c, @@ -19650,7 +24524,9 @@ @inproceedings{Rikx09c file = {Rikx09c.pdf:pdf\\Rikx09c.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {13063578325072036697}, - gscites = {27}, + gscites = {37}, + ss_id = {31379d21a9c062f56598616eaf4b24b539c3dd8d}, + all_ss_ids = {['31379d21a9c062f56598616eaf4b24b539c3dd8d']}, } @phdthesis{Rikx09d, @@ -19693,8 +24569,10 @@ @article{Rikx10 pmid = {20304724}, month = {6}, gsid = {14940016956782221874}, - gscites = {94}, + gscites = {99}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/88439}, + ss_id = {f640a7a2f5618c0ca8e49b84af937bc16e868175}, + all_ss_ids = {['f640a7a2f5618c0ca8e49b84af937bc16e868175']}, } @article{Rikx10a, @@ -19714,6 +24592,8 @@ @article{Rikx10a gsid = {4163686614704886737}, gscites = {179}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/87927}, + ss_id = {b75ff2b9b67d7eaa6d0ef3231b755cde8f6915b7}, + all_ss_ids = {['b75ff2b9b67d7eaa6d0ef3231b755cde8f6915b7']}, } @inproceedings{Rikx10b, @@ -19728,6 +24608,8 @@ @inproceedings{Rikx10b optnote = {DIAG, RADIOLOGY}, gsid = 
{13602227483189066767}, gscites = {2}, + ss_id = {338b5d8af7633715a9062049ac8445c8b830df6d}, + all_ss_ids = {['338b5d8af7633715a9062049ac8445c8b830df6d']}, } @inproceedings{Rikx10c, @@ -19790,7 +24672,9 @@ @inproceedings{Rikx11d optnote = {DIAG, RADIOLOGY}, file = {:./pdf/Rikx11d.pdf:PDF}, gsid = {6106095811807054823}, - gscites = {26}, + gscites = {29}, + ss_id = {dd4c3ad5fe9bcbaeb028818972727219bf6cbdbc}, + all_ss_ids = {['dd4c3ad5fe9bcbaeb028818972727219bf6cbdbc']}, } @inproceedings{Rikx12, @@ -19807,7 +24691,9 @@ @inproceedings{Rikx12 optnote = {DIAG, RADIOLOGY}, month = {2}, gsid = {14466201478600686172}, - gscites = {4}, + gscites = {6}, + ss_id = {07b1cfaff7bb3fcdd0df7ddd43cf46258be9438b}, + all_ss_ids = {['07b1cfaff7bb3fcdd0df7ddd43cf46258be9438b']}, } @article{Rikx12a, @@ -19825,7 +24711,9 @@ @article{Rikx12a pmid = {21984417}, month = {10}, gsid = {9186093955945686788}, - gscites = {58}, + gscites = {63}, + ss_id = {145633f228eda9babfe3f73fa981c6787f66541d}, + all_ss_ids = {['145633f228eda9babfe3f73fa981c6787f66541d']}, } @conference{Rikx12b, @@ -19902,8 +24790,10 @@ @article{Ritc16 pmid = {26994641}, month = {5}, gsid = {3679975545286903047}, - gscites = {14}, + gscites = {25}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/172195}, + ss_id = {cb695909a72f6a2b288bded8058ce8ab6398424c}, + all_ss_ids = {['cb695909a72f6a2b288bded8058ce8ab6398424c']}, } @inproceedings{Robb11, @@ -19934,7 +24824,9 @@ @article{Rodr18 optnote = {AXTI, DIAG, RADIOLOGY}, pmid = {29230524}, gsid = {14791245295281752832}, - gscites = {16}, + gscites = {25}, + ss_id = {38f5f8539b523929aafed804c3533b62abdc3781}, + all_ss_ids = {['38f5f8539b523929aafed804c3533b62abdc3781']}, } @inproceedings{Rodr18a, @@ -19948,7 +24840,9 @@ @inproceedings{Rodr18a optnote = {DIAG}, month = {2}, gsid = {12130497468928813322}, - gscites = {10}, + gscites = {21}, + ss_id = {8db5fb8ff224535dfeb095215b030e8d260344b7}, + all_ss_ids = {['8db5fb8ff224535dfeb095215b030e8d260344b7']}, } @article{Rodr18b, @@ -19966,7 +24860,37 @@ @article{Rodr18b optnote = {AXTI, DIAG, RADIOLOGY}, pmid = {29254355}, gsid = {1067383496078365227}, - gscites = {12}, + gscites = {33}, + ss_id = {419668ed3bba71b382853b171b1bf4aecd83d483}, + all_ss_ids = {['419668ed3bba71b382853b171b1bf4aecd83d483']}, +} + +@inproceedings{Rodr18c, + author = {Rodriguez-Ruiz, Alejandro and Mordang, Jan-Jurre and Karssemeijer, Nico and Sechopoulos, Ioannis and Mann, Ritse}, + title = {~~Can radiologists improve their breast cancer detection in mammography when using a deep learning based computer system as decision support?}, + doi = {10.1117/12.2317937}, + year = {2018}, + abstract = {For more than a decade, radiologists have used traditional computer aided detection systems to read mammograms, but mainly because of a low computer specificity may not improve their screening performance, according to several studies. The breakthrough in deep learning techniques has boosted the performance of machine learning algorithms, also for breast cancer detection in mammography. The objective of this study was to determine whether radiologists improve their breast cancer detection performance when they concurrently use a deep learning-based computer system for decision support, compared to when they read mammography unaided. A retrospective, fully-crossed, multi-reader multi-case (MRMC) study was designed to compare this. The employed decision support system was Transpara™ (Screenpoint Medical, Nijmegen, the Netherlands).
Radiologists interact by clicking an area on the mammogram, for which the computer system displays its cancer likelihood score (1-100). In total, 240 cases (100 cancers, 40 false positive recalls, 100 normals) acquired with two different mammography systems were retrospectively collected. Seven radiologists scored each case once with, and once without the use of decision support, providing a forced BI-RADS® score and a level of suspiciousness (1-100). MRMC analysis of variance of the area under the receiver operating characteristic curves (AUC), and specificity and sensitivity were computed. When using decision support, the AUC increased from 0.87 to 0.89 (P=0.043) and specificity increased from 73% to 78% (P=0.030), while sensitivity did not significantly increment (84% to 87%, P=0.180). In conclusion, radiologists significantly improved their performance when using a deep learning-based computer system as decision support.}, + url = {http://dx.doi.org/10.1117/12.2317937}, + file = {Rodr18c.pdf:pdf\\Rodr18c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {14th International Workshop on Breast Imaging (IWBI 2018)}, + automatic = {yes}, + citation-count = {7},
} @article{Rodr19, @@ -19983,6 +24907,9 @@ @article{Rodr19 file = {Rodr19.pdf:pdf\\Rodr19.pdf:PDF}, optnote = {AXTI, DIAG, RADIOLOGY}, pmid = {30993432}, + ss_id = {1098db3a130ba082c6695a326b70d88dac71f27a}, + all_ss_ids = {['1098db3a130ba082c6695a326b70d88dac71f27a']}, + gscites = {127}, } @inproceedings{Roel03, @@ -20000,6 +24927,8 @@ @inproceedings{Roel03 month = {5}, gsid = {8711232220331594389}, gscites = {5}, + ss_id = {ae276654985fe5b01288625048662340310f37e5}, + all_ss_ids = {['ae276654985fe5b01288625048662340310f37e5']}, } @inproceedings{Roel04, @@ -20028,6 +24957,8 @@ @article{Roel06 month = {8}, gsid = {16706916398824875552}, gscites = {18}, + ss_id = {980eb600637d3f07c103029a76bc20e5b005f9eb}, + all_ss_ids = {['980eb600637d3f07c103029a76bc20e5b005f9eb']}, } @article{Roel07, @@ -20044,7 +24975,9 @@ @article{Roel07 pmid = {17185661}, month = {1}, gsid = {15832914948995714833}, - gscites = {67}, + gscites = {84}, + ss_id = {0f24ca9412671bc21bb3d62c765837877bb19e2d}, + all_ss_ids = {['0f24ca9412671bc21bb3d62c765837877bb19e2d']}, } @article{Roel12, @@ -20061,7 +24994,9 @@ @article{Roel12 optnote = {DIAG}, year = {2012}, gsid = {342965923672756254}, - gscites = {27}, + gscites = {31}, + ss_id = {6beb86f0b9019f6de377b970b6e22feba507c190}, + all_ss_ids = {['6beb86f0b9019f6de377b970b6e22feba507c190']}, } @conference{Roest22a, @@ -20081,6 +25016,9 @@ @article{Roest22b abstract = {OBJECTIVES: To evaluate the feasibility of automatic longitudinal analysis of consecutive biparametric MRI (bpMRI) scans to detect clinically significant (cs) prostate cancer (PCa). METHODS: This retrospective study included a multi-center dataset of 1513 patients who underwent bpMRI (T2 + DWI) between 2014 and 2020, of whom 73 patients underwent at least two consecutive bpMRI scans and repeat biopsies. A deep learning PCa detection model was developed to produce a heatmap of all PIRADS >= 2 lesions across prior and current studies. The heatmaps for each patient's prior and current examination were used to extract differential volumetric and likelihood features reflecting explainable changes between examinations. A machine learning classifier was trained to predict from these features csPCa (ISUP > 1) at the current examination according to biopsy. A classifier trained on the current study only was developed for comparison. An extended classifier was developed to incorporate clinical parameters (PSA, PSA density, and age). The cross-validated diagnostic accuracies were compared using ROC analysis. The diagnostic performance of the best model was compared to the radiologist scores. RESULTS: The model including prior and current study (AUC 0.81, CI: 0.69, 0.91) resulted in a higher (p = 0.04) diagnostic accuracy than the current only model (AUC 0.73, CI: 0.61, 0.84). Adding clinical variables further improved diagnostic performance (AUC 0.86, CI: 0.77, 0.93). The diagnostic performance of the surveillance AI model was significantly better (p = 0.02) than of radiologists (AUC 0.69, CI: 0.54, 0.81).
CONCLUSIONS: Our proposed AI-assisted surveillance of prostate MRI can pick up explainable, diagnostically relevant changes with promising diagnostic accuracy.}, optnote = {DIAG, RADIOLOGY}, year = {2022}, + ss_id = {0b10a3e57da548c641e38f7ad02937e6bd50203d}, + all_ss_ids = {['0b10a3e57da548c641e38f7ad02937e6bd50203d']}, + gscites = {7}, } @article{Roll09, @@ -20101,6 +25039,38 @@ @article{Roll09 gscites = {20}, } +@article{Roo22, + author = {de Roo, Saskia F. and Teunissen, Joris S. and Rutten, Matthieu J. C. M. and van der Heijden, Brigitte E. P. A.}, + title = {~~Tourniquet Does Not Affect Long-term Outcomes in Minor Hand Surgery: A Randomized Controlled Trial}, + doi = {10.1097/gox.0000000000004495}, + year = {2022}, + abstract = { + Background: + Surgeons often prefer to use a tourniquet during minor procedures, such as carpal tunnel release (CTR) or trigger finger release (TFR). Besides the possible discomfort for the patient, the effect of tourniquet use on long-term results and complications is unknown. Our primary aim was to compare the patient-reported outcomes 1 year after CTR or TFR under local anesthesia with or without tourniquet. Secondary outcomes included satisfaction, sonographically estimated scar tissue thickness after CTR, and postoperative complications. + + + Methods: + Between May 2019 and May 2020, 163 patients planned for open CTR or TFR under local anesthesia were included. Before surgery, and at 3, 6, and 12 months postoperatively, Quick Disabilities of the Arm, Shoulder and Hand and Boston Carpal Tunnel questionnaires were administered, and complications were noted. At 6 months postoperatively, an ultrasound was conducted to determine the thickness of scar tissue in the region of median nerve. + + + Results: + A total of 142 patients (51 men [38%]) were included. The Quick Disabilities of the Arm, Shoulder and Hand questionnaire and Boston Carpal Tunnel Questionnaire scores improved significantly in both groups during follow-up, wherein most improvements were seen in the first 3 months. No difference in clinical outcome and scar tissue formation was found between the two groups after 12 months. The complication rate was comparable between both groups. Thirty-two (24%) patients had at least one complication, none needed surgical interventions, and no recurrent symptoms were seen. + + + Conclusions: + Our study shows similar long-term clinical outcomes, formation of scar tissue, and complication rates for patients undergoing CTR or TFR with or without a tourniquet. Tourniquet usage should be based on shared decision-making. + }, + url = {http://dx.doi.org/10.1097/GOX.0000000000004495}, + file = {Roo22.pdf:pdf\\Roo22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Plastic and Reconstructive Surgery - Global Open}, + automatic = {yes}, + citation-count = {0}, + pages = {e4495}, + volume = {10}, + pmid = {36032380}, +} + @article{Rooi94, author = {de Rooij, T. P. and Oestmann, J. W. and Schultze Kool, L. J. and Vrooman, H. A. and Prokop, M. and Schaefer, C. M.}, title = {Application of AMBER in single- and dual-energy digital imaging: improvement in noise level and display dynamic range}, @@ -20142,6 +25112,60 @@ @article{Ross20 doi = {10.1109/TMI.2020.3043641}, pmid = {33296302}, year = {2020}, + ss_id = {d89bbd9b8c45e37b4e874fd1fb0690b1af338c29}, + all_ss_ids = {['d89bbd9b8c45e37b4e874fd1fb0690b1af338c29']}, + gscites = {15}, +} + +@article{Roth22, + author = {Roth, Holger R. 
and Xu, Ziyue and Tor-D\'{i}ez, Carlos and Sanchez Jacob, Ramon and Zember, Jonathan and Molto, Jose and Li, Wenqi and Xu, Sheng and Turkbey, Baris and Turkbey, Evrim and Yang, Dong and Harouni, Ahmed and Rieke, Nicola and Hu, Shishuai and Isensee, Fabian and Tang, Claire and Yu, Qinji and S\"{o}lter, Jan and Zheng, Tong and Liauchuk, Vitali and Zhou, Ziqi and Moltz, Jan Hendrik and Oliveira, Bruno and Xia, Yong and Maier-Hein, Klaus H. and Li, Qikai and Husch, Andreas and Zhang, Luyang and Kovalev, Vassili and Kang, Li and Hering, Alessa and Vila\c{c}a, Jo\~{a}o L. and Flores, Mona and Xu, Daguang and Wood, Bradford and Linguraru, Marius George}, + title = {~~Rapid artificial intelligence solutions in a pandemic--The COVID-19-20 Lung CT Lesion Segmentation Challenge}, + doi = {10.1016/j.media.2022.102605}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.media.2022.102605}, + file = {Roth22.pdf:pdf\\Roth22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Image Analysis}, + automatic = {yes}, + citation-count = {8}, + pages = {102605}, + volume = {82}, + pmid = {34100010}, +} + +@article{Rubi20, + author = {Rubin, Geoffrey D. and Ryerson, Christopher J. and Haramati, Linda B. and Sverzellati, Nicola and Kanne, Jeffrey P. and Raoof, Suhail and Schluger, Neil W. and Volpi, Annalisa and Yim, Jae-Joon and Martin, Ian B.K. and Anderson, Deverick J. and Kong, Christina and Altes, Talissa and Bush, Andrew and Desai, Sujal R. and Goldin, Jonathan and Goo, Jin Mo and Humbert, Marc and Inoue, Yoshikazu and Kauczor, Hans-Ulrich and Luo, Fengming and Mazzone, Peter J. and Prokop, Mathias and Remy-Jardin, Martine and Richeldi, Luca and Schaefer-Prokop, Cornelia M. and Tomiyama, Noriyuki and Wells, Athol U. 
and Leung, Ann N.}, + title = {~~The Role of Chest Imaging in Patient Management During the COVID-19 Pandemic}, + doi = {10.1016/j.chest.2020.04.003}, + year = {2020}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.chest.2020.04.003}, + file = {Rubi20.pdf:pdf\\Rubi20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Chest}, + automatic = {yes}, + citation-count = {445}, + pages = {106-116}, + volume = {158}, + pmid = {32255413}, } @article{Rudy14, @@ -20186,6 +25210,34 @@ @phdthesis{Ruiz19 journal = {PhD thesis}, } +@article{Rutg21, + author = {Rutgers, Jikke J. and B\'{a}nki, Tessa and van der Kamp, Ananda and Waterlander, Tomas J. and Scheijde-Vermeulen, Marijn A. and van den Heuvel-Eibrink, Marry M. and van der Laak, Jeroen A. W. M. and Fiocco, Marta and Mavinkurve-Groothuis, Annelies M. C. and de Krijger, Ronald R.}, + title = {~~Interobserver variability between experienced and inexperienced observers in the histopathological analysis of Wilms tumors: a pilot study for future algorithmic approach}, + doi = {10.1186/s13000-021-01136-w}, + year = {2021}, + abstract = {Abstract + Background + Histopathological classification of Wilms tumors determines treatment regimen. Machine learning has been shown to contribute to histopathological classification in various malignancies but requires large numbers of manually annotated images and thus specific pathological knowledge. This study aimed to assess whether trained, inexperienced observers could contribute to reliable annotation of Wilms tumor components for classification performed by machine learning. + + Methods + Four inexperienced observers (medical students) were trained in histopathology of normal kidneys and Wilms tumors by an experienced observer (pediatric pathologist). Twenty randomly selected scanned Wilms tumor-slides (from n = 1472 slides) were annotated, and annotations were independently classified by both the inexperienced observers and two experienced pediatric pathologists. Agreement between the six observers and for each tissue element was measured using kappa statistics (k). + + Results + Pairwise interobserver agreement between all inexperienced and experienced observers was high (range: 0.845-0.950). The interobserver variability for the different histological elements, including all vital tumor components and therapy-related effects, showed high values for all k-coefficients (> 0.827). + + Conclusions + Inexperienced observers can be trained to recognize specific histopathological tumor and tissue elements with high interobserver agreement with experienced observers. Nevertheless, supervision by experienced pathologists remains necessary. Results of this study can be used to facilitate more rapid progress for supervised machine learning-based algorithm development in pediatric pathology and beyond. 
+ }, + url = {http://dx.doi.org/10.1186/s13000-021-01136-w}, + file = {Rutg21.pdf:pdf\\Rutg21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Diagnostic Pathology}, + automatic = {yes}, + citation-count = {3}, + volume = {16}, + pmid = {34419100}, +} + @article{Saad19, author = {Saadatmand, Sepideh and Geuzinge, H Amarens and Rutgers, Emiel J T and Mann, Ritse M and de Roy van Zuidewijn, Diderick B W and Zonderland, Harmien M and Tollenaar, Rob A E M and Lobbes, Marc B I and Ausems, Margreet G E M and van 't Riet, Martijne and Hooning, Maartje J and Mares-Engelberts, Ingeborg and Luiten, Ernest J T and Heijnsdijk, Eveline A M and Verhoef, Cees and Karssemeijer, Nico and Oosterwijk, Jan C and Obdeijn, Inge-Marie and de Koning, Harry J and Tilanus-Linthorst, Madeleine M A and FaMRIsc study group}, title = {MRI versus mammography for breast cancer screening in women with familial risk (FaMRIsc): a multicentre, randomised, controlled trial}, @@ -20202,6 +25254,23 @@ @article{Saad19 pmid = {31221620}, gsid = {15260137744364653571}, gscites = {19}, + ss_id = {7cbf61f2d7b1fae068e3b815ebcb09e09555cd50}, + all_ss_ids = {['7cbf61f2d7b1fae068e3b815ebcb09e09555cd50']}, +} + +@article{Sadr23, + author = {Sadr, Soroush and Mohammad-Rahimi, Hossein and Motamedian, Saeed Reza and Zahedrozegar, Samira and Motie, Parisa and Vinayahalingam, Shankeeth and Dianat, Omid and Nosrat, Ali}, + title = {Deep Learning for Detection of Periapical Radiolucent Lesions: A Systematic Review and Meta-analysis of Diagnostic Test Accuracy.}, + doi = {10.1016/j.joen.2022.12.007}, + issue = {3}, + pages = {248--261.e3}, + volume = {49}, + abstract = {The aim of this systematic review and meta-analysis was to investigate the overall accuracy of deep learning models in detecting periapical (PA) radiolucent lesions in dental radiographs, when compared to expert clinicians. Electronic databases of Medline (via PubMed), Embase (via Ovid), Scopus, Google Scholar, and arXiv were searched. Quality of eligible studies was assessed by using Quality Assessment and Diagnostic Accuracy Tool-2. Quantitative analyses were conducted using hierarchical logistic regression for meta-analyses on diagnostic accuracy. Subgroup analyses on different image modalities (PA radiographs, panoramic radiographs, and cone beam computed tomographic images) and on different deep learning tasks (classification, segmentation, object detection) were conducted. Certainty of evidence was assessed by using Grading of Recommendations Assessment, Development, and Evaluation system. A total of 932 studies were screened. Eighteen studies were included in the systematic review, out of which 6 studies were selected for quantitative analyses. Six studies had low risk of bias. Twelve studies had risk of bias. Pooled sensitivity, specificity, positive likelihood ratio, negative likelihood ratio, and diagnostic odds ratio of included studies (all image modalities; all tasks) were 0.925 (95% confidence interval [CI], 0.862-0.960), 0.852 (95% CI, 0.810-0.885), 6.261 (95% CI, 4.717-8.311), 0.087 (95% CI, 0.045-0.168), and 71.692 (95% CI, 29.957-171.565), respectively. No publication bias was detected (Egger's test, P = .82). Grading of Recommendations Assessment, Development and Evaluation showed a "high" certainty of evidence for the studies included in the meta-analyses. Compared to expert clinicians, deep learning showed highly accurate results in detecting PA radiolucent lesions in dental radiographs. Most studies had risk of bias. 
There was a lack of prospective studies.}, + file = {Sadr23.pdf:pdf\\Sadr23.pdf:PDF}, + journal = {Journal of endodontics}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36563779}, + year = {2023}, } @inproceedings{Saha20a, @@ -20228,6 +25297,9 @@ @inproceedings{Saha20b abstract = {We hypothesize that anatomical priors can be viable mediums to infuse domain-specific clinical knowledge into state-of-the-art convolutional neural networks (CNN) based on the U-Net architecture. We introduce a probabilistic population prior which captures the spatial prevalence and zonal distinction of clinically significant prostate cancer (csPCa), in order to improve its computer-aided detection (CAD) in bi-parametric MR imaging (bpMRI). To evaluate performance, we train 3D adaptations of the U-Net, U-SEResNet, UNet++ and Attention U-Net using 800 institutional training-validation scans, paired with radiologically-estimated annotations and our computed prior. For 200 independent testing bpMRI scans with histologically-confirmed delineations of csPCa, our proposed method of encoding clinical priori demonstrates a strong ability to improve patient-based diagnosis (upto 8.70% increase in AUROC) and lesion-level detection (average increase of 1.08 pAUC between 0.1-1.0 false positive per patient) across all four architectures.}, optnote = {DIAG, RADIOLOGY}, gsid = {17689577475889939052}, + ss_id = {0932c8d086f3646cd8a42f9ee5353fe092d73cd4}, + all_ss_ids = {['0932c8d086f3646cd8a42f9ee5353fe092d73cd4']}, + gscites = {5}, } @inproceedings{Saha20c, @@ -20263,12 +25335,15 @@ @article{Saha21a doi = {10.1016/j.media.2021.102155}, url = {https://www.sciencedirect.com/science/article/pii/S1361841521002012}, abstract = {We present a multi-stage 3D computer-aided detection and diagnosis (CAD) model for automated localization of clinically significant prostate cancer (csPCa) in bi-parametric MR imaging (bpMRI). Deep attention mechanisms drive its detection network, targeting salient structures and highly discriminative feature dimensions across multiple resolutions. Its goal is to accurately identify csPCa lesions from indolent cancer and the wide range of benign pathology that can afflict the prostate gland. Simultaneously, a decoupled residual classifier is used to achieve consistent false positive reduction, without sacrificing high sensitivity or computational efficiency. In order to guide model generalization with domain-specific clinical knowledge, a probabilistic anatomical prior is used to encode the spatial prevalence and zonal distinction of csPCa. Using a large dataset of 1950 prostate bpMRI paired with radiologically-estimated annotations, we hypothesize that such CNN-based models can be trained to detect biopsy-confirmed malignancies in an independent cohort. - - For 486 institutional testing scans, the 3D CAD system achieves 83.69+-5.22% and 93.19+-2.96% detection sensitivity at 0.50 and 1.46 false positive(s) per patient, respectively, with 0.882+-0.030 AUROC in patient-based diagnosis -significantly outperforming four state-of-the-art baseline architectures (U-SEResNet, UNet++, nnU-Net, Attention U-Net) from recent literature. 
For 296 external biopsy-confirmed testing scans, the ensembled CAD system shares moderate agreement with a consensus of expert radiologists (76.69%; kappa = 0.51+-0.04) and independent pathologists (81.08%; kappa = 0.56+-0.06); demonstrating strong generalization to histologically-confirmed csPCa diagnosis.}, + + For 486 institutional testing scans, the 3D CAD system achieves 83.69+-5.22% and 93.19+-2.96% detection sensitivity at 0.50 and 1.46 false positive(s) per patient, respectively, with 0.882+-0.030 AUROC in patient-based diagnosis - significantly outperforming four state-of-the-art baseline architectures (U-SEResNet, UNet++, nnU-Net, Attention U-Net) from recent literature. For 296 external biopsy-confirmed testing scans, the ensembled CAD system shares moderate agreement with a consensus of expert radiologists (76.69%; kappa = 0.51+-0.04) and independent pathologists (81.08%; kappa = 0.56+-0.06); demonstrating strong generalization to histologically-confirmed csPCa diagnosis.}, optnote = {DIAG, RADIOLOGY}, algorithm = {https://grand-challenge.org/algorithms/prostate-mri-cad-cspca/}, gsid = {10384137846444027679}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/238409}, + ss_id = {16bd5fd779ba5acfd59403d3e91d6cf6ecd28f93}, + all_ss_ids = {['16bd5fd779ba5acfd59403d3e91d6cf6ecd28f93']}, + gscites = {80}, } @inproceedings{Saha21b, @@ -20279,6 +25354,9 @@ @inproceedings{Saha21b url = {https://arxiv.org/abs/2110.12889}, abstract = {We hypothesize that probabilistic voxel-level classification of anatomy and malignancy in prostate MRI, although typically posed as near-identical segmentation tasks via U-Nets, require different loss functions for optimal performance due to inherent differences in their clinical objectives. We investigate distribution, region and boundary-based loss functions for both tasks across 200 patient exams from the publicly-available ProstateX dataset. For evaluation, we conduct a thorough comparative analysis of model predictions and calibration, measured with respect to multi-class volume segmentation of the prostate anatomy (whole-gland, transitional zone, peripheral zone), as well as, patient-level diagnosis and lesion-level detection of clinically significant prostate cancer. Notably, we find that distribution-based loss functions (in particular, focal loss) are well-suited for diagnostic or panoptic segmentation tasks such as lesion detection, primarily due to their implicit property of inducing better calibration. Meanwhile, (with the exception of focal loss) both distribution and region/boundary-based loss functions perform equally well for anatomical or semantic segmentation tasks, such as quantification of organ shape, size and boundaries.}, optnote = {DIAG, RADIOLOGY}, + ss_id = {cad5d5cc2c32623da074133016d1db3da410fc7b}, + all_ss_ids = {['cad5d5cc2c32623da074133016d1db3da410fc7b']}, + gscites = {5}, } @conference{Saha21c, @@ -20300,16 +25378,16 @@ @conference{Saha22a } @conference{Saha23a, - author = {Anindo Saha and Joeran S. 
Bosma and Jasper Twilt and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Maarten de Rooij and Henkjan Huisman}, title = {Artificial Intelligence and Radiologists at Prostate Cancer Detection in MRI: The PI-CAI Challenge}, booktitle = ECR, year = {2023}, abstract = {PURPOSE: The PI-CAI challenge aims to validate the diagnostic performance of artificial intelligence (AI) and radiologists at clinically significant prostate cancer (csPCa) detection in MRI, with histopathology and follow-up (>=3 years) as the reference standard. METHODS: This retrospective study includes 10,207 prostate MRI exams (9129 patients) curated from four European tertiary care centers between 2012-2021. All patients were men suspected of harboring prostate cancer, without a history of treatment or prior csPCa findings. Imaging was acquired using various 1.5 or 3T MRI scanners, equipped with surface coils. Algorithm developers worldwide were invited to develop AI models for detecting csPCa in biparametric MRI (bpMRI). For a given bpMRI exam, AI models were required to complete two tasks: localize all csPCa lesions (if any), and predict the case-level likelihood of csPCa diagnosis. To this end, AI models could use imaging data and several variables (e.g. patient age, PSA level, scanner model) to inform their predictions. Once developed, these algorithms were independently tested using 1000 cases (including external data) in a fully-blinded setting. 
RESULTS: The PI-CAI study protocol was established in conjunction with 16 experts across prostate radiology, urology and AI. Between June-November 2022, >830 individuals (>50 countries) opted-in and >310 algorithm submissions were made. When trained on 1500 training cases, the top five most performant AI models reached 0.88+-0.01 AUROC in patient diagnosis, and 76.38+-0.74% sensitivity at 0.5 false positives per case in lesion detection. CONCLUSION: Preliminary findings indicate that the diagnostic performance of state-of-the-art AI models is comparable to that of radiologists reported in literature. LIMITATIONS: Radiology readings of the original data were used to guide biopsy planning, histopathology grading, and in turn, set the reference standard.}, optnote = {DIAG, RADIOLOGY}, } @inproceedings{Saha23b, - author = {Anindo Saha and Joeran S. Bosma and Jasper Twilt and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Fütterer and Maarten de Rooij and Henkjan Huisman}, + author = {Anindo Saha and Joeran S. Bosma and Jasper Twilt and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Maarten de Rooij and Henkjan Huisman}, title = {Artificial Intelligence and Radiologists at Prostate Cancer Detection in MRI: The PI-CAI Challenge}, booktitle = MIDL, year = {2023}, @@ -20348,12 +25426,31 @@ @article{Saks14 month = {11}, gsid = {15875798834789801368}, gscites = {9}, + ss_id = {6873c25b4466ae69a4755dd1f937bf0b82f6ad11}, + all_ss_ids = {['6873c25b4466ae69a4755dd1f937bf0b82f6ad11']}, } -@mastersthesis{Samu06, - author = {Samulski, Maurice}, - title = {Classification of {B}reast {L}esions in {D}igital {M}ammograms}, - year = {2006}, +@article{Samp22, + author = {Samperna, Riccardo and Moriakov, Nikita and Karssemeijer, Nico and Teuwen, Jonas and Mann, Ritse M.}, + title = {~~Exploiting the Dixon Method for a Robust Breast and Fibro-Glandular Tissue Segmentation in Breast MRI}, + doi = {10.3390/diagnostics12071690}, + year = {2022}, + abstract = {Automatic breast and fibro-glandular tissue (FGT) segmentation in breast MRI allows for the efficient and accurate calculation of breast density. The U-Net architecture, either 2D or 3D, has already been shown to be effective at addressing the segmentation problem in breast MRI. However, the lack of publicly available datasets for this task has forced several authors to rely on internal datasets composed of either acquisitions without fat suppression (WOFS) or with fat suppression (FS), limiting the generalization of the approach. To solve this problem, we propose a data-centric approach, efficiently using the data available. By collecting a dataset of T1-weighted breast MRI acquisitions acquired with the use of the Dixon method, we train a network on both T1 WOFS and FS acquisitions while utilizing the same ground truth segmentation. Using the "plug-and-play" framework nnUNet, we achieve, on our internal test set, a Dice Similarity Coefficient (DSC) of 0.96 and 0.91 for WOFS breast and FGT segmentation and 0.95 and 0.86 for FS breast and FGT segmentation, respectively. 
On an external, publicly available dataset, a panel of breast radiologists rated the quality of our automatic segmentation with an average of 3.73 on a four-point scale, with an average percentage agreement of 67.5%.}, + url = {http://dx.doi.org/10.3390/diagnostics12071690}, + file = {Samp22.pdf:pdf\\Samp22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Diagnostics}, + automatic = {yes}, + citation-count = {0}, + pages = {1690}, + volume = {12}, + pmid = {35885594}, +} + +@mastersthesis{Samu06, + author = {Samulski, Maurice}, + title = {Classification of {B}reast {L}esions in {D}igital {M}ammograms}, + year = {2006}, abstract = {{B}reast cancer is the most common life-threatening type of cancer affecting women in {T}he {N}etherlands. {A}bout 10% of the {D}utch women have to face breast cancer in their lifetime. {T}he success of the treatment of breast cancer largely depends on the stage of a tumor at the time of detection. {I}f the size of the invasive cancer is smaller than 20 mm and no metastases are found, chances of successful treatment are high. {T}herefore, early detection of breast cancer is essential. {A}lthough mammography screening is currently the most effective tool for early detection of breast cancer, up to one-fifth of women with invasive breast cancer have a mammogram that is interpreted as normal, i.e., a false-negative mammogram result. {A}n important cause are interpretation errors, i.e., when a radiologist sees the cancer, but classify it as benign. {I}n addition, the number of false-positive mammogram results is quite high, more than half of women who undergo a biopsy actually have breast cancer. {T}o overcome such limitations, {C}omputer-{A}ided {D}iagnosis ({CAD}) systems for automatic classification of breast lesions as either benign or malignant are being developed. {CAD} systems help radiologists with the interpretation of lesions, such that they refer less women for further examination when they actually have benign lesions. {T}he dataset we used consists of mammographic features extracted by automated image processing algorithms from digitized mammograms of the {D}utch screening programme. {I}n this thesis we constructed several types of classifiers, i.e., {B}ayesian networks and support vector machines, for the task of computer-aided diagnosis of breast lesions. {W}e evaluated the results with receiver operating characteristic ({ROC}) analysis to compare their classification performance. {T}he overall conclusion is that support vector machines are still the method of choice if the aim is to maximize classification performance. {A}lthough {B}ayesian networks are not primarily designed for classification problems, they did not perform drastically lower. 
{I}f new datasets are being constructed and more background knowledge becomes available, the advantages of {B}ayesian networks, i.e., incorporating domain knowledge and modeling dependencies, could play an important role in the future.}, file = {Samu06.pdf:pdf\\Samu06.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, @@ -20378,6 +25475,8 @@ @inproceedings{Samu07 month = {3}, gsid = {4945331176484965869}, gscites = {13}, + ss_id = {17076b47c70f579a9e3fc9a898c86dfe62fdfdca}, + all_ss_ids = {['17076b47c70f579a9e3fc9a898c86dfe62fdfdca']}, } @inproceedings{Samu08, @@ -20397,6 +25496,8 @@ @inproceedings{Samu08 month = {3}, gsid = {12671692002821730625}, gscites = {20}, + ss_id = {11ddc349211e0ee1ff0e463ef6933bfa9d3919fa}, + all_ss_ids = {['11ddc349211e0ee1ff0e463ef6933bfa9d3919fa']}, } @inproceedings{Samu08a, @@ -20417,6 +25518,9 @@ @inproceedings{Samu08b abstract = {{PURPOSE}/{AIM} {T}o experience the use of an interactive computer-aided decision support system for the detection of mammographic masses. {T}o demonstrate the real-time classification of breast lesions. {CONTENT} {ORGANIZATION} {T}he idea of using {CAD} in an interactive way will be explained. {T}hen a case review will be offered, in which participants evaluate a small set of abnormal and normal screening mammograms in two sequential sessions: one without and the other with interactive {CAD}. {P}articipants are asked to find and rate abnormal masses. {A}t the end of the session, participants can judge their performance and are given the opportunity to review their scores for each case. {SUMMARY} {C}urrent computer-aided detection workstations display suspicious mammographic regions identified by computer algorithms as prompts to avoid perceptual oversights. {I}n the presented system the presence of {CAD} regions can be probed interactively using a mouse click and aid the radiologist with the interpretation of masses. 
{I}nitial studies suggest that readers may improve their detection performance using {CAD} in an interactive way.}, file = {A1 Poster:pdf\\Samu08b.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {e0f346c924c5e504a8318026e2d38d33bc6069b7}, + all_ss_ids = {['e0f346c924c5e504a8318026e2d38d33bc6069b7']}, + gscites = {1}, } @inproceedings{Samu09, @@ -20435,7 +25539,9 @@ @inproceedings{Samu09 number = {1}, month = {2}, gsid = {15103250986617971832}, - gscites = {4}, + gscites = {5}, + ss_id = {390fb83c871c013ba98b5590a8c729266b05ed87}, + all_ss_ids = {['390fb83c871c013ba98b5590a8c729266b05ed87']}, } @article{Samu10, @@ -20453,7 +25559,9 @@ @article{Samu10 pmid = {20532890}, month = {6}, gsid = {4560188704602657719}, - gscites = {67}, + gscites = {69}, + ss_id = {7fa621703a760695adebedc925d1749efe73797d}, + all_ss_ids = {['7fa621703a760695adebedc925d1749efe73797d']}, } @inproceedings{Samu11, @@ -20472,6 +25580,8 @@ @inproceedings{Samu11 month = {3}, gsid = {13676975540218125841}, gscites = {7}, + ss_id = {cfd171ef5dd78227c2253f32340d7d4c040d4444}, + all_ss_ids = {['cfd171ef5dd78227c2253f32340d7d4c040d4444']}, } @article{Samu11a, @@ -20489,7 +25599,9 @@ @article{Samu11a pmid = {21233045}, month = {4}, gsid = {18346121360535689983}, - gscites = {62}, + gscites = {80}, + ss_id = {413ed0cca2f1915d279ae891324633c18c89cb74}, + all_ss_ids = {['413ed0cca2f1915d279ae891324633c18c89cb74']}, } @phdthesis{Samu11c, @@ -20559,7 +25671,9 @@ @article{Sanc08 pmid = {17556004}, month = {4}, gsid = {806873315643611742}, - gscites = {173}, + gscites = {183}, + ss_id = {896b8f5aeb3a16a887fb0fe1199096551b6f2ea9}, + all_ss_ids = {['896b8f5aeb3a16a887fb0fe1199096551b6f2ea9']}, } @phdthesis{Sanc08b, @@ -20588,7 +25702,9 @@ @inproceedings{Sanc09 optnote = {DIAG, RADIOLOGY}, month = {2}, gsid = {7951641732601365129}, - gscites = {10}, + gscites = {11}, + ss_id = {d6f43ce72b81460ebe93f3f3c959ff5983d66b07}, + all_ss_ids = {['d6f43ce72b81460ebe93f3f3c959ff5983d66b07']}, } @article{Sanc09a, @@ -20606,7 +25722,9 @@ @article{Sanc09a pmid = {19539518}, month = {8}, gsid = {16533196676659344093}, - gscites = {206}, + gscites = {225}, + ss_id = {699e59747454c422766132be28c2aa3c4c04de99}, + all_ss_ids = {['699e59747454c422766132be28c2aa3c4c04de99']}, } @inproceedings{Sanc09b, @@ -20622,7 +25740,9 @@ @inproceedings{Sanc09b file = {Sanc09b.pdf:pdf\\Sanc09b.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {3729643137782569352}, - gscites = {61}, + gscites = {75}, + ss_id = {7c96564eb21ed23357d3ce5f33445756002ae47e}, + all_ss_ids = {['7c96564eb21ed23357d3ce5f33445756002ae47e']}, } @inproceedings{Sanc10, @@ -20637,7 +25757,9 @@ @inproceedings{Sanc10 optnote = {DIAG, RADIOLOGY}, month = {4}, gsid = {1493852564349449873}, - gscites = {33}, + gscites = {37}, + ss_id = {51371dc70506dbdcc2d79eb28783c80226d8bc36}, + all_ss_ids = {['51371dc70506dbdcc2d79eb28783c80226d8bc36']}, } @inproceedings{Sanc10a, @@ -20654,7 +25776,9 @@ @inproceedings{Sanc10a optnote = {DIAG, RADIOLOGY}, pmid = {20879450}, gsid = {17509952020796744898}, - gscites = {15}, + gscites = {21}, + ss_id = {a0083e8d84d4858e4c85dcfce6db4ec18b57535a}, + all_ss_ids = {['a0083e8d84d4858e4c85dcfce6db4ec18b57535a']}, } @article{Sanc11, @@ -20671,7 +25795,9 @@ @article{Sanc11 pmid = {21527381}, month = {6}, gsid = {2962166101861378440}, - gscites = {78}, + gscites = {116}, + ss_id = {c8ae5ea9ede39decae80e0e8f9a13b8b87fb6f75}, + all_ss_ids = {['c8ae5ea9ede39decae80e0e8f9a13b8b87fb6f75']}, } @article{Sanc12, @@ -20689,8 +25815,10 @@ @article{Sanc12 pmid = {21689964}, month = {1}, gsid = 
{13166674494125805160},
-  gscites = {43},
+  gscites = {45},
   taverne_url = {https://repository.ubn.ru.nl/handle/2066/110688},
+  ss_id = {30a5be97d463a7e30248c1c5cc3511ded6205d8f},
+  all_ss_ids = {['30a5be97d463a7e30248c1c5cc3511ded6205d8f']},
 }
 
 @conference{Sanc15a,
@@ -20700,6 +25828,8 @@ @conference{Sanc15a
   abstract = {Purpose: To evaluate an observer-independent image analysis algorithm that automatically quantifies the area of geographic atrophy in fundus autofluorescence images of Stargardt patients. Methods: Fundus autofluorescence images of 20 eyes of 20 Stargardt patients with presence of one delineated or patchy atrophy region in the macular area were selected. An image analysis algorithm was developed to automatically segment the area of atrophy starting from an arbitrarily selected seed point inside the atrophy region. The method was based on a combination of region growing algorithm and a dynamic, user-independent threshold selection procedure using Otsu thresholding. In order to assess the performance obtained by the proposed algorithm, manual annotations were made by an experienced human grader. The grader manually delineated the atrophy areas on the same set of images using dedicated software developed for this task. Results: A high correlation was observed between the manual area measurements and the automatically quantified values obtained by the proposed algorithm, with a mean intra-class correlation coefficient ({ICC}) value larger than 0.89. In addition, the quantification time was reduced substantially by a factor of 27 compared to manual assessment. The output of the software was also shown to be independent of the user input and highly reproducible, with an {ICC} value larger than 0.99 between two executions of the algorithm at different time points and with different seed points. Conclusions: An image analysis algorithm for automatic quantification of geographic atrophy in autofluorescence images of Stargardt patients was developed. The proposed algorithm allows for precise, reproducible and fast quantification of the atrophy area, providing an accurate procedure to measure disease progression and assess potential therapies in large dataset analyses independent of human observers.},
   optnote = {DIAG, RADIOLOGY},
   year = {2015},
+  all_ss_ids = {['589037d110f74defb9cac3b42a370f7b044a1c4b']},
+  gscites = {3},
 }
 
 @conference{Sand19,
@@ -20708,52 +25838,52 @@ @conference{Sand19
   booktitle = ISMRM,
   year = {2019},
   abstract = {Synopsis
-                 The aim of this study was to compare a prototype simultaneous multi-slice single-shot echo planar imaging (SMS-ss-DWI-EPI) sequence with
-                 conventional readout-segmented echo-planar imaging (rs-DWI-EPI) for diffusion-weighted imaging of the breast at 3T magnetic resonance
-                 imaging (MRI). A reader study was conducted to evaluate image quality, lesion conspicuity and BI-RADS score. Our results show that although
-                 the image quality with the conventional rs-DWI-EPI is superior, malignant lesions have improved visibility with the SMS-ss-DWI-EPI sequence.
-
-                 Introduction
-                 The addition of diffusion-weighted imaging (DWI) to contrast-enhanced breast MRI improves the classification of breast lesions, which leads in turn to an
-                 increased positive predictive value of biopsies. Consequently, DWI with evaluation of the corresponding apparent diffusion coefficient (ADC) is included in
-                 most state-of-the-art breast MRI protocols .
The echo train of the readout-segmented echo-planar imaging-based DWI sequence (rs-DWI-EPI) was - shortened to reduce distortion and improve the resulting image quality. However, this sequence results in a lower signal-to-noise ratio (SNR) than singleshot - echo planar imaging (ss-EPI) . In practice, detection of lesions on DWI is often problematic due to a relatively low lesion conspicuity. To improve the - detectability of lesions and the speed of acquisition, a prototype DWI sequence, the simultaneous multi-slice single-shot DWI-EPI (SMS-ss-DWI-EPI), was - developed. In this study we compare this prototype sequence with rs-DWI-EPI at 3T, in terms of image quality (IQ), lesion conspicuity, and breast imaging - reporting and data system (BI-RADS ) score. - - Methods - From September 2017 to August 2018, 15 women with known breast cancer or suspicious breast lesions were included, after providing signed informed - consent. Women were scanned with the conventional rs-DWI-EPI and the SMS-ss-DWI-EPI during the same clinical examination on a 3T MAGNETOM - Skyra system (Siemens Healthcare, Erlangen, Germany) using a 16-channel bilateral breast coil. Parameters of the rs-DWI-EPI sequence were: TR: 5450 - ms, TE: 57 ms, FoV: 340 mm, voxel size: 1.2x1.2x5 mm , acquisition time: 4:23 min, b-values: 50, 850 s/mm , SPAIR fat suppression. Parameters of the - SMS-ss-DWI-EPI sequence were: TR: 4000 ms, TE: 70 ms, FoV: 360 mm, voxel size: 0.9(i)x0.9(i)x4 mm , acquisition time: 2:45 min, b-values: 50, 400, - 800 s/mm , SPAIR fat suppression. In addition, the clinical protocol included one pre- and five post-contrast administration regular T1-weighted Dixon - acquisitions, ultrafast T1-weighted TWIST acquisitions during the inflow of contrast, and a T2 weighted Dixon acquisition. In total, 33 lesions (27 malignant, - 5 benign and 1 unknown) were detected on the contrast-enhanced series and described in the clinical MRI reports. Two dedicated breast radiologists (4 - and 10 years of experience with breast MRI) independently scored both sequences for overall IQ (1: extremely poor to 9: excellent). All lesions were also - independently evaluated for conspicuity (1: not visible, 2: visible if location is given, 3: visible), and a BI-RADS score (1 to 5) was given for each lesion. - Statistical analysis was performed in SPSS using the Wilcoxon signed-rank test. - - Results - Results are presented in Table 1. Overall IQ was significantly higher for the conventional rs-DWI-EPI than for the SMS-ss-DWI-EPI (p=0.006). Lesion - conspicuity scores were significantly higher for SMS-ss-DWI-EPI (p=0.016). Benign lesions had similar conspicuity with both sequences while malignant - lesions had significantly higher conspicuity with SMS-ss-DWI-EPI (p=0.027) (for example, see Figure 1). There was no significant difference in BI-RADS - scores (p=0.151) between the two sequences. - - Discussion - Although the conventional rs-DWI-EPI sequence results in better IQ, in general ss-EPI results in a higher SNR, which may lead to better visibility of - malignant lesions with SMS-ss-DWI-EPI. This might eventually improve the clinical value of DWI in addition to contrast enhanced breast MRI. - Simultaneous Multi-Slice (SMS) ensures that slices are excited simultaneously with a multiband pulse, which leads to a reduced acquisition time. In our - protocol, the combination of ss-EPI and SMS results in a higher spatial resolution while still having a shorter acquisition time than the conventional - sequence. 
The higher achievable spatial resolution may be an important factor for the improved lesion visibility, and conspicuity of malignant lesions. This - may make the SMS approach suitable for fast screening and diagnosis of breast cancer. Still, further development of the SMS-ss-DWI-EPI sequence is - needed for improved IQ and even better lesion conspicuity. Extension of the data pool and evaluation by additional readers is pending. - - Conclusion - Despite the perceived poorer image quality of the SMS-ss-DWI-EPI sequence, malignant lesions are better visualized using this sequence. When image - quality and conspicuity are further improved, this technique might enable improved lesion detection on unenhanced diffusion weighted breast MRI.}, + The aim of this study was to compare a prototype simultaneous multi-slice single-shot echo planar imaging (SMS-ss-DWI-EPI) sequence with + conventional readout-segmented echo-planar imaging (rs-DWI-EPI) for diffusion-weighted imaging of the breast at 3T magnetic resonance + imaging (MRI). A reader study was conducted to evaluate image quality, lesion conspicuity and BI-RADS score. Our results show that although + the image quality with the conventional rs-DWI-EPI is superior, malignant lesions have improved visibility with the SMS-ss-DWI-EPI sequence. + + Introduction + The addition of diffusion-weighted imaging (DWI) to contrast-enhanced breast MRI improves the classification of breast lesions, which leads in turn to an + increased positive predictive value of biopsies. Consequently, DWI with evaluation of the corresponding apparent diffusion coefficient (ADC) is included in + most state-of-the-art breast MRI protocols . The echo train of the readout-segmented echo-planar imaging-based DWI sequence (rs-DWI-EPI) was + shortened to reduce distortion and improve the resulting image quality. However, this sequence results in a lower signal-to-noise ratio (SNR) than singleshot + echo planar imaging (ss-EPI) . In practice, detection of lesions on DWI is often problematic due to a relatively low lesion conspicuity. To improve the + detectability of lesions and the speed of acquisition, a prototype DWI sequence, the simultaneous multi-slice single-shot DWI-EPI (SMS-ss-DWI-EPI), was + developed. In this study we compare this prototype sequence with rs-DWI-EPI at 3T, in terms of image quality (IQ), lesion conspicuity, and breast imaging + reporting and data system (BI-RADS ) score. + + Methods + From September 2017 to August 2018, 15 women with known breast cancer or suspicious breast lesions were included, after providing signed informed + consent. Women were scanned with the conventional rs-DWI-EPI and the SMS-ss-DWI-EPI during the same clinical examination on a 3T MAGNETOM + Skyra system (Siemens Healthcare, Erlangen, Germany) using a 16-channel bilateral breast coil. Parameters of the rs-DWI-EPI sequence were: TR: 5450 + ms, TE: 57 ms, FoV: 340 mm, voxel size: 1.2x1.2x5 mm , acquisition time: 4:23 min, b-values: 50, 850 s/mm , SPAIR fat suppression. Parameters of the + SMS-ss-DWI-EPI sequence were: TR: 4000 ms, TE: 70 ms, FoV: 360 mm, voxel size: 0.9(i)x0.9(i)x4 mm , acquisition time: 2:45 min, b-values: 50, 400, + 800 s/mm , SPAIR fat suppression. In addition, the clinical protocol included one pre- and five post-contrast administration regular T1-weighted Dixon + acquisitions, ultrafast T1-weighted TWIST acquisitions during the inflow of contrast, and a T2 weighted Dixon acquisition. 
In total, 33 lesions (27 malignant, + 5 benign and 1 unknown) were detected on the contrast-enhanced series and described in the clinical MRI reports. Two dedicated breast radiologists (4 + and 10 years of experience with breast MRI) independently scored both sequences for overall IQ (1: extremely poor to 9: excellent). All lesions were also + independently evaluated for conspicuity (1: not visible, 2: visible if location is given, 3: visible), and a BI-RADS score (1 to 5) was given for each lesion. + Statistical analysis was performed in SPSS using the Wilcoxon signed-rank test. + + Results + Results are presented in Table 1. Overall IQ was significantly higher for the conventional rs-DWI-EPI than for the SMS-ss-DWI-EPI (p=0.006). Lesion + conspicuity scores were significantly higher for SMS-ss-DWI-EPI (p=0.016). Benign lesions had similar conspicuity with both sequences while malignant + lesions had significantly higher conspicuity with SMS-ss-DWI-EPI (p=0.027) (for example, see Figure 1). There was no significant difference in BI-RADS + scores (p=0.151) between the two sequences. + + Discussion + Although the conventional rs-DWI-EPI sequence results in better IQ, in general ss-EPI results in a higher SNR, which may lead to better visibility of + malignant lesions with SMS-ss-DWI-EPI. This might eventually improve the clinical value of DWI in addition to contrast enhanced breast MRI. + Simultaneous Multi-Slice (SMS) ensures that slices are excited simultaneously with a multiband pulse, which leads to a reduced acquisition time. In our + protocol, the combination of ss-EPI and SMS results in a higher spatial resolution while still having a shorter acquisition time than the conventional + sequence. The higher achievable spatial resolution may be an important factor for the improved lesion visibility, and conspicuity of malignant lesions. This + may make the SMS approach suitable for fast screening and diagnosis of breast cancer. Still, further development of the SMS-ss-DWI-EPI sequence is + needed for improved IQ and even better lesion conspicuity. Extension of the data pool and evaluation by additional readers is pending. + + Conclusion + Despite the perceived poorer image quality of the SMS-ss-DWI-EPI sequence, malignant lesions are better visualized using this sequence. When image + quality and conspicuity are further improved, this technique might enable improved lesion detection on unenhanced diffusion weighted breast MRI.}, optnote = {DIAG}, } @@ -20772,7 +25902,9 @@ @article{Sand19a optnote = {AXTI, DIAG}, pmid = {31049740}, gsid = {11238611003834879571}, - gscites = {2}, + gscites = {5}, + ss_id = {57098a73d38c2b147b1dd2574d87b4c30eec21ab}, + all_ss_ids = {['57098a73d38c2b147b1dd2574d87b4c30eec21ab']}, } @conference{Sand20, @@ -20780,24 +25912,75 @@ @conference{Sand20 booktitle = {ISMRM Benelux}, title = {Simultaneous multi-slice single-shot DWI compared to routine read-out-segmented DWI for evaluation of breast lesions}, abstract = {Synopsis - The aim of this study was to compare a prototype simultaneous multi-slice single-shot echo planar imaging (SMS-ss-DWI-EPI) sequence with conventional readout-segmented echo-planar imaging (rs-DWI-EPI) for diffusion-weighted imaging of the breast at 3T magnetic resonance imaging (MRI). A reader study was conducted to evaluate image quality, lesion conspicuity and BI-RADS score. 
Our results show that although the image quality with the conventional rs-DWI-EPI is superior, malignant lesions have improved visibility with the SMS-ss-DWI-EPI sequence. - - Introduction - The addition of diffusion-weighted imaging (DWI) to contrast-enhanced breast MRI improves the classification of breast lesions, which leads in turn to an increased positive predictive value of biopsies. Consequently, DWI with evaluation of the corresponding apparent diffusion coefficient (ADC) is included in most state-of-the-art breast MRI protocols. The echo train of the readout-segmented echo-planar imaging-based DWI sequence (rs-DWI-EPI) was shortened to reduce distortion and improve the resulting image quality. However, this sequence results in a lower signal-to-noise ratio (SNR) than single-shot echo planar imaging (ss-EPI). In practice, detection of lesions on DWI is often problematic due to a relatively low lesion conspicuity. To improve the detectability of lesions and the speed of acquisition, a prototype DWI sequence, the simultaneous multi-slice single-shot DWI-EPI (SMS-ss-DWI-EPI), was developed. In this study, we compare this prototype sequence with rs-DWI-EPI at 3T, in terms of image quality (IQ), lesion conspicuity, and the presence of artifacts. - - Methods - From September 2017 to December 2018, 25 women with known breast cancer or suspicious breast lesions were included, after providing signed informed consent. Women were scanned with the conventional rs-DWI-EPI and the SMS-ss-DWI-EPI during the same clinical examination on a 3T MAGNETOM Skyra system (Siemens Healthcare, Erlangen, Germany) using a 16-channel bilateral breast coil. Parameters of the rs-DWI-EPI sequence were: TR: 5450 ms, TE: 57 ms, FoV: 340 mm, voxel size: 1.2x1.2x5 mm , acquisition time: 4:23 min, b-values: 50, 850 s/mm , SPAIR fat suppression. Parameters of the SMS-ss-DWI-EPI sequence were: TR: 4000 ms, TE: 70 ms, FoV: 360 mm, voxel size: 0.9(i)x0.9(i)x4 mm, acquisition time: 2:45 min, b-values: 50, 400, 800 s/mm , SPAIR fat suppression. In addition, the clinical protocol included one pre- and five post-contrast regular T1-weighted Dixon acquisitions, ultrafast T1-weighted TWIST acquisitions during the inflow of contrast, and a T2 weighted Dixon acquisition. In total, 42 malignant (32 invasive ductal carcinomas, 4 invasive lobular carcinomas, 1 ductal carcinoma in situ and 5 other malignant lesions) and 12 benign lesions were detected on the contrast-enhanced series. Malignant lesions had a mean MRI size of 18.7 mm +- 15.1 mm (range: 3 - 92 mm) and benign lesions had a mean size of 5.9 mm +- 3.8 mm (range: 3 - 15 mm). Four dedicated breast radiologists (4 to 15 years of experience with breast MRI) independently scored both sequences for overall IQ (1: extremely poor to 9: excellent). All lesions were also independently evaluated for conspicuity (1: not visible, 2: visible if location is given, 3: visible). Statistical analysis was performed in SPSS using Generalized Linear Models and the Wilcoxon signed-rank test. - - Results - Overall IQ was significantly higher for the conventional rs-DWI-EPI (Mean +- SD: 5.5 +- 1.9) than for the SMS-ss-DWI-EPI (Mean +- SD: 4.2 +- 2.0) (p=0.002). Lesion conspicuity scores were significantly higher for SMS-ss-DWI-EPI (p=0.009). Benign lesions had similar conspicuity with both sequences while malignant lesions had significantly higher conspicuity with SMS-ss-DWI-EPI (p=0.041) (for example, see Figure 1). 
- Infolding and ghosting artifacts were scored as disturbing or worse by 2 or more radiologists in 6 and 15 cases, for Resolve and SMS respectively. Distortion artifacts were scored as disturbing or worse in 4 and 17 cases, respectively. + The aim of this study was to compare a prototype simultaneous multi-slice single-shot echo planar imaging (SMS-ss-DWI-EPI) sequence with conventional readout-segmented echo-planar imaging (rs-DWI-EPI) for diffusion-weighted imaging of the breast at 3T magnetic resonance imaging (MRI). A reader study was conducted to evaluate image quality, lesion conspicuity and BI-RADS score. Our results show that although the image quality with the conventional rs-DWI-EPI is superior, malignant lesions have improved visibility with the SMS-ss-DWI-EPI sequence. + + Introduction + The addition of diffusion-weighted imaging (DWI) to contrast-enhanced breast MRI improves the classification of breast lesions, which leads in turn to an increased positive predictive value of biopsies. Consequently, DWI with evaluation of the corresponding apparent diffusion coefficient (ADC) is included in most state-of-the-art breast MRI protocols. The echo train of the readout-segmented echo-planar imaging-based DWI sequence (rs-DWI-EPI) was shortened to reduce distortion and improve the resulting image quality. However, this sequence results in a lower signal-to-noise ratio (SNR) than single-shot echo planar imaging (ss-EPI). In practice, detection of lesions on DWI is often problematic due to a relatively low lesion conspicuity. To improve the detectability of lesions and the speed of acquisition, a prototype DWI sequence, the simultaneous multi-slice single-shot DWI-EPI (SMS-ss-DWI-EPI), was developed. In this study, we compare this prototype sequence with rs-DWI-EPI at 3T, in terms of image quality (IQ), lesion conspicuity, and the presence of artifacts. + + Methods + From September 2017 to December 2018, 25 women with known breast cancer or suspicious breast lesions were included, after providing signed informed consent. Women were scanned with the conventional rs-DWI-EPI and the SMS-ss-DWI-EPI during the same clinical examination on a 3T MAGNETOM Skyra system (Siemens Healthcare, Erlangen, Germany) using a 16-channel bilateral breast coil. Parameters of the rs-DWI-EPI sequence were: TR: 5450 ms, TE: 57 ms, FoV: 340 mm, voxel size: 1.2x1.2x5 mm , acquisition time: 4:23 min, b-values: 50, 850 s/mm , SPAIR fat suppression. Parameters of the SMS-ss-DWI-EPI sequence were: TR: 4000 ms, TE: 70 ms, FoV: 360 mm, voxel size: 0.9(i)x0.9(i)x4 mm, acquisition time: 2:45 min, b-values: 50, 400, 800 s/mm , SPAIR fat suppression. In addition, the clinical protocol included one pre- and five post-contrast regular T1-weighted Dixon acquisitions, ultrafast T1-weighted TWIST acquisitions during the inflow of contrast, and a T2 weighted Dixon acquisition. In total, 42 malignant (32 invasive ductal carcinomas, 4 invasive lobular carcinomas, 1 ductal carcinoma in situ and 5 other malignant lesions) and 12 benign lesions were detected on the contrast-enhanced series. Malignant lesions had a mean MRI size of 18.7 mm +- 15.1 mm (range: 3 - 92 mm) and benign lesions had a mean size of 5.9 mm +- 3.8 mm (range: 3 - 15 mm). Four dedicated breast radiologists (4 to 15 years of experience with breast MRI) independently scored both sequences for overall IQ (1: extremely poor to 9: excellent). All lesions were also independently evaluated for conspicuity (1: not visible, 2: visible if location is given, 3: visible). 
Statistical analysis was performed in SPSS using Generalized Linear Models and the Wilcoxon signed-rank test. + + Results + Overall IQ was significantly higher for the conventional rs-DWI-EPI (Mean +- SD: 5.5 +- 1.9) than for the SMS-ss-DWI-EPI (Mean +- SD: 4.2 +- 2.0) (p=0.002). Lesion conspicuity scores were significantly higher for SMS-ss-DWI-EPI (p=0.009). Benign lesions had similar conspicuity with both sequences while malignant lesions had significantly higher conspicuity with SMS-ss-DWI-EPI (p=0.041) (for example, see Figure 1). + Infolding and ghosting artifacts were scored as disturbing or worse by 2 or more radiologists in 6 and 15 cases, for Resolve and SMS respectively. Distortion artifacts were scored as disturbing or worse in 4 and 17 cases, respectively. + + Discussion: Although the conventional rs-DWI-EPI sequence results in better IQ, in general ss-EPI results in a higher SNR, which may lead to better visibility of malignant lesions with SMS-ss-DWI-EPI. This might eventually improve the clinical value of DWI in addition to contrast enhanced breast MRI. Simultaneous Multi-Slice (SMS) ensures that slices are excited simultaneously with a multiband pulse, which leads to a reduced acquisition time. In our protocol, the combination of ss-EPI and SMS results in a higher spatial resolution while still having a shorter acquisition time than the conventional sequence. The higher achievable spatial resolution may be an important factor for the improved lesion visibility, and conspicuity of malignant lesions. This may make the SMS approach suitable for fast screening and diagnosis of breast cancer. Still, further development of the SMS-ss-DWI-EPI sequence is needed for improved IQ, decreased presence of artifacts and even better lesion conspicuity. + + Conclusion + Despite the perceived poorer image quality and the more disturbing presence of artifacts in the SMS-ss-DWI-EPI sequence, malignant lesions are better visualized using this sequence. When image quality and conspicuity are further improved, this technique might enable improved lesion detection on unenhanced diffusion weighted breast MRI.}, + optnote = {DIAG}, + year = {2020}, +} - Discussion: Although the conventional rs-DWI-EPI sequence results in better IQ, in general ss-EPI results in a higher SNR, which may lead to better visibility of malignant lesions with SMS-ss-DWI-EPI. This might eventually improve the clinical value of DWI in addition to contrast enhanced breast MRI. Simultaneous Multi-Slice (SMS) ensures that slices are excited simultaneously with a multiband pulse, which leads to a reduced acquisition time. In our protocol, the combination of ss-EPI and SMS results in a higher spatial resolution while still having a shorter acquisition time than the conventional sequence. The higher achievable spatial resolution may be an important factor for the improved lesion visibility, and conspicuity of malignant lesions. This may make the SMS approach suitable for fast screening and diagnosis of breast cancer. Still, further development of the SMS-ss-DWI-EPI sequence is needed for improved IQ, decreased presence of artifacts and even better lesion conspicuity. +@article{Sand20a, + author = {Sanderink, W.B.G. and Caballo, M. and Strobbe, L.J.A. and Bult, P. and Vreuls, W. and Venderink, D.J. and Sechopoulos, I. and Karssemeijer, N. 
and Mann, R.M.},
+  title = {~~Reliability of MRI tumor size measurements for minimal invasive treatment selection in small breast cancers},
+  doi = {10.1016/j.ejso.2020.04.038},
+  year = {2020},
+  abstract = {Abstract unavailable},
+  url = {http://dx.doi.org/10.1016/j.ejso.2020.04.038},
+  file = {Sand20a.pdf:pdf\\Sand20a.pdf:PDF},
+  optnote = {DIAG, RADIOLOGY},
+  journal = {European Journal of Surgical Oncology},
+  automatic = {yes},
+  citation-count = {3},
+  pages = {1463-1470},
+  volume = {46},
+  pmid = {32737712},
+}
+
+@article{Sand21,
+  author = {Sanderink, Wendelien B.G. and Teuwen, Jonas and Appelman, Linda and Moy, Linda and Heacock, Laura and Weiland, Elisabeth and Karssemeijer, Nico and Baltzer, Pascal A.T.
and Sechopoulos, Ioannis and Mann, Ritse M.}, + title = {~~Comparison of simultaneous multi-slice single-shot DWI to readout-segmented DWI for evaluation of breast lesions at 3T MRI}, + doi = {10.1016/j.ejrad.2021.109626}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejrad.2021.109626}, + file = {Sand21.pdf:pdf\\Sand21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Radiology}, + automatic = {yes}, + citation-count = {7}, + pages = {109626}, + volume = {138}, + pmid = {33711569}, } @article{Satt22, @@ -20814,6 +25997,9 @@ @article{Satt22 optnote = {DIAG, RADIOLOGY}, pmid = {35293692}, year = {2022}, + ss_id = {a9b2faab3a7195ee1deacda52004b4cd396f427a}, + all_ss_ids = {['a9b2faab3a7195ee1deacda52004b4cd396f427a']}, + gscites = {6}, } @article{Sch03a, @@ -21109,6 +26295,9 @@ @inproceedings{Scha13 file = {Scha13.pdf:pdf\\Scha13.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {3}, + ss_id = {4f1105d82ae39f9705da50fc98f4b545d80723dc}, + all_ss_ids = {['4f1105d82ae39f9705da50fc98f4b545d80723dc']}, + gscites = {3}, } @article{Scha13a, @@ -21126,8 +26315,10 @@ @article{Scha13a pmid = {24113431}, month = {12}, gsid = {8460371450451194013}, - gscites = {19}, + gscites = {27}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/122870}, + ss_id = {c78606bf03806c66b3d24122d564173a448181ed}, + all_ss_ids = {['c78606bf03806c66b3d24122d564173a448181ed']}, } @conference{Scha13b, @@ -21172,8 +26363,10 @@ @article{Scha14 pmid = {24481755}, month = {1}, gsid = {16185250871510695519}, - gscites = {16}, + gscites = {18}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/268940}, + ss_id = {dabf1c07425730421cd8e1bf4ad9d7fffb7b036f}, + all_ss_ids = {['dabf1c07425730421cd8e1bf4ad9d7fffb7b036f']}, } @article{Scha14a, @@ -21191,6 +26384,9 @@ @article{Scha14a pmid = {24635675}, month = {7}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/137680}, + ss_id = {6b18013b756f365ee486e752272327a554ea841a}, + all_ss_ids = {['6b18013b756f365ee486e752272327a554ea841a', 'b2b0b7891f02492fe0452d6dea3297c57a85476b']}, + gscites = {70}, } @article{Scha14b, @@ -21210,6 +26406,8 @@ @article{Scha14b gsid = {6744117835177351208}, gscites = {8}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/277816}, + ss_id = {bd9cd87e942d44cf6bd24a9a1105b4ec6c0883d5}, + all_ss_ids = {['bd9cd87e942d44cf6bd24a9a1105b4ec6c0883d5']}, } @article{Scha14c, @@ -21228,6 +26426,8 @@ @article{Scha14c month = {5}, gsid = {15579013926935901771}, gscites = {4}, + ss_id = {c20ebcc69b4c426fa0231ce719576ed7ba7b0629}, + all_ss_ids = {['c20ebcc69b4c426fa0231ce719576ed7ba7b0629']}, } @article{Scha14d, @@ -21245,6 +26445,8 @@ @article{Scha14d month = {4}, gsid = {4547693503675497615}, gscites = {6}, + ss_id = {51d271d58ca1492055fa83f97019c34672c77dd0}, + all_ss_ids = {['51d271d58ca1492055fa83f97019c34672c77dd0']}, } @article{Scha14e, @@ -21262,7 +26464,9 @@ @article{Scha14e pmid = {24635675}, month = {7}, gsid = {773280616186312110}, - gscites = {35}, + gscites = {70}, + ss_id = {6b18013b756f365ee486e752272327a554ea841a}, + all_ss_ids = {['6b18013b756f365ee486e752272327a554ea841a', 'b2b0b7891f02492fe0452d6dea3297c57a85476b']}, } @conference{Scha14f, @@ -21298,7 +26502,9 @@ @article{Scha14h pmid = {25279774}, month = {10}, gsid = {10859929995411569077}, - gscites = {7}, + gscites = {16}, + ss_id = {76651ee9770b111f53dbcb92b3cb1174d3760073}, + all_ss_ids = {['76651ee9770b111f53dbcb92b3cb1174d3760073']}, } @article{Scha14i, @@ -21315,6 +26521,9 @@ @article{Scha14i number = 
{12},
   pmid = {25503518},
   month = {12},
+  ss_id = {711091a5f3498c65061d5cff533169a7b7cedf8a},
+  all_ss_ids = {['711091a5f3498c65061d5cff533169a7b7cedf8a']},
+  gscites = {2},
 }
 
 @phdthesis{Scha15,
@@ -21347,8 +26556,10 @@ @article{Scha16
   optnote = {DIAG},
   pmid = {26783697},
   gsid = {4979950243250816911},
-  gscites = {3},
+  gscites = {7},
   taverne_url = {https://repository.ubn.ru.nl/handle/2066/167509},
+  ss_id = {36f4a3250668a84acc6e3eff9f9b4f496dab24df},
+  all_ss_ids = {['36f4a3250668a84acc6e3eff9f9b4f496dab24df']},
 }
 
 @article{Scha20,
@@ -21362,6 +26573,43 @@ @article{Scha20
   file = {Scha20.pdf:pdf\\Scha20.pdf:PDF},
   optnote = {DIAG, INPRESS, RADIOLOGY},
   pmid = {32787701},
+  ss_id = {58af0cc4afb228ed71f5fee4bc2c8e5481d020b4},
+  all_ss_ids = {['58af0cc4afb228ed71f5fee4bc2c8e5481d020b4']},
+  gscites = {68},
+}
+
+@article{Scha21,
+  author = {Schaefer-Prokop, Cornelia and Prokop, Mathias},
+  title = {~~Chest Radiography in COVID-19: No Role in Asymptomatic and Oligosymptomatic Disease},
+  doi = {10.1148/radiol.2020204038},
+  year = {2021},
+  abstract = {Abstract unavailable},
+  url = {http://dx.doi.org/10.1148/radiol.2020204038},
+  file = {Scha21.pdf:pdf\\Scha21.pdf:PDF},
+  optnote = {DIAG, RADIOLOGY},
+  journal = {Radiology},
+  automatic = {yes},
+  citation-count = {3},
+  pages = {E156-E157},
+  volume = {298},
+  pmid = {33725189},
 }
 
 @article{Scha22,
@@ -21376,13 +26624,15 @@ @article{Scha22
   abstract = {Abstract Background The Psoriasis Area and Severity Index (PASI) score is commonly used in clinical practice and research to monitor disease severity and determine treatment efficacy. Automating the PASI score with deep learning algorithms, like Convolutional Neural Networks (CNNs), could enable objective and efficient PASI scoring. Objectives To assess the performance of image-based automated PASI scoring in anatomical regions by CNNs and compare the performance of CNNs to image-based scoring by physicians. Methods Imaging series were matched to PASI subscores determined in real life by the treating physician. CNNs were trained using standardized imaging series of 576 trunk, 614 arm and 541 leg regions. CNNs were separately trained for each PASI subscore (erythema, desquamation, induration and area) in each anatomical region (trunk, arms and legs). The head region was excluded for anonymity. Additionally, PASI-trained physicians retrospectively determined image-based subscores on the test set images of the trunk. Agreement with the real-life scores was determined with the intraclass correlation coefficient (ICC) and compared between the CNNs and physicians. Results Intraclass correlation coefficients between the CNN and real-life scores of the trunk region were 0.616, 0.580, 0.580 and 0.793 for erythema, desquamation, induration and area, respectively, with similar results for the arms and legs region. PASI-trained physicians (N = 5) were in moderate-good agreement (ICCs 0.706-0.793) with each other for image-based PASI scoring of the trunk region.
ICCs between the CNN and real-life scores were slightly higher for erythema (0.616 vs. 0.558), induration (0.580 vs. 0.573) and area scoring (0.793 vs. 0.694) than image-based scoring by physicians. Physicians slightly outperformed the CNN on desquamation scoring (0.580 vs. 0.589). Conclusions Convolutional Neural Networks have the potential to automatically and objectively perform image-based PASI scoring at an anatomical region level. For erythema, desquamation and induration scoring, CNNs performed similar to physicians, while for area scoring CNNs outperformed physicians on image-based PASI scoring.}, year = {2022}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/248278}, + ss_id = {fe0311bb9d578c2811912c020045f1c3b7345543}, + all_ss_ids = {['fe0311bb9d578c2811912c020045f1c3b7345543']}, + gscites = {14}, } @article{Scha22a, abstract = {Artificial intelligence (AI) applications for chest radiography and chest CT are among the most developed applications in radiology. More than 40 certified AI products are available for chest radiography or chest CT. These AI products cover a wide range of abnormalities, including pneumonia, pneumothorax and lung cancer. Most applications are aimed at detecting disease, complemented by products that characterize or quantify tissue. At present, none of the thoracic AI products is specifically designed for the pediatric population. However, some products developed to detect tuberculosis in adults are also applicable to children. Software is under development to detect early changes of cystic fibrosis on chest CT, which could be an interesting application for pediatric radiology. In this review, we give an overview of current AI products in thoracic radiology and cover recent literature about AI in chest radiography, with a focus on pediatric radiology. We also discuss possible pediatric applications.}, author = {Schalekamp, Steven and Klein, Willemijn M and van Leeuwen, Kicky G}, doi = {10.1007/s00247-021-05146-0}, - issn = {1432-1998}, journal = PEDRAD, number = {11}, pages = {2120--2130}, @@ -21390,10 +26640,60 @@ @article{Scha22a url = {https://doi.org/10.1007/s00247-021-05146-0}, volume = {52}, year = {2022}, - optnote = {RADIOLOGY, DIAG}, + optnote = {DIAG, RADIOLOGY}, file = {Scha22a.pdf:pdf\\Scha22a.pdf:PDF}, } +@article{Scha22b, + author = {Scharm, Sarah C. and Schaefer-Prokop, Cornelia and Willmann, Moritz and Vogel-Claussen, Jens and Knudsen, Lars and Jonigk, Danny and Fuge, Jan and Welte, Tobias and Wacker, Frank and Prasse, Antje and Shin, Hoen-oh}, + title = {~~Increased regional ventilation as early imaging marker for future disease progression of interstitial lung disease: a feasibility study}, + doi = {10.1007/s00330-022-08702-w}, + year = {2022}, + abstract = {Abstract + Objectives + Idiopathic pulmonary fibrosis (IPF) is a disease with a poor prognosis and a highly variable course. Pathologically increased ventilation--accessible by functional CT--is discussed as a potential predecessor of lung fibrosis. The purpose of this feasibility study was to investigate whether increased regional ventilation at baseline CT and morphological changes in the follow-up CT suggestive for fibrosis indeed occur in spatial correspondence. + + Methods + In this retrospective study, CT scans were performed at two time points between September 2016 and November 2020. Baseline ventilation was divided into four categories ranging from low, normal to moderately, and severely increased (C1-C4). 
Correlation between baseline ventilation and volume and density change at follow-up was investigated in corresponding voxels. The significance of the difference of density and volume change per ventilation category was assessed using paired t-tests with a significance level of p <= 0.05. The analysis was performed separately for normal (NAA) and high attenuation areas (HAA). + + Results + The study group consisted of 41 patients (73 +- 10 years, 36 men). In both NAA and HAA, significant increases of density and loss of volume were seen in areas of severely increased ventilation (C4) at baseline compared to areas of normal ventilation (C2, p < 0.001). In HAA, morphological changes were more heterogeneous compared to NAA. + + Conclusion + Functional CT assessing the extent and distribution of lung parenchyma with pathologically increased ventilation may serve as an imaging marker to prospectively identify lung parenchyma at risk for developing fibrosis. + + Key Points + * Voxelwise correlation of serial CT scans suggests spatial correspondence between increased ventilation at baseline and structural changes at follow-up. + * Regional assessment of pathologically increased ventilation at baseline has the potential to prospectively identify tissue at risk for developing fibrosis. + * Presence and extent of pathologically increased ventilation may serve as an early imaging marker of disease activity. + }, + url = {http://dx.doi.org/10.1007/s00330-022-08702-w}, + file = {Scha22b.pdf:pdf\\Scha22b.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + automatic = {yes}, + citation-count = {4}, + pages = {6046-6057}, + volume = {32}, + pmid = {35357537}, +} + +@article{Scha23, + author = {Scharm, Sarah C. and Schaefer-Prokop, Cornelia and Winther, Hinrich B. and Huisinga, Carolin and Werncke, Thomas and Vogel-Claussen, Jens and Wacker, Frank K. and Shin, Hoen-oh}, + title = {~~Regional Pulmonary Morphology and Function: Photon-counting CT Assessment}, + doi = {10.1148/radiol.230318}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1148/radiol.230318}, + file = {Scha23.pdf:pdf\\Scha23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + automatic = {yes}, + citation-count = {0}, + volume = {308}, + pmid = {37432088}, +} + @article{Scha90, author = {Schaefer, C. and Prokop, M. and Galanski, M.}, title = {[Drug-induced changes in the lungs]}, @@ -21499,6 +26799,23 @@ @article{Scha99 year = {1999}, } +@article{Sche22, + author = {Scheeringa, Ren\'{e} and Bonnefond, Mathilde and van Mourik, Tim and Jensen, Ole and Norris, David G and Koopmans, Peter J}, + title = {~~Relating neural oscillations to laminar fMRI connectivity in visual cortex}, + doi = {10.1093/cercor/bhac154}, + year = {2022}, + abstract = {AbstractLaminar functional magnetic resonance imaging (fMRI) holds the potential to study connectivity at the laminar level in humans. Here we analyze simultaneously recorded electroencephalography (EEG) and high-resolution fMRI data to investigate how EEG power modulations, induced by a task with an attentional component, relate to changes in fMRI laminar connectivity between and within brain regions in visual cortex. Our results indicate that our task-induced decrease in beta power relates to an increase in deep-to-deep layer coupling between regions and to an increase in deep/middle-to-superficial layer connectivity within brain regions. 
The attention-related alpha power decrease predominantly relates to reduced connectivity between deep and superficial layers within brain regions, since, unlike beta power, alpha power was found to be positively correlated to connectivity. We observed no strong relation between laminar connectivity and gamma band oscillations. These results indicate that especially beta band, and to a lesser extent, alpha band oscillations relate to laminar-specific fMRI connectivity. The differential effects for alpha and beta bands indicate that they relate to different feedback-related neural processes that are differentially expressed in intra-region laminar fMRI-based connectivity.}, + url = {http://dx.doi.org/10.1093/cercor/bhac154}, + file = {Sche22.pdf:pdf\\Sche22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Cerebral Cortex}, + automatic = {yes}, + citation-count = {1}, + pages = {1537-1549}, + volume = {33}, + pmid = {35512361}, +} + @inproceedings{Schi03, author = {A. M. R. Schilham and B. van Ginneken and M. Loog}, title = {Multi-scale nodule detection in chest radiographs}, @@ -21513,6 +26830,8 @@ @inproceedings{Schi03 optnote = {DIAG, RADIOLOGY}, gsid = {16504552768822728760}, gscites = {36}, + ss_id = {20edec8455ced02cf81cd9b36da6d0c1e78466c6}, + all_ss_ids = {['20edec8455ced02cf81cd9b36da6d0c1e78466c6']}, } @conference{Schi03a, @@ -21559,6 +26878,9 @@ @inproceedings{Schi05b file = {Schi05b.pdf:pdf\\Schi05b.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, month = {4}, + ss_id = {9631891250b66696348d177974c08c5ff523bde9}, + all_ss_ids = {['9631891250b66696348d177974c08c5ff523bde9']}, + gscites = {0}, } @inproceedings{Schi06, @@ -21576,6 +26898,8 @@ @inproceedings{Schi06 month = {3}, gsid = {426748162498317427}, gscites = {2}, + ss_id = {89d57c0dfb6c4a5d0f883a96648569f605ba8b61}, + all_ss_ids = {['89d57c0dfb6c4a5d0f883a96648569f605ba8b61']}, } @article{Schi06a, @@ -21593,6 +26917,8 @@ @article{Schi06a month = {4}, gsid = {6010863254825410177}, gscites = {89}, + ss_id = {ccf65024e744697617e208593fd1d47f9c27d9aa}, + all_ss_ids = {['ccf65024e744697617e208593fd1d47f9c27d9aa']}, } @article{Schi06b, @@ -21610,6 +26936,8 @@ @article{Schi06b month = {4}, gsid = {1234054976175339261}, gscites = {167}, + ss_id = {5db204b00f065be22c671e9d8b795d20b3c43281}, + all_ss_ids = {['5db204b00f065be22c671e9d8b795d20b3c43281']}, } @inproceedings{Schi08, @@ -21625,6 +26953,8 @@ @inproceedings{Schi08 optnote = {DIAG, RADIOLOGY}, gsid = {17302526749829221638}, gscites = {10}, + ss_id = {ce9b7e56072a7ffdebdd6516a3044975ae38ac73}, + all_ss_ids = {['ce9b7e56072a7ffdebdd6516a3044975ae38ac73']}, } @inproceedings{Schi09, @@ -21641,7 +26971,9 @@ @inproceedings{Schi09 optnote = {DIAG, RADIOLOGY}, month = {2}, gsid = {6869130013441123416}, - gscites = {13}, + gscites = {15}, + ss_id = {7fcd3d506310b5c7deb6858027dfd9e6a3f40258}, + all_ss_ids = {['7fcd3d506310b5c7deb6858027dfd9e6a3f40258']}, } @inproceedings{Schi10, @@ -21657,6 +26989,8 @@ @inproceedings{Schi10 optnote = {DIAG, RADIOLOGY}, gsid = {5930966169749141545}, gscites = {2}, + ss_id = {cbf00889d89fe8c70d66ad190cc94c3ed25f7f42}, + all_ss_ids = {['cbf00889d89fe8c70d66ad190cc94c3ed25f7f42']}, } @inproceedings{Schi11, @@ -21674,6 +27008,8 @@ @inproceedings{Schi11 month = {3}, gsid = {17594284512436240042}, gscites = {2}, + ss_id = {3d3ca5dc423c0265941d1dc9d8884b304a1ee4fe}, + all_ss_ids = {['3d3ca5dc423c0265941d1dc9d8884b304a1ee4fe']}, } @article{Schi11a, @@ -21691,7 +27027,9 @@ @article{Schi11a pmid = {21737868}, month = {7}, gsid = {15866260991456062910}, - gscites = {18}, 
+ gscites = {20}, + ss_id = {182850da64dee765b90588045f836b2a20077a2e}, + all_ss_ids = {['182850da64dee765b90588045f836b2a20077a2e']}, } @article{Schi13, @@ -21710,6 +27048,8 @@ @article{Schi13 month = {12}, gsid = {1205294412888661201}, gscites = {40}, + ss_id = {4a281df10b34a521ccfe28db516bc67e90df41af}, + all_ss_ids = {['4a281df10b34a521ccfe28db516bc67e90df41af']}, } @article{Schi13a, @@ -21727,7 +27067,9 @@ @article{Schi13a pmid = {23556896}, month = {3}, gsid = {4270628869848954159}, - gscites = {38}, + gscites = {41}, + ss_id = {6fd7b098a6d041926be75beb4fc1835f2c695d7d}, + all_ss_ids = {['6fd7b098a6d041926be75beb4fc1835f2c695d7d']}, } @phdthesis{Schi14, @@ -21746,22 +27088,22 @@ @mastersthesis{Schi20 author = {Martijn Schilpzand}, title = {Automatic Placenta Localisation from Ultrasound Imaging in a Resource-Limited Setting}, abstract = {Placenta previa and low-lying placenta are dangerous conditions that can cause severe - maternal and fetal complications. Obstetric ultrasound imaging is commonly used to - detect these maternal risk factors. Unfortunately, low-income countries suffer from a - shortage of trained sonographers to perform ultrasound examinations. To address this - problem, this study presents an algorithm to automatically detect low-lying placenta or - placenta previa from ultrasound data acquired with a standardized acquisition protocol. - This acquisition protocol can be taught to any healthcare worker within two hours. The - detection algorithm was optimized for performance and efficiency so that it can run on - a smartphone in combination with low-cost ultrasound equipment. The dataset used - in this study originates from St. Luke's hospital in Wolisso, Ethiopia and was acquired - with a low-cost ultrasound device. The detection algorithm consisted of two parts. First, - the placenta was segmented by a deep learning model with a U-Net architecture. This - segmentation model achieved a median test Dice of 0.835 on 2D ultrasound images. - Then, the segmentation data was used as input for a binary classifier which classified a - case as either normal placenta or as a class which includes both low-lying placenta and - placenta previa. The classification model achieved a sensitivity of 85% and a specificity - of 86%.}, + maternal and fetal complications. Obstetric ultrasound imaging is commonly used to + detect these maternal risk factors. Unfortunately, low-income countries suffer from a + shortage of trained sonographers to perform ultrasound examinations. To address this + problem, this study presents an algorithm to automatically detect low-lying placenta or + placenta previa from ultrasound data acquired with a standardized acquisition protocol. + This acquisition protocol can be taught to any healthcare worker within two hours. The + detection algorithm was optimized for performance and efficiency so that it can run on + a smartphone in combination with low-cost ultrasound equipment. The dataset used + in this study originates from St. Luke's hospital in Wolisso, Ethiopia and was acquired + with a low-cost ultrasound device. The detection algorithm consisted of two parts. First, + the placenta was segmented by a deep learning model with a U-Net architecture. This + segmentation model achieved a median test Dice of 0.835 on 2D ultrasound images. + Then, the segmentation data was used as input for a binary classifier which classified a + case as either normal placenta or as a class which includes both low-lying placenta and + placenta previa. 
The classification model achieved a sensitivity of 85% and a specificity + of 86%.}, file = {Schi20.pdf:pdf/Schi20.pdf:PDF}, optnote = {DIAG}, school = {Radboud University Nijmegen}, @@ -21782,6 +27124,9 @@ @article{Schi22 optnote = {DIAG, RADIOLOGY}, pmid = {35063289}, year = {2022}, + ss_id = {7383ab9582f0e50753a0571f6e4d759f99f49d27}, + all_ss_ids = {['2fe9af8a6b41fc9db41e76621f037aac453ab433', '7383ab9582f0e50753a0571f6e4d759f99f49d27']}, + gscites = {5}, } @conference{Schm12, @@ -21843,7 +27188,9 @@ @article{Scho11a pmid = {21956697}, month = {9}, gsid = {15023985581200351958}, - gscites = {53}, + gscites = {71}, + ss_id = {1ac1810f6db89758612fd372617c71f07a7b018a}, + all_ss_ids = {['1ac1810f6db89758612fd372617c71f07a7b018a']}, } @article{Scho13, @@ -21861,6 +27208,8 @@ @article{Scho13 pmid = {23598304}, gsid = {6683656125149514426}, gscites = {7}, + ss_id = {696f9c08ce42d9e71407d05987e5f3aa62fad108}, + all_ss_ids = {['696f9c08ce42d9e71407d05987e5f3aa62fad108']}, } @article{Scho13a, @@ -21878,8 +27227,10 @@ @article{Scho13a pmid = {23883209}, month = {8}, gsid = {5997033147149702974}, - gscites = {29}, + gscites = {30}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/118196}, + ss_id = {bec0446b87854ea9f5b0210565e3c49ace87f24e}, + all_ss_ids = {['bec0446b87854ea9f5b0210565e3c49ace87f24e']}, } @article{Scho13b, @@ -21897,8 +27248,10 @@ @article{Scho13b pmid = {24278264}, month = {11}, gsid = {14511785431496206226}, - gscites = {17}, + gscites = {29}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/125893}, + ss_id = {f6f881961e305ee13ba292d599969a2ad8d1edcf}, + all_ss_ids = {['f6f881961e305ee13ba292d599969a2ad8d1edcf']}, } @phdthesis{Scho14b, @@ -21928,7 +27281,8 @@ @article{Scho14c pmid = {25431271}, month = {11}, gsid = {16713008169098417611,9948573486239255340}, - gscites = {55}, + gscites = {86}, + all_ss_ids = {['2fa5f435f41242e8ef053e94ae7af400014ca7d3', '6ca452a24dbe147205850148d98163481aa22dc6']}, } @article{Scho14d, @@ -21945,6 +27299,9 @@ @article{Scho14d number = {2}, pmid = {25287262}, month = {10}, + ss_id = {c00533c3203b54405ba3192167c83a080a536ff1}, + all_ss_ids = {['c00533c3203b54405ba3192167c83a080a536ff1']}, + gscites = {56}, } @article{Scho14f, @@ -21962,7 +27319,9 @@ @article{Scho14f pmid = {25413965}, month = {11}, gsid = {2011905741890872805}, - gscites = {24}, + gscites = {26}, + ss_id = {45a0d554907e10b52846ac7757e24f9ddc9a9d4e}, + all_ss_ids = {['45a0d554907e10b52846ac7757e24f9ddc9a9d4e']}, } @conference{Scho15a, @@ -21974,7 +27333,8 @@ @conference{Scho15a file = {Scho15a.pdf:pdf\\Scho15a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {12372288646108507958}, - gscites = {39}, + gscites = {56}, + all_ss_ids = {['c00533c3203b54405ba3192167c83a080a536ff1']}, } @article{Scho15b, @@ -22003,6 +27363,23 @@ @conference{Scho16 optnote = {DIAG, RADIOLOGY}, } +@inproceedings{Scho23, + author = {Schouten, Daan and Litjens, Geert}, + booktitle = MI, + title = {PythoStitcher: an iterative approach for stitching digitized tissue fragments into full resolution whole-mount reconstructions}, + doi = {10.1117/12.2652512}, + pages = {1247118}, + series = {#SPIE#}, + volume = {12471}, + abstract = {In histopathology, whole-mount sections (WMS) can be utilized to capture entire cross-sections of excised tissue, thereby reducing the number of slides per case, preserving tissue context, and minimizing cutting artefacts. Additionally, the use of WMS allows for easier correlation of histology and pre-operative imaging. 
However, in digital pathology, WMS are not frequently acquired due to the limited availability of whole-slide scanners that can scan double-width glass slides. Moreover, whole-mounts may still be too large to fit on a single double-width glass slide, and archival material is typically stored as tissue fragments. In this work, we present PythoStitcher, an improved version of the AutoStitcher algorithm for automatically stitching multiple tissue fragments and constructing a full-resolution WMS. PythoStitcher is based on a genetic algorithm that iteratively optimizes the affine transformation matrix for each tissue ...}, + file = {Scho23.pdf:pdf\\Scho23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + year = {2023}, + ss_id = {69cec4aa46e33d973af972f7b78e5991d8f03601}, + all_ss_ids = {['69cec4aa46e33d973af972f7b78e5991d8f03601']}, + gscites = {0}, +} + @conference{Schr11, author = {Bruce Schroeder and Ralph Highnam and Andrew Cave and Jenny Walker and Nico Karssemeijer and Martin Yaffe and Roberta Jong and Olivier Alonzo-Proulx}, title = {At What Age Should Breast Screening Begin?}, @@ -22032,16 +27409,18 @@ @article{Schr18 pages = {626--633}, doi = {10.1136/thoraxjnl-2017-211107}, abstract = {Background: All lung cancer CT screening trials used fixed follow-up intervals, which may not be optimal. We developed new lung cancer risk models for personalising screening intervals to 1 year or 2 years, and compared these with existing models. - Methods: We included participants in the CT arm of the National Lung Screening Trial (2002-2010) who underwent a baseline scan and a first annual follow-up scan and were not diagnosed with lung cancer in the first year. True and false positives and the area under the curve of each model were calculated. Internal validation was performed using bootstrapping. - Results: Data from 24 542 participants were included in the analysis. The accuracy was 0.785, 0.693, 0.697, 0.666 and 0.727 for the polynomial, patient characteristics, diameter, Patz and PanCan models, respectively. Of the 24 542 participants included, 174 (0.71%) were diagnosed with lung cancer between the first and the second annual follow-ups. Using the polynomial model, 2558 (10.4%, 95% CI 10.0% to 10.8%), 7544 (30.7%, 30.2% to 31.3%), 10 947 (44.6%, 44.0% to 45.2%), 16 710 (68.1%, 67.5% to 68.7%) and 20 023 (81.6%, 81.1% to 92.1%) of the 24 368 participants who did not develop lung cancer in the year following the first follow-up screening round could have safely skipped it, at the expense of delayed diagnosis of 0 (0.0%, 0.0% to 2.7%), 8 (4.6%, 2.2% to 9.2%), 17 (9.8%, 6.0% to 15.4%), 44 (25.3%, 19.2% to 32.5%) and 70 (40.2%, 33.0% to 47.9%) of the 174 lung cancers, respectively. - Conclusions: The polynomial model, using both patient characteristics and baseline scan morphology, was significantly superior in assigning participants to 1-year or 2-year screening intervals. Implementing personalised follow-up intervals would enable hundreds of participants to skip a screening round per lung cancer diagnosis delayed.}, + Methods: We included participants in the CT arm of the National Lung Screening Trial (2002-2010) who underwent a baseline scan and a first annual follow-up scan and were not diagnosed with lung cancer in the first year. True and false positives and the area under the curve of each model were calculated. Internal validation was performed using bootstrapping. + Results: Data from 24 542 participants were included in the analysis. 
The accuracy was 0.785, 0.693, 0.697, 0.666 and 0.727 for the polynomial, patient characteristics, diameter, Patz and PanCan models, respectively. Of the 24 542 participants included, 174 (0.71%) were diagnosed with lung cancer between the first and the second annual follow-ups. Using the polynomial model, 2558 (10.4%, 95% CI 10.0% to 10.8%), 7544 (30.7%, 30.2% to 31.3%), 10 947 (44.6%, 44.0% to 45.2%), 16 710 (68.1%, 67.5% to 68.7%) and 20 023 (81.6%, 81.1% to 92.1%) of the 24 368 participants who did not develop lung cancer in the year following the first follow-up screening round could have safely skipped it, at the expense of delayed diagnosis of 0 (0.0%, 0.0% to 2.7%), 8 (4.6%, 2.2% to 9.2%), 17 (9.8%, 6.0% to 15.4%), 44 (25.3%, 19.2% to 32.5%) and 70 (40.2%, 33.0% to 47.9%) of the 174 lung cancers, respectively. + Conclusions: The polynomial model, using both patient characteristics and baseline scan morphology, was significantly superior in assigning participants to 1-year or 2-year screening intervals. Implementing personalised follow-up intervals would enable hundreds of participants to skip a screening round per lung cancer diagnosis delayed.}, file = {:pdf/Schr18.pdf:PDF;:pdf/Schr18_Appendix.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {29602813}, publisher = {{BMJ}}, gsid = {9268925672447372226}, - gscites = {13}, + gscites = {29}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/198322}, + ss_id = {032a0374513489195712dabb380d0feb3bb85d46}, + all_ss_ids = {['032a0374513489195712dabb380d0feb3bb85d46']}, } @article{Schr18b, @@ -22060,7 +27439,9 @@ @article{Schr18b pmid = {29969076}, publisher = {Radiological Society of North America ({RSNA})}, gsid = {9860123135480948586}, - gscites = {14}, + gscites = {34}, + ss_id = {a2f7d760233f22f037d17c8196ea1463597e66e8}, + all_ss_ids = {['a2f7d760233f22f037d17c8196ea1463597e66e8']}, } @conference{Schr18c, @@ -22114,8 +27495,10 @@ @article{Schr19 pmid = {30789954}, month = {2}, gsid = {7043689849476863169}, - gscites = {1}, + gscites = {3}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/202118}, + ss_id = {010f33ceac3fb9aae25cb7ef51ba0f0dda95bb24}, + all_ss_ids = {['010f33ceac3fb9aae25cb7ef51ba0f0dda95bb24']}, } @article{Schr19a, @@ -22147,7 +27530,9 @@ @article{Schr19b optnote = {DIAG, RADIOLOGY}, pmid = {31356496}, gsid = {95204898750501445}, - gscites = {2}, + gscites = {12}, + ss_id = {766b44a5bc3a4ffb30dd1b4b4d76752e8c81698e}, + all_ss_ids = {['766b44a5bc3a4ffb30dd1b4b4d76752e8c81698e']}, } @article{Schr20, @@ -22164,6 +27549,9 @@ @article{Schr20 optnote = {DIAG, RADIOLOGY}, pmid = {32685283}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/225855}, + ss_id = {0060644239845c59813a5a9122d34e1f75d18e4c}, + all_ss_ids = {['0060644239845c59813a5a9122d34e1f75d18e4c']}, + gscites = {0}, } @article{Schr20a, @@ -22179,6 +27567,9 @@ @article{Schr20a file = {Schr20a.pdf:pdf\\Schr20a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {34164285}, + ss_id = {2a32913ae752a56f9997a3869069a768f09a52e7}, + all_ss_ids = {['2a32913ae752a56f9997a3869069a768f09a52e7']}, + gscites = {28}, } @article{Schr20b, @@ -22189,6 +27580,9 @@ @article{Schr20b doi = {10.21037/atm-20-3384}, file = {Schr20b.pdf:pdf\\Schr20b.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {04eda893d160910706751960e4ca9dd730f42674}, + all_ss_ids = {['04eda893d160910706751960e4ca9dd730f42674']}, + gscites = {0}, } @article{Schr20c, @@ -22205,6 +27599,9 @@ @article{Schr20c optnote = {DIAG, RADIOLOGY}, pmid = {33778597}, taverne_url = 
{https://repository.ubn.ru.nl/handle/2066/229529},
+ ss_id = {302fbaac065fe76b0f8d6e2bccc8c715d1ab230a},
+ all_ss_ids = {['302fbaac065fe76b0f8d6e2bccc8c715d1ab230a']},
+ gscites = {7},
 }

 @article{Schr21,
@@ -22220,6 +27617,9 @@
 file = {Schr21.pdf:pdf\\Schr21.pdf:PDF},
 optnote = {DIAG, RADIOLOGY},
 pmid = {33574075},
+ ss_id = {c4422edc3d7460dc858db3e23cef64136a49628e},
+ all_ss_ids = {['c4422edc3d7460dc858db3e23cef64136a49628e']},
+ gscites = {6},
 }

 @phdthesis{Schr21a,
@@ -22249,6 +27649,9 @@
 optnote = {DIAG, RADIOLOGY},
 pmid = {33866117},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/233968},
+ ss_id = {e6826552a5bff552a12087552097519a614ce0c3},
+ all_ss_ids = {['e6826552a5bff552a12087552097519a614ce0c3']},
+ gscites = {1},
 }

 @article{Schr21c,
@@ -22264,6 +27667,26 @@
 file = {Schr21c.pdf:pdf\\Schr21c.pdf:PDF},
 optnote = {DIAG, RADIOLOGY},
 pmid = {34200018},
+ ss_id = {53e10b43522e45781bfd4763b49aebba6602c0ac},
+ all_ss_ids = {['53e10b43522e45781bfd4763b49aebba6602c0ac']},
+ gscites = {1},
+}
+
+@article{Schr21d,
+ author = {Schreuder, Anton and Schaefer-Prokop, Cornelia M.},
+ title = {~~Beyond the AJR: "Association of the Intensity of Diagnostic Evaluation With Outcomes in Incidentally Detected Lung Nodules"},
+ doi = {10.2214/ajr.21.25903},
+ year = {2021},
+ abstract = {Abstract unavailable},
+ url = {http://dx.doi.org/10.2214/AJR.21.25903},
+ file = {Schr21d.pdf:pdf\\Schr21d.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {American Journal of Roentgenology},
+ automatic = {yes},
+ citation-count = {0},
+ pages = {1011-1011},
+ volume = {217},
+ pmid = {33787287},
 }

 @article{Schr22,
@@ -22280,18 +27703,38 @@
 optnote = {DIAG, RADIOLOGY},
 pmid = {34649976},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/252027},
+ ss_id = {956fe0cd25879aa006d9885eba32da7100b6d338},
+ all_ss_ids = {['956fe0cd25879aa006d9885eba32da7100b6d338']},
+ gscites = {3},
+}
+
+@article{Schu23,
+ author = {M. Schuurmans and N. Alves and Pierpaolo Vendittelli and H. Huisman and J. Hermans},
+ title = {Artificial Intelligence in Pancreatic Ductal Adenocarcinoma Imaging: A Commentary on Potential Future Applications.},
+ doi = {10.1053/j.gastro.2023.04.003},
+ file = {Schu23.pdf:pdf\\Schu23.pdf:PDF},
+ journal = {Gastroenterology},
+ optnote = {DIAG, RADIOLOGY},
+ pmid = {37054755},
+ year = {2023},
+ ss_id = {77ac189f7a47c859a4bbee196c1ac6a2ac046338},
+ all_ss_ids = {['77ac189f7a47c859a4bbee196c1ac6a2ac046338']},
+ gscites = {0},
 }

-@Article{Schuurmans22a,
-  author = {Schuurmans, Megan and Alv\'{a}s, Natalia and Vendittelli, Pierpaolo and Huisman, Henkjan and Hermans, John},
-  title = {Setting the Research Agenda for Clinical Artificial Intelligence in Pancreatic Adenocarcinoma Imaging},
-  doi = {https://doi.org/10.3390/cancers14143498},
-  pages = {3498},
-  url = {https://www.mdpi.com/2072-6694/14/14/3498},
+@article{Schuurmans22a,
+ author = {Schuurmans, Megan and Alves, Natalia and Vendittelli, Pierpaolo and Huisman, Henkjan and Hermans, John},
+ title = {Setting the Research Agenda for Clinical Artificial Intelligence in Pancreatic Adenocarcinoma Imaging},
+ doi = {https://doi.org/10.3390/cancers14143498},
+ pages = {3498},
+ url = {https://www.mdpi.com/2072-6694/14/14/3498},
 abstract = {Pancreatic ductal adenocarcinoma (PDAC), estimated to become the second leading cause of cancer deaths in western societies by 2030, was flagged as a neglected cancer by the European Commission and the United States Congress.
Due to lack of investment in research and development, combined with a complex and aggressive tumour biology, PDAC overall survival has not significantly improved the past decades. Cross-sectional imaging and histopathology play a crucial role throughout the patient pathway. However, current clinical guidelines for diagnostic workup, patient stratification, treatment response assessment, and follow-up are non-uniform and lack evidence-based consensus. Artificial Intelligence (AI) can leverage multimodal data to improve patient outcomes, but PDAC AI research is too scattered and lacking in quality to be incorporated into clinical workflows. This review describes the patient pathway and derives touchpoints for image-based AI research in collaboration with a multi-disciplinary, multi-institutional expert panel. The literature exploring AI to address these touchpoints is thoroughly retrieved and analysed to identify the existing trends and knowledge gaps. The results show absence of multi-institutional, well-curated datasets, an essential building block for robust AI applications. Furthermore, most research is unimodal, does not use state-of-the-art AI techniques, and lacks reliable ground truth. Based on this, the future research agenda for clinically relevant, image-driven AI in PDAC is proposed.}, - journal = {Cancers}, - optnote = {DIAG, RADIOLOGY}, - year = {2022}, + journal = {Cancers}, + optnote = {DIAG, RADIOLOGY}, + year = {2022}, + ss_id = {0de05b444cb65b031f018dc44309dca7f7618a7f}, + all_ss_ids = {['0de05b444cb65b031f018dc44309dca7f7618a7f']}, + gscites = {0}, } @conference{Schuurmans22b, @@ -22377,16 +27820,19 @@ @article{Sech20 title = {Artificial Intelligence for Breast Cancer Detection in Mammography: state of the art}, doi = {10.1016/j.semcancer.2020.06.002}, abstract = {Screening for breast cancer with mammography has been introduced in various countries over the last 30 years, initially using analog screen-film-based systems and, over the last 20 years, transitioning to the use of fully digital systems. With the introduction of digitization, the computer interpretation of images has been a subject of intense interest, resulting in the introduction of computer-aided detection (CADe) and diagnosis (CADx) algorithms in the early 2000's. Although they were introduced with high expectations, the potential improvement in the clinical realm failed to materialize, mostly due to the high number of false positive marks per analyzed image. - - In the last five years, the artificial intelligence (AI) revolution in computing, driven mostly by deep learning and convolutional neural networks, has also pervaded the field of automated breast cancer detection in digital mammography and digital breast tomosynthesis. Research in this area first involved comparison of its capabilities to that of conventional CADe/CADx methods, which quickly demonstrated the potential of this new technology. In the last couple of years, more mature and some commercial products have been developed, and studies of their performance compared to that of experienced breast radiologists are showing that these algorithms are on par with human-performance levels in retrospective data sets. Although additional studies, especially prospective evaluations performed in the real screening environment, are needed, it is becoming clear that AI will have an important role in the future breast cancer screening realm. 
Exactly how this new player will shape this field remains to be determined, but recent studies are already evaluating different options for implementation of this technology.
-
- The aim of this review is to provide an overview of the basic concepts and developments in the field AI for breast cancer detection in digital mammography and digital breast tomosynthesis. The pitfalls of conventional methods, and how these are, for the most part, avoided by this new technology, will be discussed. Importantly, studies that have evaluated the current capabilities of AI and proposals for how these capabilities should be leveraged in the clinical realm will be reviewed, while the questions that need to be answered before this vision becomes a reality are posed.},
+
+ In the last five years, the artificial intelligence (AI) revolution in computing, driven mostly by deep learning and convolutional neural networks, has also pervaded the field of automated breast cancer detection in digital mammography and digital breast tomosynthesis. Research in this area first involved comparison of its capabilities to that of conventional CADe/CADx methods, which quickly demonstrated the potential of this new technology. In the last couple of years, more mature and some commercial products have been developed, and studies of their performance compared to that of experienced breast radiologists are showing that these algorithms are on par with human-performance levels in retrospective data sets. Although additional studies, especially prospective evaluations performed in the real screening environment, are needed, it is becoming clear that AI will have an important role in the future breast cancer screening realm. Exactly how this new player will shape this field remains to be determined, but recent studies are already evaluating different options for implementation of this technology.
+
+ The aim of this review is to provide an overview of the basic concepts and developments in the field of AI for breast cancer detection in digital mammography and digital breast tomosynthesis. The pitfalls of conventional methods, and how these are, for the most part, avoided by this new technology, will be discussed. Importantly, studies that have evaluated the current capabilities of AI and proposals for how these capabilities should be leveraged in the clinical realm will be reviewed, while the questions that need to be answered before this vision becomes a reality are posed.},
 file = {Sech20.pdf:pdf/Sech20.pdf:PDF},
 journal = {Seminars in Cancer Biology},
 optnote = {DIAG, INPRESS, RADIOLOGY},
 pmid = {32531273},
 year = {2020},
 month = {6},
+ ss_id = {4cfea5ab26c5824d616fa8e2562c0666ecee55aa},
+ all_ss_ids = {['4cfea5ab26c5824d616fa8e2562c0666ecee55aa']},
+ gscites = {96},
 }

 @article{Seku20,
@@ -22401,8 +27847,23 @@
 file = {Seku20.pdf:pdf/Seku20.pdf:PDF},
 optnote = {DIAG, RADIOLOGY},
 gsid = {4525652165222884760},
- gscites = {2},
+ gscites = {121},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/238698},
+ ss_id = {4882a6d4e40e048921d04570bec76a217b35b25f},
+ all_ss_ids = {['4882a6d4e40e048921d04570bec76a217b35b25f']},
+}
+
+@article{Seku20a,
+ author = {Sekuboyina, Anjany and Husseini, Malek E.
and Bayat, Amirhossein and L\"{o}ffler, Maximilian and Liebl, Hans and Li, Hongwei and Tetteh, Giles and Kuka\v{c}ka, Jan and Payer, Christian and Stern, Darko and Urschler, Martin and Chen, Maodong and Cheng, Dalong and Lessmann, Nikolas and Hu, Yujin and Wang, Tianfu and Yang, Dong and Xu, Daguang and Ambellan, Felix and Amiranashvili, Tamaz and Ehlke, Moritz and Lamecker, Hans and Lehnert, Sebastian and Lirio, Marilia and de Olaguer, Nicol\'{a}s P\'{e}rez and Ramm, Heiko and Sahu, Manish and Tack, Alexander and Zachow, Stefan and Jiang, Tao and Ma, Xinjun and Angerman, Christoph and Wang, Xin and Brown, Kevin and Kirszenberg, Alexandre and Puybareau, \'{E}lodie and Chen, Di and Bai, Yiwei and Rapazzo, Brandon H. and Yeah, Timyoas and Zhang, Amber and Xu, Shangliang and Hou, Feng and He, Zhiqiang and Zeng, Chan and Xiangshang, Zheng and Liming, Xu and Netherton, Tucker J. and Mumme, Raymond P. and Court, Laurence E. and Huang, Zixun and He, Chenhang and Wang, Li-Wen and Ling, Sai Ho and Huynh, L\^{e} Duy and Boutry, Nicolas and Jakubicek, Roman and Chmelik, Jiri and Mulay, Supriti and Sivaprakasam, Mohanasankar and Paetzold, Johannes C. and Shit, Suprosanna and Ezhov, Ivan and Wiestler, Benedikt and Glocker, Ben and Valentinitsch, Alexander and Rempfler, Markus and Menze, Bj\"{o}rn H. and Kirschke, Jan S.}, + title = {~~VerSe: A Vertebrae Labelling and Segmentation Benchmark for Multi-detector CT Images}, + doi = {10.48550/ARXIV.2001.09193}, + year = {2020}, + abstract = {Vertebral labelling and segmentation are two fundamental tasks in an automated spine processing pipeline. Reliable and accurate processing of spine images is expected to benefit clinical decision-support systems for diagnosis, surgery planning, and population-based analysis on spine and bone health. However, designing automated algorithms for spine processing is challenging predominantly due to considerable variations in anatomy and acquisition protocols and due to a severe shortage of publicly available data. Addressing these limitations, the Large Scale Vertebrae Segmentation Challenge (VerSe) was organised in conjunction with the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) in 2019 and 2020, with a call for algorithms towards labelling and segmentation of vertebrae. Two datasets containing a total of 374 multi-detector CT scans from 355 patients were prepared and 4505 vertebrae have individually been annotated at voxel-level by a human-machine hybrid algorithm (https://osf.io/nqjyw/, https://osf.io/t98fz/). A total of 25 algorithms were benchmarked on these datasets. In this work, we present the the results of this evaluation and further investigate the performance-variation at vertebra-level, scan-level, and at different fields-of-view. We also evaluate the generalisability of the approaches to an implicit domain shift in data by evaluating the top performing algorithms of one challenge iteration on data from the other iteration. The principal takeaway from VerSe: the performance of an algorithm in labelling and segmenting a spine scan hinges on its ability to correctly identify vertebrae in cases of rare anatomical variations. 
The content and code concerning VerSe can be accessed at: https://github.com/anjany/verse.}, + url = {https://arxiv.org/abs/2001.09193}, + file = {Seku20a.pdf:pdf\\Seku20a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {arXiv:2001.09193}, + automatic = {yes}, } @article{Selv06, @@ -22421,6 +27882,8 @@ @article{Selv06 month = {10}, gsid = {17201149467712925424}, gscites = {31}, + ss_id = {49c24eee5a4beae2052e774c275e3a5a2411924e}, + all_ss_ids = {['49c24eee5a4beae2052e774c275e3a5a2411924e']}, } @inproceedings{Seti13, @@ -22457,7 +27920,9 @@ @inproceedings{Seti15 number = {94141O}, month = {3}, gsid = {4260948689623411701}, - gscites = {2}, + gscites = {4}, + ss_id = {f59a55d1e7cb49f8b2062df48ecd267b24191aa8}, + all_ss_ids = {['f59a55d1e7cb49f8b2062df48ecd267b24191aa8']}, } @article{Seti15a, @@ -22476,8 +27941,10 @@ @article{Seti15a publisher = {American Association of Physicists in Medicine}, month = {9}, gsid = {14121163271329355688}, - gscites = {88}, + gscites = {139}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/154609}, + ss_id = {c973f1bff6304e0e6d4656d1379d1414929b782f}, + all_ss_ids = {['c973f1bff6304e0e6d4656d1379d1414929b782f']}, } @article{Seti16, @@ -22495,8 +27962,10 @@ @article{Seti16 pmid = {26955024}, month = {5}, gsid = {8158189024082045415}, - gscites = {605}, + gscites = {976}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/164462}, + ss_id = {06c8b4c1dd0c045bc6a08d0062c5042b5588d55b}, + all_ss_ids = {['06c8b4c1dd0c045bc6a08d0062c5042b5588d55b']}, } @article{Seti17, @@ -22515,8 +27984,10 @@ @article{Seti17 publisher = {Elsevier}, month = {12}, gsid = {2084538205545992787}, - gscites = {291}, + gscites = {784}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/179531}, + ss_id = {7975bfe13445f81edddd6e101d7d469cd20cbd89}, + all_ss_ids = {['7975bfe13445f81edddd6e101d7d469cd20cbd89']}, } @phdthesis{Seti18, @@ -22544,6 +28015,41 @@ @mastersthesis{Sheikh22a journal = {Master thesis}, } +@article{Sher22, + author = {Sherman, Mark E. and de Bel, Thomas and Heckman, Michael G. and White, Launia J. and Ogony, Joshua and Stallings-Mann, Melody and Hilton, Tracy and Degnim, Amy C. and Vierkant, Robert A. and Hoskin, Tanya and Jensen, Matthew R. and Pacheco-Spann, Laura and Henry, Jill E. and Storniolo, Anna Maria and Carter, Jodi M. and Winham, Stacey J. and Radisky, Derek C. and van der Laak, Jeroen}, + title = {~~Serum hormone levels and normal breast histology among premenopausal women}, + doi = {10.1007/s10549-022-06600-9}, + year = {2022}, + abstract = {Breast terminal duct lobular units (TDLUs) are the main source of breast cancer (BC) precursors. Higher serum concentrations of hormones and growth factors have been linked to increased TDLU numbers and to elevated BC risk, with variable effects by menopausal status. We assessed associations of circulating factors with breast histology among premenopausal women using artificial intelligence (AI) and preliminarily tested whether parity modifies associations. Pathology AI analysis was performed on 316 digital images of H&E-stained sections of normal breast tissues from Komen Tissue Bank donors ages <= 45 years to assess 11 quantitative metrics. Associations of circulating factors with AI metrics were assessed using regression analyses, with inclusion of interaction terms to assess effect modification. 
Higher prolactin levels were related to larger TDLU area (p < 0.001) and increased presence of adipose tissue proximate to TDLUs (p < 0.001), with less significant positive associations for acini counts (p = 0.012), dilated acini (p = 0.043), capillary area (p = 0.014), epithelial area (p = 0.007), and mononuclear cell counts (p = 0.017). Testosterone levels were associated with increased TDLU counts (p < 0.001), irrespective of parity, but associations differed by adipose tissue content. AI data for TDLU counts generally agreed with prior visual assessments. Among premenopausal women, serum hormone levels linked to BC risk were also associated with quantitative features of normal breast tissue. These relationships were suggestively modified by parity status and tissue composition. We conclude that the microanatomic features of normal breast tissue may represent a marker of BC risk.}, + url = {http://dx.doi.org/10.1007/s10549-022-06600-9}, + file = {Sher22.pdf:pdf\\Sher22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Breast Cancer Research and Treatment}, + automatic = {yes}, + citation-count = {0}, + pages = {149-158}, + volume = {194}, + pmid = {35503494}, +} + +@article{Sido23, + author = {Sidorenkov, Grigory and Stadhouders, Ralph and Jacobs, Colin and Mohamed Hoesein, Firdaus A. A. and Gietema, Hester A. and Nackaerts, Kristiaan and Saghir, Zaigham and Heuvelmans, Marjolein A. and Donker, Hylke C. and Aerts, Joachim G. and Vermeulen, Roel and Uitterlinden, Andre and Lenters, Virissa and van Rooij, Jeroen and Schaefer-Prokop, Cornelia and Groen, Harry J. M. and de Jong, Pim A. and Cornelissen, Robin and Prokop, Mathias and de Bock, Geertruida H. and Vliegenthart, Rozemarijn}, + title = {Multi-source data approach for personalized outcome prediction in lung cancer screening: update from the NELSON trial.}, + doi = {10.1007/s10654-023-00975-9}, + abstract = {Trials show that low-dose computed tomography (CT) lung cancer screening in long-term (ex-)smokers reduces lung cancer mortality. However, many individuals were exposed to unnecessary diagnostic procedures. This project aims to improve the efficiency of lung cancer screening by identifying high-risk participants, and improving risk discrimination for nodules. This study is an extension of the Dutch-Belgian Randomized Lung Cancer Screening Trial, with a focus on personalized outcome prediction (NELSON-POP). New data will be added on genetics, air pollution, malignancy risk for lung nodules, and CT biomarkers beyond lung nodules (emphysema, coronary calcification, bone density, vertebral height and body composition). The roles of polygenic risk scores and air pollution in screen-detected lung cancer diagnosis and survival will be established. The association between the AI-based nodule malignancy score and lung cancer will be evaluated at baseline and incident screening rounds. The association of chest CT imaging biomarkers with outcomes will be established. Based on these results, multisource prediction models for pre-screening and post-baseline-screening participant selection and nodule management will be developed. The new models will be externally validated. We hypothesize that we can identify 15-20% participants with low-risk of lung cancer or short life expectancy and thus prevent ~140,000 Dutch individuals from being screened unnecessarily. 
We hypothesize that our models will improve the specificity of nodule management by 10% without loss of sensitivity as compared to assessment of nodule size/growth alone, and reduce unnecessary work-up by 40-50%.}, + file = {Sido23.pdf:pdf\\Sido23.pdf:PDF}, + journal = {European journal of epidemiology}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36943671}, + year = {2023}, + volume = {38}, + number = {4}, + pages = {445--454}, + ss_id = {42f306e5fe8d8a22bc805f0f98e3fd2c292f06ac}, + all_ss_ids = {['42f306e5fe8d8a22bc805f0f98e3fd2c292f06ac']}, + gscites = {0}, +} + @article{Sier20, author = {M. M. Sieren and F. Brenne and A. Hering and H. Kienapfel and N. Gebauer and T. H. Oechtering and A. F\"{u}rschke and F. Wegner and E. Stahlberg and S. Heldmann and J. Barkhausen and A. Frydrychowicz}, title = {Rapid study assessment in follow-up whole-body computed tomography in patients with multiple myeloma using a dedicated bone subtraction software}, @@ -22553,16 +28059,16 @@ @article{Sier20 volume = 30, pages = {3198-3209}, abstract = {Objectives - The diagnostic reading of follow-up low-dose whole-body computed tomography (WBCT) examinations in patients with multiple myeloma (MM) is a demanding process. This study aimed to evaluate the diagnostic accuracy and benefit of a novel software program providing rapid-subtraction maps for bone lesion change detection. - - Methods - Sixty patients (66 years +- 10 years) receiving 120 WBCT examinations for follow-up evaluation of MM bone disease were identified from our imaging archive. The median follow-up time was 292 days (range 200-641 days). Subtraction maps were calculated from 2-mm CT images using a nonlinear deformation algorithm. Reading time, correctly assessed lesions, and disease classification were compared to a standard reading software program. De novo clinical reading by a senior radiologist served as the reference standard. Statistics included Wilcoxon rank-sum test, Cohen's kappa coefficient, and calculation of sensitivity, specificity, positive/negative predictive value, and accuracy. - - Results - Calculation time for subtraction maps was 84 s +- 24 s. Both readers reported exams faster using subtraction maps (reader A, 438 s +- 133 s; reader B, 1049 s +- 438 s) compared to PACS software (reader A, 534 s +- 156 s; reader B, 1486 s +- 587 s; p < 0.01). The course of disease was correctly classified by both methods in all patients. Sensitivity for lesion detection in subtraction maps/conventional reading was 92%/80% for reader A and 88%/76% for reader B. Specificity was 98%/100% for reader A and 95%/96% for reader B. - - Conclusion - A software program for the rapid-subtraction map calculation of follow-up WBCT scans has been successfully tested and seems suited for application in clinical routine. Subtraction maps significantly facilitated reading of WBCTs by reducing reading time and increasing sensitivity.}, + The diagnostic reading of follow-up low-dose whole-body computed tomography (WBCT) examinations in patients with multiple myeloma (MM) is a demanding process. This study aimed to evaluate the diagnostic accuracy and benefit of a novel software program providing rapid-subtraction maps for bone lesion change detection. + + Methods + Sixty patients (66 years +- 10 years) receiving 120 WBCT examinations for follow-up evaluation of MM bone disease were identified from our imaging archive. The median follow-up time was 292 days (range 200-641 days). 
Subtraction maps were calculated from 2-mm CT images using a nonlinear deformation algorithm. Reading time, correctly assessed lesions, and disease classification were compared to a standard reading software program. De novo clinical reading by a senior radiologist served as the reference standard. Statistics included Wilcoxon rank-sum test, Cohen's kappa coefficient, and calculation of sensitivity, specificity, positive/negative predictive value, and accuracy. + + Results + Calculation time for subtraction maps was 84 s +- 24 s. Both readers reported exams faster using subtraction maps (reader A, 438 s +- 133 s; reader B, 1049 s +- 438 s) compared to PACS software (reader A, 534 s +- 156 s; reader B, 1486 s +- 587 s; p < 0.01). The course of disease was correctly classified by both methods in all patients. Sensitivity for lesion detection in subtraction maps/conventional reading was 92%/80% for reader A and 88%/76% for reader B. Specificity was 98%/100% for reader A and 95%/96% for reader B. + + Conclusion + A software program for the rapid-subtraction map calculation of follow-up WBCT scans has been successfully tested and seems suited for application in clinical routine. Subtraction maps significantly facilitated reading of WBCTs by reducing reading time and increasing sensitivity.}, file = {Sier20.pdf:pdf\\Sier20.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {32048038}, @@ -22602,8 +28108,10 @@ @article{Silv18 optnote = {DIAG, RADIOLOGY}, pmid = {29543693}, gsid = {3431674930923271869}, - gscites = {13}, + gscites = {29}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/193523}, + ss_id = {6d4507a54e77f55af2bfa786ca804a13018ff273}, + all_ss_ids = {['6d4507a54e77f55af2bfa786ca804a13018ff273']}, } @article{Silv18a, @@ -22620,8 +28128,10 @@ @article{Silv18a optnote = {DIAG, RADIOLOGY}, pmid = {30026071}, gsid = {4749736138620847044}, - gscites = {15}, + gscites = {50}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/196781}, + ss_id = {4e67443737640a6d88642034266c0ce2d76d9ae7}, + all_ss_ids = {['4e67443737640a6d88642034266c0ce2d76d9ae7']}, } @conference{Silv19, @@ -22656,6 +28166,9 @@ @article{Silv20 month = {4}, number = {4}, pmid = {32997182}, + ss_id = {367878e980e109d80ad80066cec1961a940614ec}, + all_ss_ids = {['367878e980e109d80ad80066cec1961a940614ec']}, + gscites = {25}, } @article{Simp19, @@ -22664,12 +28177,31 @@ @article{Simp19 url = {https://arxiv.org/pdf/1902.09063.pdf}, abstract = {Semantic segmentation of medical images aims to associate a pixel with a label in a medical image without human initialization. The success of semantic segmentation algorithms is contingent on the availability of high-quality imaging data with corresponding labels provided by experts. We sought to create a large collection of annotated medical image datasets of various clinically relevant anatomies available under open source license to facilitate the development of semantic segmentation algorithms. Such a resource would allow: 1) objective assessment of general-purpose segmentation methods through comprehensive benchmarking and 2) open and free access to medical image data for any researcher interested in the problem domain. Through a multi-institutional effort, we generated a large, curated dataset representative of several highly variable segmentation tasks that was used in a crowd-sourced challenge - the Medical Segmentation Decathlon held during the 2018 Medical Image Computing and Computer Aided Interventions Conference in Granada, Spain. 
Here, we describe these ten labeled image datasets so that these data may be effectively reused by the research community.}, file = {:pdf/Simp19.pdf:PDF}, - gscites = {75}, + gscites = {608}, gsid = {12275877761746940475}, journal = {arXiv:1902.09063}, month = {2}, optnote = {DIAG}, year = {2019}, + ss_id = {4654aa505e5bcdb089d0df202cd7ceabc9d2d41f}, + all_ss_ids = {['4654aa505e5bcdb089d0df202cd7ceabc9d2d41f']}, +} + +@article{Slaa21, + author = {Slaats, Jeroen and Dieteren, Cindy E. and Wagena, Esther and Wolf, Louis and Raaijmakers, Tonke K. and van der Laak, Jeroen A. and Figdor, Carl G. and Weigelin, Bettina and Friedl, Peter}, + title = {~~Metabolic Screening of Cytotoxic T-cell Effector Function Reveals the Role of CRAC Channels in Regulating Lethal Hit Delivery}, + doi = {10.1158/2326-6066.cir-20-0741}, + year = {2021}, + abstract = {AbstractCytotoxic T lymphocytes (CTL) mediate cytotoxicity toward tumor cells by multistep cell-cell interactions. However, the tumor microenvironment can metabolically perturb local CTL effector function. CTL activity is typically studied in two-dimensional (2D) liquid coculture, which is limited in recapitulating the mechanisms and efficacy of the multistep CTL effector response. We here developed a microscopy-based, automated three-dimensional (3D) interface coculture model suitable for medium-throughput screening to delineate the steps and CTL effector mechanisms affected by microenvironmental perturbation. CTL effector function was compromised by deregulated redox homeostasis, deficient mitochondrial respiration, as well as dysfunctional Ca2+ release-activated Ca2+ (CRAC) channels. Perturbation of CRAC channel function dampened calcium influx into CTLs, delayed CTL degranulation, and lowered the frequency of sublethal hits (i.e., additive cytotoxicity) delivered to the target cell. Thus, CRAC channel activity controls both individual contact efficacy and CTL cooperativity required for serial killing of target cells. The multistep analysis of CTL effector responses in 3D coculture will facilitate the identification of immune-suppressive mechanisms and guide the rational design of targeted intervention strategies to restore CTL effector function.}, + url = {http://dx.doi.org/10.1158/2326-6066.CIR-20-0741}, + file = {Slaa21.pdf:pdf\\Slaa21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Cancer Immunology Research}, + automatic = {yes}, + citation-count = {5}, + pages = {926-938}, + volume = {9}, + pmid = {34226201}, } @article{Sleb15, @@ -22693,19 +28225,19 @@ @mastersthesis{Sloo20 author = {Ilse Slootweg}, title = {Patient variables related to false predictions of deep-learning assisted prostate cancer detection in MRI}, abstract = {Background: - DL-CAD for prediction of clinically significant prostate cancer (csPCa) in mpMRI is developed to aid radiologists in PI-RADS evaluation. DL-CAD predictions have low accuracy, possibly due to clinical risk factors of csPCa that are not taken into account by DL-CAD. - - Purpose: - Aim is to identify patient subgroups of clinical characteristics in which DL-CAD predictions differ from radiologists. - - Methods: - DL-CAD was applied to a test cohort of men examined for PCa according to PI-RADSv2 between 2016 and 2017. Ground truth was provided by manually annotated PI-RADS >=4 lesions. Patient age and PSA were derived from the electronic patient record and other variables were mined from the written radiological reports. 
False and correct predicted patients were compared on variable distributions and false positive rates were compared between variable categories. - - Results: - CsPCa was predicted for a total of 482 men (36.9% PIRADS >=4). Benign and malignant patients statistically differed on all clinical variables (P<.05). DL-CAD negative predictive value and positive predictive value were 0.912 and 0.457, respectively. False and correct positive predicted patients significantly differed on age (P<.05), PSA (P<.001), and PSAD (P<.001) as well as prostate volume (P<.001), number of lesions (P<.001), and number of affected zones (P<.001). Analysis of negative predictions was inconclusive due to small population size. - - Conclusions: - False positive DL-CAD csPCa predictions are due to unavailable clinical variables that are used in radiologists' PI-RADS risk assessment. We advise to study the effect of including age, PSA and PSAD information in DL-CAD input on prediction accuracy.}, + DL-CAD for prediction of clinically significant prostate cancer (csPCa) in mpMRI is developed to aid radiologists in PI-RADS evaluation. DL-CAD predictions have low accuracy, possibly due to clinical risk factors of csPCa that are not taken into account by DL-CAD. + + Purpose: + Aim is to identify patient subgroups of clinical characteristics in which DL-CAD predictions differ from radiologists. + + Methods: + DL-CAD was applied to a test cohort of men examined for PCa according to PI-RADSv2 between 2016 and 2017. Ground truth was provided by manually annotated PI-RADS >=4 lesions. Patient age and PSA were derived from the electronic patient record and other variables were mined from the written radiological reports. False and correct predicted patients were compared on variable distributions and false positive rates were compared between variable categories. + + Results: + CsPCa was predicted for a total of 482 men (36.9% PIRADS >=4). Benign and malignant patients statistically differed on all clinical variables (P<.05). DL-CAD negative predictive value and positive predictive value were 0.912 and 0.457, respectively. False and correct positive predicted patients significantly differed on age (P<.05), PSA (P<.001), and PSAD (P<.001) as well as prostate volume (P<.001), number of lesions (P<.001), and number of affected zones (P<.001). Analysis of negative predictions was inconclusive due to small population size. + + Conclusions: + False positive DL-CAD csPCa predictions are due to unavailable clinical variables that are used in radiologists' PI-RADS risk assessment. 
We advise to study the effect of including age, PSA and PSAD information in DL-CAD input on prediction accuracy.}, file = {:pdf/Sloo20.pdf:PDF}, optnote = {DIAG}, school = {Radboud University Medical Center}, @@ -22723,6 +28255,8 @@ @inproceedings{Slui02 optnote = {DIAG, RADIOLOGY}, gsid = {9666435673167423528}, gscites = {2}, + ss_id = {ee7ea95666aa2686dbf70c6aeed8c2794ed3cbac}, + all_ss_ids = {['ee7ea95666aa2686dbf70c6aeed8c2794ed3cbac']}, } @article{Slui03, @@ -22741,6 +28275,8 @@ @article{Slui03 month = {11}, gsid = {6511737817147038623}, gscites = {181}, + ss_id = {e2e040d3f75ff4e4c04b281f46163806f80ebcce}, + all_ss_ids = {['e2e040d3f75ff4e4c04b281f46163806f80ebcce']}, } @conference{Slui03a, @@ -22767,6 +28303,8 @@ @inproceedings{Slui04 month = {5}, gsid = {97306129207600256}, gscites = {32}, + ss_id = {7a1964f613685258b49161681d67585ddff896c2}, + all_ss_ids = {['7a1964f613685258b49161681d67585ddff896c2']}, } @phdthesis{Slui05, @@ -22799,6 +28337,8 @@ @article{Slui05a month = {8}, gsid = {884962951440896774}, gscites = {320}, + ss_id = {2421eaf85e274e59508c87df80bc4242edae7168}, + all_ss_ids = {['2421eaf85e274e59508c87df80bc4242edae7168']}, } @article{Slui06, @@ -22817,6 +28357,8 @@ @article{Slui06 month = {6}, gsid = {7126259624329785862}, gscites = {79}, + ss_id = {cb724200872323436552c2303f4d8f3ae9ea7d1a}, + all_ss_ids = {['cb724200872323436552c2303f4d8f3ae9ea7d1a']}, } @article{Slui06a, @@ -22835,6 +28377,34 @@ @article{Slui06a month = {4}, gsid = {10943222434899920168}, gscites = {621}, + ss_id = {ecad8c357147604fc065e717f54758ad4c10552b}, + all_ss_ids = {['ecad8c357147604fc065e717f54758ad4c10552b']}, +} + +@article{Slui23, + author = {van der Sluijs, Koen M. and Thannhauser, Jos and Visser, Iris M. and Nabeel, P. M. and Raj, Kiran V. and Malik, Afrah E. F. and Reesink, Koen D. and Eijsvogels, Thijs M. H. and Bakker, Esm\'{e}e A. and Kaur, Prabhdeep and Joseph, Jayaraj and Thijssen, Dick H. J.}, + title = {~~Central and local arterial stiffness in White Europeans compared to age-, sex-, and BMI-matched South Asians}, + doi = {10.1371/journal.pone.0290118}, + year = {2023}, + abstract = { + Background + Ethnicity impacts cardiovascular disease (CVD) risk, and South Asians demonstrate a higher risk than White Europeans. Arterial stiffness is known to contribute to CVD, and differences in arterial stiffness between ethnicities could explain the disparity in CVD risk. We compared central and local arterial stiffness between White Europeans and South Asians and investigated which factors are associated with arterial stiffness. + Methods + Data were collected from cohorts of White Europeans (the Netherlands) and South Asians (India). We matched cohorts on individual level using age, sex, and body mass index (BMI). Arterial stiffness was measured with ARTSENS(r) Plus. Central stiffness was expressed as carotid-femoral pulse wave velocity (cf-PWV, m/s), and local carotid stiffness was quantified using the carotid stiffness index (Beta) and pressure-strain elastic modulus (Epsilon, kPa). We compared arterial stiffness between cohorts and used multivariable linear regression to identify factors related to stiffness. + Results + We included n = 121 participants per cohort (age 53+-10 years, 55% male, BMI 24 kg/m2). Cf-PWV was lower in White Europeans compared to South Asians (6.8+-1.9 vs. 8.2+-1.8 m/s, p<0.001), but no differences were found for local stiffness parameters Beta (5.4+-2.4 vs. 5.8+-2.3, p = 0.17) and Epsilon (72+-35 vs. 70+-31 kPa, p = 0.56). 
Age (standardized b, 95% confidence interval: 0.28, 0.17-0.39), systolic blood pressure (0.32, 0.21-0.43), and South Asian ethnicity (0.46, 0.35-0.57) were associated with cf-PWV; associations were similar between cohorts (p>0.05 for interaction). Systolic blood pressure was associated with carotid stiffness in both cohorts, whereas age was associated to carotid stiffness only in South Asians and BMI only in White Europeans. + Conclusion + Ethnicity is associated with central but not local arterial stiffness. Conversely, ethnicity seems to modify associations between CVD risk factors and local but not central arterial stiffness. This suggests that ethnicity interacts with arterial stiffness measures and the association of these measures with CVD risk factors. + }, + url = {http://dx.doi.org/10.1371/journal.pone.0290118}, + file = {Slui23.pdf:pdf\\Slui23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {PLOS ONE}, + automatic = {yes}, + citation-count = {0}, + pages = {e0290118}, + volume = {18}, + pmid = {37616275}, } @conference{Smee18, @@ -22842,32 +28412,32 @@ @conference{Smee18 booktitle = {European Society for Molecular Imaging}, title = {Tumor heterogeneity as a PET-biomarker predicts overall survival of pancreatic cancer patients}, abstract = {INTRODUCTION - Pancreatic ductal adenocarcinoma (PDAC) shows a 5-year survival rate of 8%[1]. This mortality results from a lack of methods to accurately treat patients[2]. PDAC is remarkable for its fibrotic reaction, which is present at early stages of PDAC development[3]. Components of this environment can be measured on clinical images[4]. PET derived parameters, e.g. SUVmax, have not been able to provide prognostic information. In this study we developed an algorithm based on FDG-PET texture features (TF) that classifies heterogeneous or homogeneous tumors and shows a correlation with overall survival. - - - METHODS - In total, 121 patients with histologically proven PDAC who underwent 18F-FDG PET/CT (Siemens Biograph mCT, Knoxville, US) were selected from the hospital system. Eighty-six EANM reconstructed scans were visually labeled as 'homogenous' or 'heterogeneous' by experienced Nuclear Medicine physicians and served as training set to develop the classifier [5]. All the 121 scans were used as validation set for the correlation with overall survival (OS). Tumors were delineated using 40% threshold of the SUVmax with manual correction. TF were extracted using the PyRadiomcis toolbox [6]. TF were selected and tested for robustness as described in literature [7-9]. The classifier was build using logistic regression. Prognostic impact was assessed by Kaplan Meier survival analysis and log-rank test. - - - RESULTS - Optimal performance of the leave-one-out cross-validation classifier in the training set yielded an accuracy of 0.73 and AUC of 0.71 in classifying PDAC as heterogeneous or homogeneous tumors. Of note, two tumors were visually labeled as homogenous but correctly classifier as heterogeneous by the classifier after review. For the 121 patients the OS of PDAC tumors classified as heterogeneous, was significantly worse than for homogeneous tumors; median OS 69 weeks (95%CI 64 to 91 weeks) versus median 95 weeks (95%CI 76 to 114), p= 0.0285). This is in contrast with single standard PET parameters, single TF or manual labeling, which had no significant prognostic impact. 
-
-
- CONCLUSIONS
- We developed an algorithm that accurately classifies PDAC as heterogeneous or homogeneous, based on a set of 18F-FDG PET derived texture features. We showed that the classification result has prognostic value, improving upon standard PET derived parameters and single texture-features. Further validation of this algorithm in an external cohort of PDAC patients is ongoing.
-
-
- REFERENCES
-
- [1] Siegel, R.L., K.D. Miller, and A. Jemal, Cancer statistics, 2016. CA Cancer J Clin, 2016. 66(1): p. 7-30.
- [2] Ryan, D.P., T.S. Hong, and N. Bardeesy, Pancreatic adenocarcinoma. N Engl J Med, 2014. 371(11): p. 1039-49.
- [3] Neesse, A., et al., Stromal biology and therapy in pancreatic cancer: a changing paradigm. Gut, 2015. 64(9): p. 1476-84.
- [4] Heid, I., et al., Co-clinical Assessment of Tumor Cellularity in Pancreatic Cancer. Clin Cancer Res, 2017. 23(6): p. 1461-1470.
- [5] Boellaard, R., et al., FDG PET and PET/CT: EANM procedure guidelines for tumour PET imaging: version 1.0. Eur J Nucl Med Mol Imaging, 2010. 37(1): p. 181-200.
- [6] van Griethuysen, J.J.M., et al., Computational Radiomics System to Decode the Radiographic Phenotype. Cancer Res, 2017. 77(21): p. e104-e107.
- [7] Yan, J., et al., Impact of Image Reconstruction Settings on Texture Features in 18F-FDG PET. J Nucl Med, 2015. 56(11): p. 1667-73.
- [8] Leijenaar, R.T., et al., The effect of SUV discretization in quantitative FDG-PET Radiomics: the need for standardized methodology in tumor texture analysis. Sci Rep, 2015. 5: p. 11075.
- [9] Grootjans, W., et al., The Impact of Optimal Respiratory Gating and Image Noise on Evaluation of Intratumor Heterogeneity on 18F-FDG PET Imaging of Lung Cancer. J Nucl Med, 2016. 57(11): p. 1692-1698.},
+ Pancreatic ductal adenocarcinoma (PDAC) shows a 5-year survival rate of 8%[1]. This mortality results from a lack of methods to accurately treat patients[2]. PDAC is remarkable for its fibrotic reaction, which is present at early stages of PDAC development[3]. Components of this environment can be measured on clinical images[4]. PET derived parameters, e.g. SUVmax, have not been able to provide prognostic information. In this study we developed an algorithm based on FDG-PET texture features (TF) that classifies heterogeneous or homogeneous tumors and shows a correlation with overall survival.
+
+
+ METHODS
+ In total, 121 patients with histologically proven PDAC who underwent 18F-FDG PET/CT (Siemens Biograph mCT, Knoxville, US) were selected from the hospital system. Eighty-six EANM reconstructed scans were visually labeled as 'homogenous' or 'heterogeneous' by experienced Nuclear Medicine physicians and served as training set to develop the classifier [5]. All the 121 scans were used as validation set for the correlation with overall survival (OS). Tumors were delineated using 40% threshold of the SUVmax with manual correction. TF were extracted using the PyRadiomics toolbox [6]. TF were selected and tested for robustness as described in literature [7-9]. The classifier was built using logistic regression. Prognostic impact was assessed by Kaplan Meier survival analysis and log-rank test.
+
+
+ RESULTS
+ Optimal performance of the leave-one-out cross-validation classifier in the training set yielded an accuracy of 0.73 and AUC of 0.71 in classifying PDAC as heterogeneous or homogeneous tumors. Of note, two tumors were visually labeled as homogenous but correctly classified as heterogeneous by the classifier after review.
For the 121 patients the OS of PDAC tumors classified as heterogeneous was significantly worse than for homogeneous tumors; median OS 69 weeks (95%CI 64 to 91 weeks) versus median 95 weeks (95%CI 76 to 114), p=0.0285. This is in contrast with single standard PET parameters, single TF or manual labeling, which had no significant prognostic impact.
+
+
+ CONCLUSIONS
+ We developed an algorithm that accurately classifies PDAC as heterogeneous or homogeneous, based on a set of 18F-FDG PET derived texture features. We showed that the classification result has prognostic value, improving upon standard PET derived parameters and single texture-features. Further validation of this algorithm in an external cohort of PDAC patients is ongoing.
+
+
+ REFERENCES
+
+ [1] Siegel, R.L., K.D. Miller, and A. Jemal, Cancer statistics, 2016. CA Cancer J Clin, 2016. 66(1): p. 7-30.
+ [2] Ryan, D.P., T.S. Hong, and N. Bardeesy, Pancreatic adenocarcinoma. N Engl J Med, 2014. 371(11): p. 1039-49.
+ [3] Neesse, A., et al., Stromal biology and therapy in pancreatic cancer: a changing paradigm. Gut, 2015. 64(9): p. 1476-84.
+ [4] Heid, I., et al., Co-clinical Assessment of Tumor Cellularity in Pancreatic Cancer. Clin Cancer Res, 2017. 23(6): p. 1461-1470.
+ [5] Boellaard, R., et al., FDG PET and PET/CT: EANM procedure guidelines for tumour PET imaging: version 1.0. Eur J Nucl Med Mol Imaging, 2010. 37(1): p. 181-200.
+ [6] van Griethuysen, J.J.M., et al., Computational Radiomics System to Decode the Radiographic Phenotype. Cancer Res, 2017. 77(21): p. e104-e107.
+ [7] Yan, J., et al., Impact of Image Reconstruction Settings on Texture Features in 18F-FDG PET. J Nucl Med, 2015. 56(11): p. 1667-73.
+ [8] Leijenaar, R.T., et al., The effect of SUV discretization in quantitative FDG-PET Radiomics: the need for standardized methodology in tumor texture analysis. Sci Rep, 2015. 5: p. 11075.
+ [9] Grootjans, W., et al., The Impact of Optimal Respiratory Gating and Image Noise on Evaluation of Intratumor Heterogeneity on 18F-FDG PET Imaging of Lung Cancer. J Nucl Med, 2016. 57(11): p. 1692-1698.},
 optnote = {DIAG, RADIOLOGY},
 year = {2018},
 }

@@ -22904,8 +28474,10 @@ @article{Smit12
 pmid = {22332063},
 month = {4},
 gsid = {11443188990303908052},
- gscites = {61},
+ gscites = {66},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/109996},
+ ss_id = {656947026157a7e1248db7f764db9e09e98e3b18},
+ all_ss_ids = {['656947026157a7e1248db7f764db9e09e98e3b18']},
 }

 @conference{Smit12a,
@@ -22931,8 +28503,10 @@ @article{Smit13
 pmid = {23760216},
 month = {6},
 gsid = {15052282752148575019},
- gscites = {95},
+ gscites = {97},
 taverne_url = {https://repository.ubn.ru.nl/handle/2066/116747},
+ ss_id = {41ec277af272f5fd6146fb13a8816289a464f6fd},
+ all_ss_ids = {['41ec277af272f5fd6146fb13a8816289a464f6fd']},
 }

 @article{Smit15,
@@ -22949,7 +28523,9 @@ @article{Smit15
 pmid = {26113070},
 month = {6},
 gsid = {7113799955940245108},
- gscites = {16},
+ gscites = {21},
+ ss_id = {f948e6ceea98e653874bae245610957e54c76121},
+ all_ss_ids = {['f948e6ceea98e653874bae245610957e54c76121']},
 }

 @phdthesis{Smit19,
@@ -22977,23 +28553,26 @@ @inproceedings{Smit21
 @article{Smit23,
 author = {Marloes A. Smit and Francesco Ciompi and John-Melle Bokhorst and Gabi W. van Pelt and Oscar G.F. Geessink and Hein Putter and Rob A.E.M. Tollenaar and J. Han J.M. van Krieken and Wilma E. Mesker and Jeroen A.W.M.
van der Laak}, - title = {Deep learning based tumor–stroma ratio scoring in colon cancer correlates with microscopic assessment}, + title = {Deep learning based tumor-stroma ratio scoring in colon cancer correlates with microscopic assessment}, journal = {Journal of Pathology Informatics}, year = {2023}, doi = {https://doi.org/10.1016/j.jpi.2023.100191}, abstract = {Background -The amount of stroma within the primary tumor is a prognostic parameter for colon cancer patients. This phenomenon can be assessed using the tumor–stroma ratio (TSR), which classifies tumors in stroma-low (≤50% stroma) and stroma-high (>50% stroma). Although the reproducibility for TSR determination is good, improvement might be expected from automation. The aim of this study was to investigate whether the scoring of the TSR in a semi- and fully automated method using deep learning algorithms is feasible. - -Methods -A series of 75 colon cancer slides were selected from a trial series of the UNITED study. For the standard determination of the TSR, 3 observers scored the histological slides. Next, the slides were digitized, color normalized, and the stroma percentages were scored using semi- and fully automated deep learning algorithms. Correlations were determined using intraclass correlation coefficients (ICCs) and Spearman rank correlations. - -Results -37 (49%) cases were classified as stroma-low and 38 (51%) as stroma-high by visual estimation. A high level of concordance between the 3 observers was reached, with ICCs of 0.91, 0.89, and 0.94 (all P<.001). Between visual and semi-automated assessment the ICC was 0.78 (95% CI 0.23–0.91, P-value 0.005), with a Spearman correlation of 0.88 (P<.001). Spearman correlation coefficients above 0.70 (N=3) were observed for visual estimation versus the fully automated scoring procedures. - -Conclusion -Good correlations were observed between standard visual TSR determination and semi- and fully automated TSR scores. At this point, visual examination has the highest observer agreement, but semi-automated scoring could be helpful to support pathologists.}, + The amount of stroma within the primary tumor is a prognostic parameter for colon cancer patients. This phenomenon can be assessed using the tumor-stroma ratio (TSR), which classifies tumors in stroma-low (<=50% stroma) and stroma-high (>50% stroma). Although the reproducibility for TSR determination is good, improvement might be expected from automation. The aim of this study was to investigate whether the scoring of the TSR in a semi- and fully automated method using deep learning algorithms is feasible. + + Methods + A series of 75 colon cancer slides were selected from a trial series of the UNITED study. For the standard determination of the TSR, 3 observers scored the histological slides. Next, the slides were digitized, color normalized, and the stroma percentages were scored using semi- and fully automated deep learning algorithms. Correlations were determined using intraclass correlation coefficients (ICCs) and Spearman rank correlations. + + Results + 37 (49%) cases were classified as stroma-low and 38 (51%) as stroma-high by visual estimation. A high level of concordance between the 3 observers was reached, with ICCs of 0.91, 0.89, and 0.94 (all P<.001). Between visual and semi-automated assessment the ICC was 0.78 (95% CI 0.23-0.91, P-value 0.005), with a Spearman correlation of 0.88 (P<.001). 
Spearman correlation coefficients above 0.70 (N=3) were observed for visual estimation versus the fully automated scoring procedures. + + Conclusion + Good correlations were observed between standard visual TSR determination and semi- and fully automated TSR scores. At this point, visual examination has the highest observer agreement, but semi-automated scoring could be helpful to support pathologists.}, file = {Smit23.pdf:pdf\\Smit23.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {935f46ae7c1a5be1ed7a5e176db38fb919bf30df}, + all_ss_ids = {['935f46ae7c1a5be1ed7a5e176db38fb919bf30df']}, + gscites = {3}, } @inproceedings{Snoe03, @@ -23011,6 +28590,8 @@ @inproceedings{Snoe03 pmid = {15344475}, gsid = {4086411044599121897}, gscites = {6}, + ss_id = {21f36695f2f92200916359a1b61d5e5648e2dd49}, + all_ss_ids = {['21f36695f2f92200916359a1b61d5e5648e2dd49']}, } @article{Snoe04, @@ -23028,6 +28609,8 @@ @article{Snoe04 month = {7}, gsid = {2352303434720850503}, gscites = {51}, + ss_id = {8d31ec41bad5116e2cba21ba5b5d165f770c5d3c}, + all_ss_ids = {['8d31ec41bad5116e2cba21ba5b5d165f770c5d3c']}, } @inproceedings{Snoe05, @@ -23045,7 +28628,9 @@ @inproceedings{Snoe05 optnote = {DIAG, RADIOLOGY}, month = {4}, gsid = {263805911348911436}, - gscites = {18}, + gscites = {19}, + ss_id = {2e5d21f0b2553210b0b0ec6e2b8c292de8c9106c}, + all_ss_ids = {['2e5d21f0b2553210b0b0ec6e2b8c292de8c9106c']}, } @article{Snoe07, @@ -23063,7 +28648,9 @@ @article{Snoe07 pmid = {17208511}, month = {4}, gsid = {15703766258730611185}, - gscites = {11}, + gscites = {12}, + ss_id = {2a2d8905070d3dab2cdf73577193113a058326ac}, + all_ss_ids = {['2a2d8905070d3dab2cdf73577193113a058326ac']}, } @inproceedings{Snoe10, @@ -23076,7 +28663,26 @@ @inproceedings{Snoe10 file = {Snoe10.pdf:pdf\\Snoe10.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {6149147987068257017}, - gscites = {9}, + gscites = {10}, + ss_id = {9d273b257071f18da2d269a81a635f1e5dac740e}, + all_ss_ids = {['9d273b257071f18da2d269a81a635f1e5dac740e']}, +} + +@article{Snoe21, + author = {Snoeckx, Annemiek and Franck, Caro and Silva, Mario and Prokop, Mathias and Schaefer-Prokop, Cornelia and Revel, Marie-Pierre}, + title = {~~The radiologist's role in lung cancer screening}, + doi = {10.21037/tlcr-20-924}, + year = {2021}, + abstract = {The radiologist's role in lung cancer screening}, + url = {http://dx.doi.org/10.21037/TLCR-20-924}, + file = {Snoe21.pdf:pdf\\Snoe21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Translational Lung Cancer Research}, + automatic = {yes}, + citation-count = {6}, + pages = {2356-2367}, + volume = {10}, + pmid = {34164283}, } @article{Soga18, @@ -23088,7 +28694,9 @@ @article{Soga18 optnote = {DIAG}, month = {8}, gsid = {9158256103222862149}, - gscites = {4}, + gscites = {15}, + ss_id = {eb6f5da9826e08c4d940267c5c90bcbbce268f46}, + all_ss_ids = {['eb6f5da9826e08c4d940267c5c90bcbbce268f46']}, } @article{Soga20, @@ -23103,6 +28711,9 @@ @article{Soga20 abstract = {In this study, we investigate the detection of cardiomegaly on frontal chest radiographs through two alternative deep-learning approaches - via anatomical segmentation and via image-level classification. We used the publicly available ChestX-ray14 dataset, and obtained heart and lung segmentation annotations for 778 chest radiographs for the development of the segmentation-based approach. The classification-based method was trained with 65k standard chest radiographs with image-level labels. 
For both approaches, the best models were found through hyperparameter searches where architectural, learning, and regularization related parameters were optimized systematically. The resulting models were tested on a set of 367 held-out images for which cardiomegaly annotations were hand-labeled by two independent expert radiologists. Sensitivity, specificity, positive predictive value, negative predictive value, and area under the receiver operating characteristic curve (AUC) were calculated. The performance of the segmentation-based system with an AUC of 0.977 is significantly better for classifying cardiomegaly than the classification-based model which achieved an AUC of 0.941. Only the segmentation-based model achieved comparable performance to an independent expert reader (AUC of 0.978). We conclude that the segmentation-based model requires 100 times fewer annotated chest radiographs to achieve a substantially better performance, while also producing more interpretable results.}, file = {Soga20.pdf:pdf\\Soga20.pdf:PDF}, optnote = {DIAG, INPRESS, RADIOLOGY}, + ss_id = {4e152f76ccc69de9323ff63ca8fc5bbb61fa1712}, + all_ss_ids = {['4e152f76ccc69de9323ff63ca8fc5bbb61fa1712']}, + gscites = {22}, } @article{Soga22, @@ -23115,12 +28726,15 @@ @article{Soga22 pages = {4466--4477}, volume = {49}, abstract = {BACKGROUND: Total lung volume is an important quantitative biomarker and is used for the assessment of restrictive lung diseases. - PURPOSE: In this study, we investigate the performance of several deep-learning approaches for automated measurement of total lung volume from chest radiographs. - METHODS: About 7621 posteroanterior and lateral view chest radiographs (CXR) were collected from patients with chest CT available. Similarly, 928 CXR studies were chosen from patients with pulmonary function test (PFT) results. The reference total lung volume was calculated from lung segmentation on CT or PFT data, respectively. This dataset was used to train deep-learning architectures to predict total lung volume from chest radiographs. The experiments were constructed in a stepwise fashion with increasing complexity to demonstrate the effect of training with CT-derived labels only and the sources of error. The optimal models were tested on 291 CXR studies with reference lung volume obtained from PFT. Mean absolute error (MAE), mean absolute percentage error (MAPE), and Pearson correlation coefficient (Pearson's r) were computed. - RESULTS: The optimal deep-learning regression model showed an MAE of 408 ml and an MAPE of 8.1\% using both frontal and lateral chest radiographs as input. The predictions were highly correlated with the reference standard (Pearson's r = 0.92). CT-derived labels were useful for pretraining but the optimal performance was obtained by fine-tuning the network with PFT-derived labels. - CONCLUSION: We demonstrate, for the first time, that state-of-the-art deep-learning solutions can accurately measure total lung volume from plain chest radiographs. The proposed model is made publicly available and can be used to obtain total lung volume from routinely acquired chest radiographs at no additional cost. This deep-learning system can be a useful tool to identify trends over time in patients referred regularly for chest X-ray.}, + PURPOSE: In this study, we investigate the performance of several deep-learning approaches for automated measurement of total lung volume from chest radiographs. 
+ METHODS: About 7621 posteroanterior and lateral view chest radiographs (CXR) were collected from patients with chest CT available. Similarly, 928 CXR studies were chosen from patients with pulmonary function test (PFT) results. The reference total lung volume was calculated from lung segmentation on CT or PFT data, respectively. This dataset was used to train deep-learning architectures to predict total lung volume from chest radiographs. The experiments were constructed in a stepwise fashion with increasing complexity to demonstrate the effect of training with CT-derived labels only and the sources of error. The optimal models were tested on 291 CXR studies with reference lung volume obtained from PFT. Mean absolute error (MAE), mean absolute percentage error (MAPE), and Pearson correlation coefficient (Pearson's r) were computed. + RESULTS: The optimal deep-learning regression model showed an MAE of 408 ml and an MAPE of 8.1\% using both frontal and lateral chest radiographs as input. The predictions were highly correlated with the reference standard (Pearson's r = 0.92). CT-derived labels were useful for pretraining but the optimal performance was obtained by fine-tuning the network with PFT-derived labels. + CONCLUSION: We demonstrate, for the first time, that state-of-the-art deep-learning solutions can accurately measure total lung volume from plain chest radiographs. The proposed model is made publicly available and can be used to obtain total lung volume from routinely acquired chest radiographs at no additional cost. This deep-learning system can be a useful tool to identify trends over time in patients referred regularly for chest X-ray.}, file = {PubMed entry:http\://www.ncbi.nlm.nih.gov/pubmed/35388486:text/html}, pmid = {35388486}, + ss_id = {1d5f65d8f721089fca1e7fac8d1ef214f12e2c23}, + all_ss_ids = {['1d5f65d8f721089fca1e7fac8d1ef214f12e2c23']}, + gscites = {4}, } @article{Soly15, @@ -23137,13 +28751,16 @@ @article{Soly15 number = {10}, pmid = {26430890}, month = {10}, + ss_id = {21a62e34c2b257a66a7f2f539364594b7756a681}, + all_ss_ids = {['21a62e34c2b257a66a7f2f539364594b7756a681']}, + gscites = {19}, } @mastersthesis{Sons19, author = {Patrick Sonsma}, title = {Lymphocyte detection in hematoxylin-eosin stained histopathological images of breast cancer}, abstract = {Lymphocytes are immune cells that form an important bio-marker in the prognosis of breast cancer. In some cases more effective treatment can be chosen based on the lymphocyte presence near tumor regions. For trained pathologists the detection of lymphocytes in Hematoxylin-Eosin stained images is however a challenging and time intensive task with subjective interpretations. In this research we explore the lymphocyte detection problem with a deep learning approach and strive towards a robust, objective and efficient tool for computer aided diagnosis. - We generate a large data-set with machine produced labels by applying an existing model on destained and restained immunohistochemical histopathological images. On this data we train and evaluate a more minimal rendition of the known YOLO object detection model and report moderate results.}, + We generate a large data-set with machine produced labels by applying an existing model on destained and restained immunohistochemical histopathological images. 
On this data we train and evaluate a more minimal rendition of the known YOLO object detection model and report moderate results.}, file = {Sons19.pdf:pdf\\Sons19.pdf:PDF}, optnote = {DIAG}, school = {Radboud University}, @@ -23193,22 +28810,32 @@ @conference{Spro22 booktitle = {Immuno-Oncology and Technology}, year = {2022}, abstract = {Background -Immunotherapy has become the standard of care for metastatic non-small cell lung cancer (mNSCLC) without a targetable driver alteration, yet we still lack insight into which patients (pts) will benefit from such treatments. To that end, we investigated characteristics of the immune infiltrate in the tumor microenvironment in relation to immunotherapy response. We report the results of an automated deep learning approach applied to digital H&E whole slide images (WSIs) of pre-treatment biopsies from the PEMBRO-RT clinical trial. - -Methods -61 quality-checked H&E WSIs were processed with 3 deep learning algorithms. We extracted a tissue mask using an existing method (Bándi et al., 2019), and detected tumor and immune cells using HoVerNet (Graham et al., 2019). Tumor clusters were identified by combining the output of HoVerNet and tumor segmentation from an nnUnet (Isensee et al., 2021) model that we trained on external NSCLC images. From the output of this pipeline, we extracted immune infiltrate-based density metrics, calculated over all tissue (allINF), stroma within 500um from the tumor border (sINF), tumor region (tINF), and the combination of stroma and tumor (t+sINF). All metrics were used in ROC analysis after dichotomizing pts as responders and non-responders (response was defined as complete or partial response at any time point or stable disease for ≥12 weeks according to RECIST 1.1 measurement). Differences in metric distributions between the two groups were tested with a two-sided Welch t-test. Kaplan-Meier (KM) analysis was performed on progression-free survival (5-year follow-up). - -Results -Our automated analysis reported denser immune infiltrates in responders, although not statistically significant (0.05 0.63, where tINF reported an AUC of 0.70. KM analysis showed p=0.07 if pts were stratified based on the median tINF, and p=0.02 if stratified based on the optimal operating point of its ROC curve. - -Conclusions -Deep learning models that analyze the immune infiltrate density on H&E WSIs can identify mNSCLC responders to pembrolizumab.}, + Immunotherapy has become the standard of care for metastatic non-small cell lung cancer (mNSCLC) without a targetable driver alteration, yet we still lack insight into which patients (pts) will benefit from such treatments. To that end, we investigated characteristics of the immune infiltrate in the tumor microenvironment in relation to immunotherapy response. We report the results of an automated deep learning approach applied to digital H&E whole slide images (WSIs) of pre-treatment biopsies from the PEMBRO-RT clinical trial. + + Methods + 61 quality-checked H&E WSIs were processed with 3 deep learning algorithms. We extracted a tissue mask using an existing method (Bandi et al., 2019), and detected tumor and immune cells using HoVerNet (Graham et al., 2019). Tumor clusters were identified by combining the output of HoVerNet and tumor segmentation from an nnUnet (Isensee et al., 2021) model that we trained on external NSCLC images. 
From the output of this pipeline, we extracted immune infiltrate-based density metrics, calculated over all tissue (allINF), stroma within 500um from the tumor border (sINF), tumor region (tINF), and the combination of stroma and tumor (t+sINF). All metrics were used in ROC analysis after dichotomizing pts as responders and non-responders (response was defined as complete or partial response at any time point or stable disease for >=12 weeks according to RECIST 1.1 measurement). Differences in metric distributions between the two groups were tested with a two-sided Welch t-test. Kaplan-Meier (KM) analysis was performed on progression-free survival (5-year follow-up). + + Results + Our automated analysis reported denser immune infiltrates in responders, although not statistically significant (0.05 0.63, where tINF reported an AUC of 0.70. KM analysis showed p=0.07 if pts were stratified based on the median tINF, and p=0.02 if stratified based on the optimal operating point of its ROC curve. + + Conclusions + Deep learning models that analyze the immune infiltrate density on H&E WSIs can identify mNSCLC responders to pembrolizumab.}, optnote = {DIAG, RADIOLOGY}, } -@article{Srik12, - author = {Srikantha, Abhilash and Harz, Markus and Wang, Lei and Platel, Bram and Mann, Ritse M. and Hahn, Horst K. and Peitgen, Heinz-Otto}, - title = {Symmetry-based detection of ductal carcinoma in situ in breast {MRI}}, +@inproceedings{Spro23, + author = {Joey Spronck and Thijs Gelton and Leander van Eekelen and Joep Bogaerts and Leslie Tessier and Mart van Rijthoven and Lieke van der Woude and Michel van den Heuvel and Willemijn Theelen and Jeroen van der Laak and Francesco Ciompi}, + booktitle = MIDL, + title = {nnUNet meets pathology: bridging the gap for application to whole-slide images and computational biomarkers}, + url = {https://openreview.net/forum?id=aHuwlUu_QR}, + abstract = {Image segmentation is at the core of many tasks in medical imaging, including the engineering of computational biomarkers. While the self-configuring nnUNet framework for image segmentation tasks completely shifted the state-of-the-art in the radiology field, it has never been applied to the field of histopathology, likely due to inherent limitations that nnUNet has when applied to the pathology domain "off-the-shelf". We introduce nnUNet for pathology, built upon the original nnUNet, and engineered domain-specific solutions to bridge the gap between radiology and pathology. Our framework includes critical hyperparameter adjustments and pathology-specific color augmentations, as well as an essential whole-slide image inference pipeline. We developed and validated our approach on non-small cell lung cancer data, showing the effectiveness of nnUNet for pathology over default nnUNet settings, and achieved the first position on the experimental segmentation task of the TIGER challenge on breast cancer data when using our pipeline "out-of-the-box". We also introduce a novel inference uncertainty approach, which proved helpful for the quantification of the tumor-infiltrating lymphocytes biomarker in non-small cell lung cancer biopsies of patients treated with immunotherapy. We coded our framework as a workflow-friendly segmentation tool and made it publicly available.}, + optnote = {DIAG, PATHOLOGY}, + year = {2023}, +} + +@article{Srik12, + author = {Srikantha, Abhilash and Harz, Markus and Wang, Lei and Platel, Bram and Mann, Ritse M. and Hahn, Horst K. 
and Peitgen, Heinz-Otto},
  title = {Symmetry-based detection of ductal carcinoma in situ in breast {MRI}},
  journal = EJR,
  year = {2013},
  volume = {81 Suppl 1},
@@ -23220,6 +28847,8 @@ @article{Srik12
  month = {9},
  gsid = {10081678298073820973},
  gscites = {2},
+  ss_id = {e5f7b5efa305a5936b7a89db4a30c35a8e51fe76},
+  all_ss_ids = {['e5f7b5efa305a5936b7a89db4a30c35a8e51fe76']},
}

@inproceedings{Srik13,
@@ -23249,7 +28878,9 @@ @inproceedings{Staa02
  file = {Staa02.pdf:pdf\\Staa02.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
  gsid = {13070870055231707928},
-  gscites = {12},
+  gscites = {14},
+  ss_id = {d5ea31e555f04e668dce705c220241750de6b4f6},
+  all_ss_ids = {['d5ea31e555f04e668dce705c220241750de6b4f6']},
}

@phdthesis{Staa04,
@@ -23284,6 +28915,8 @@ @article{Staa04a
  month = {4},
  gsid = {1421589366492561845},
  gscites = {2720},
+  ss_id = {ac9748ea3945eb970cc32a37db7cfdfd0f22e74c},
+  all_ss_ids = {['ac9748ea3945eb970cc32a37db7cfdfd0f22e74c']},
}

@inproceedings{Staa04b,
@@ -23299,6 +28932,8 @@ @inproceedings{Staa04b
  optnote = {DIAG, RADIOLOGY},
  gsid = {917845112200227194},
  gscites = {16},
+  ss_id = {e2a49c9e56d53f9b09ed50de495452bbf74adc03},
+  all_ss_ids = {['e2a49c9e56d53f9b09ed50de495452bbf74adc03']},
}

@article{Staa07,
@@ -23317,6 +28952,8 @@ @article{Staa07
  month = {2},
  gsid = {6032541118265568544},
  gscites = {72},
+  ss_id = {6c776183e47ff076b5de754dfd1f3942e1aecbac},
+  all_ss_ids = {['6c776183e47ff076b5de754dfd1f3942e1aecbac']},
}

@inproceedings{Stan18,
@@ -23331,6 +28968,9 @@ @inproceedings{Stan18
  file = {:pdf/Stan18.pdf:PDF},
  optnote = {DIAG},
  month = {2},
+  ss_id = {a5535bb713b3cf4546242a3434877c95ee02b02b},
+  all_ss_ids = {['a5535bb713b3cf4546242a3434877c95ee02b02b']},
+  gscites = {13},
}

@article{Star09,
@@ -23348,8 +28988,10 @@ @article{Star09
  pmid = {19104438},
  month = {2},
  gsid = {16017624786647148270},
-  gscites = {17},
+  gscites = {19},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/80400},
+  ss_id = {d64d150155c9e1995efae28446625f1dfced6031},
+  all_ss_ids = {['d64d150155c9e1995efae28446625f1dfced6031']},
}

@inproceedings{Stav96,
@@ -23402,6 +29044,9 @@ @article{Stee17
  pmid = {28130702},
  year = {2017},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/174271},
+  ss_id = {17e7607acf9b2aeda7b32e369df4a5ea2f903194},
+  all_ss_ids = {['17e7607acf9b2aeda7b32e369df4a5ea2f903194']},
+  gscites = {21},
}

@conference{Steg09,
@@ -23455,6 +29100,8 @@ @article{Stei15
  pmid = {26767179},
  gsid = {16224410692512843779},
  gscites = {15},
+  ss_id = {f00068fa78dee1aba549dda6d6dca018287bae61},
+  all_ss_ids = {['f00068fa78dee1aba549dda6d6dca018287bae61']},
}

@article{Stoe08,
@@ -23472,14 +29119,16 @@ @article{Stoe08
  month = {4},
  gsid = {8941581572812805403},
  gscites = {11},
+  ss_id = {399a236e0f31ec8a941285aae48be979bcd875ee},
+  all_ss_ids = {['399a236e0f31ec8a941285aae48be979bcd875ee']},
}

@mastersthesis{Stoe19,
  author = {Emiel Stoelinga},
  title = {Extracting biomarkers from hematoxylin-eosin stained histopathological images of lung cancer},
  abstract = {In this thesis the technique of deep learning was applied to the field of digital pathology, more specifically lung cancer, to extract several different biomarkers. Tertiary lymphoid structures (TLS) have been found to indicate a positive patient prognosis, especially in combination with germinal centers (GC). Therefore, a VGG16-like network was trained to detect TLS and GC in histopathological slides of lung squamous cell carcinoma with F1 scores on the pixel level of 0.922 and 0.802 respectively.
Performance on a different held-out test set on the object level was 0.640 and 0.500 for TLS and GC respectively.
-    Treatment differs per growth pattern of lung adenocarcinoma and variability between pathol ogists in the assessment of lung adenocarcinoma exists. Therefore, a similar VGG16-like network was trained to segment growth patterns of adenocarcinoma in slides of lung tissue with F1 scores on the pixel level of 0.891, 0.524, 0.812 and 0.954 for solid adenocarcinoma, acinar adenocarcinoma, micropapillary adenocarcinoma and non-tumor tissue respectively.
-    Because the previous system was only trained on sparsely annotated data and consequently did not encounter neighbouring growth patterns of lung adenocarcinoma, a method with genera tive adversarial networks to generate fake densely annotated realistic looking image patches from sparsely annotated data was examined and a comparison between three types of models was made.},
+    Treatment differs per growth pattern of lung adenocarcinoma and variability between pathologists in the assessment of lung adenocarcinoma exists. Therefore, a similar VGG16-like network was trained to segment growth patterns of adenocarcinoma in slides of lung tissue with F1 scores on the pixel level of 0.891, 0.524, 0.812 and 0.954 for solid adenocarcinoma, acinar adenocarcinoma, micropapillary adenocarcinoma and non-tumor tissue respectively.
+    Because the previous system was only trained on sparsely annotated data and consequently did not encounter neighbouring growth patterns of lung adenocarcinoma, a method with generative adversarial networks to generate fake densely annotated realistic looking image patches from sparsely annotated data was examined and a comparison between three types of models was made.},
  file = {Stoe19.pdf:pdf\\Stoe19.pdf:PDF},
  optnote = {DIAG},
  school = {Radboud University},
@@ -23493,37 +29142,37 @@ @conference{Stoi17a
  booktitle = {European Society for Magnetic Resonance in Medicine and Biology},
  year = {2017},
  abstract = {Purpose / Introduction
-    Prostate MRI finds 18% more clinically significant prostate cancer while avoiding 27% biopsies [1]. Reproducibility for multi-parametric T2+DWI+DCE
-    prostate MRI mpMRI is moderate [2] even though a PIRADS reading standard is available [3]. Quantification could help improve reproducibility, which to
-    some extent works for ADC. Scanner provided T2 maps are no solution as it leads to a different texture, lower spatial resolution and increased scan time.
-    We have previously developed a method for normalizing T2-weighted images [4]. The normalized value achieved a diagnostic accuracy AUC of 0.85
-    over 0.64 for the raw T2-weighted values. That method required a separate proton density weighted sequence, an exact knowledge of the sequence
-    model and one reference tissue region. We propose a new method using multiple reference tissues that does not require an additional sequence, nor
-    detailed knowledge about the sequence model. The recent development of deep learning makes it feasible to segment multiple reference tissues. The
-    hypothesis is that the reference tissues allow building a patient specific model to normalize the T2-weighted prostate MR images for quantitative use.
-    Subjects and Methods
-    To test the hypothesis we manually delineated reference tissues and tumor lesions in mpMRI studies of prostate cancer patients. All lesions were
-    interpreted by expert radiologists and assigned a PIRADS score.
The normalized T2 was then validated for its ability to discriminate PIRADS 2-3 from 4-5 - classes. Regions of interest ROI were drawn in four distinct tissue types in fifty T2-weighted images from regular multiparametric prostate MRI mpMRI. - The four reference tissue types were: obturator internus muscle, body fat, femoral head, bladder lumen. Four average ROI signals were computed per - patient. Each reference tissue was assigned a fixed reference value T2 relaxation found in literature. Per patient, a smooth spline model was fitted to the - average, reference pairs. The estimated spline model was then inverted to map patients' raw T2-weighted image scalar values to normalized values. The - effect of the normalization was determined by computing and comparing the diagnostic accuracy using ROC analysis. - Results - The area under the ROC AUC was significantly higher p<0.05 in normalized T2.5/22/2017 #542: Feasibility of multireference tissue normalization of T2weighted prostate MRI. - - Discussion / Conclusion - The significant improvement of the diagnostic accuracy demonstrates the potential of our normalization method for the quantitative interpretation of T2-weighted prostate MRI. The results were similar to our previous method.The method still requires manual delineation of multiple reference tissues, - however, we will develop deep learning segmentation methods to automate the method and enable regular clinical use. - References - 1. Ahmed, Hashim U., et al. "Diagnostic accuracy of multi?parametric MRI and TRUS biopsy in prostate cancer PROMIS: a paired validating confirmatory - study." The Lancet 389.10071 2017: 815?822. - 2. Rosenkrantz, Andrew B., et al. "Interobserver reproducibility of the PI?RADS version 2 lexicon: a multicenter study of six experienced prostate - radiologists." Radiology 280.3 2016: 793?804. - 3. Barentsz JO, et al. Synopsis of the PI?RADS v2 Guidelines for Multiparametric Prostate Magnetic Resonance Imaging and Recommendations for Use. - Eur. Urol. 2016;691:41-49. - 4. Vos, Pieter C., et al. "Computer?assisted analysis of peripheral zone prostate lesions using T2?weighted and dynamic contrast?enhanced T1?weighted - MRI." Physics Med. & Biol. 55.6 2010: 1719}, + Prostate MRI finds 18% more clinically significant prostate cancer while avoiding 27% biopsies [1]. Reproducibility for multi-parametric T2+DWI+DCE + prostate MRI mpMRI is moderate [2] even though a PIRADS reading standard is available [3]. Quantification could help improve reproducibility, which to + some extent works for ADC. Scanner provided T2 maps are no solution as it leads to a different texture, lower spatial resolution and increased scan time. + We have previously developed a method for normalizing T2-weighted images [4]. The normalized value achieved a diagnostic accuracy AUC of 0.85 + over 0.64 for the raw T2-weighted values. That method required a separate proton density weighted sequence, an exact knowledge of the sequence + model and one reference tissue region. We propose a new method using multiple reference tissues that does not require an additional sequence, nor + detailed knowledge about the sequence model. The recent development of deep learning makes it feasible to segment multiple reference tissues. The + hypothesis is that the reference tissues allow building a patient specific model to normalize the T2-weighted prostate MR images for quantitative use. 
+    Subjects and Methods
+    To test the hypothesis we manually delineated reference tissues and tumor lesions in mpMRI studies of prostate cancer patients. All lesions were
+    interpreted by expert radiologists and assigned a PIRADS score. The normalized T2 was then validated for its ability to discriminate PIRADS 2-3 from 4-5
+    classes. Regions of interest (ROI) were drawn in four distinct tissue types in fifty T2-weighted images from regular multiparametric prostate MRI (mpMRI).
+    The four reference tissue types were: obturator internus muscle, body fat, femoral head, bladder lumen. Four average ROI signals were computed per
+    patient. Each reference tissue was assigned a fixed reference value (T2 relaxation found in literature). Per patient, a smooth spline model was fitted to the
+    (average, reference) pairs. The estimated spline model was then inverted to map patients' raw T2-weighted image scalar values to normalized values. The
+    effect of the normalization was determined by computing and comparing the diagnostic accuracy using ROC analysis.
+    Results
+    The area under the ROC AUC was significantly higher (p<0.05) in normalized T2.
+
+    Discussion / Conclusion
+    The significant improvement of the diagnostic accuracy demonstrates the potential of our normalization method for the quantitative interpretation of T2-weighted prostate MRI. The results were similar to our previous method. The method still requires manual delineation of multiple reference tissues,
+    however, we will develop deep learning segmentation methods to automate the method and enable regular clinical use.
+    References
+    1. Ahmed, Hashim U., et al. "Diagnostic accuracy of multi-parametric MRI and TRUS biopsy in prostate cancer PROMIS: a paired validating confirmatory
+    study." The Lancet 389.10071 2017: 815-822.
+    2. Rosenkrantz, Andrew B., et al. "Interobserver reproducibility of the PI-RADS version 2 lexicon: a multicenter study of six experienced prostate
+    radiologists." Radiology 280.3 2016: 793-804.
+    3. Barentsz JO, et al. Synopsis of the PI-RADS v2 Guidelines for Multiparametric Prostate Magnetic Resonance Imaging and Recommendations for Use.
+    Eur. Urol. 2016;69(1):41-49.
+    4. Vos, Pieter C., et al. "Computer-assisted analysis of peripheral zone prostate lesions using T2-weighted and dynamic contrast-enhanced T1-weighted
+    MRI." Physics Med. & Biol. 55.6 2010: 1719},
  optnote = {DIAG, RADIOLOGY},
}

@@ -23533,28 +29182,28 @@ @conference{Stoi17b
  booktitle = RSNA,
  year = {2017},
  abstract = {PURPOSE
-    To explore a novel multireferencetissue normalization method applied to t2weighted prostate MRI.
-    METHOD AND MATERIALS
-    Assuming the availability of a set of distinct reference tissue segmentations, the hypothesis is that it allows computing a patient specific sequence model
-    that can normalize MRI. The normalization should produce similar scalar values in the same reference regions for different patients/scanners/sequences
-    and interpolate in between reference values for other tissue areas. Regions of interest (ROI) were drawn in four distinct tissue types in a cohort of sixtyfive t2weighted images from regular multiparametric prostate MRI (mpMRI). The four reference tissue types were: skeletal muscle, body fat, femur
-    head, bladder lumen. Four average ROI signals were computed per patient. Each reference tissue was assigned a fixed reference value (t2 relaxation
-    found in literature).
Per patient, a smooth sequence model was fitted to the (average, reference) pairs. The estimated sequence model was then
-    inverted to map patients' raw t2weighted image scalar values to normalized values. To test the method, the effect of normalization on observed
-    variance and tissue discriminability was analyzed. A leaveoneout experiment was performed in which for each ROI its normalized value was computed
-    using the sequence model estimate using the three remaining reference ROIs. The difference between original t2weighted and normalized scalar MRI
-    was analyzed by means of variability and ROC analysis.
-    RESULTS
-    Multireferencetissue normalization significantly (p<0.05) decreased variability and increased the area under the ROC curve for discriminating each
-    reference tissue combination. The ROC curves in the figure show the effect of the normalization (T2n) on the discrimination between body fat and
-    femur head tissue.
-    CONCLUSION
-    Semiautomatic multireferencetissue normalization shows reduced interpatient variability and may allow better quantitative discrimination between
-    tissue types.
-    CLINICAL RELEVANCE/APPLICATION
-    Multireferencetissue t2weighted MRI normalization seems feasible. In combination with automatic segmentation, this could be providing clinical
-    quantitative imaging support to mpMRI diagnosis of prostate cancer. This result motivates us to continue to explore the ability of this novel method to
-    help detect and discriminate prostate cancer in mpMR},
+    To explore a novel multi-reference-tissue normalization method applied to T2-weighted prostate MRI.
+    METHOD AND MATERIALS
+    Assuming the availability of a set of distinct reference tissue segmentations, the hypothesis is that it allows computing a patient specific sequence model
+    that can normalize MRI. The normalization should produce similar scalar values in the same reference regions for different patients/scanners/sequences
+    and interpolate in between reference values for other tissue areas. Regions of interest (ROI) were drawn in four distinct tissue types in a cohort of sixty-five T2-weighted images from regular multiparametric prostate MRI (mpMRI). The four reference tissue types were: skeletal muscle, body fat, femur
+    head, bladder lumen. Four average ROI signals were computed per patient. Each reference tissue was assigned a fixed reference value (T2 relaxation
+    found in literature). Per patient, a smooth sequence model was fitted to the (average, reference) pairs. The estimated sequence model was then
+    inverted to map patients' raw T2-weighted image scalar values to normalized values. To test the method, the effect of normalization on observed
+    variance and tissue discriminability was analyzed. A leave-one-out experiment was performed in which for each ROI its normalized value was computed
+    using the sequence model estimated from the three remaining reference ROIs. The difference between original T2-weighted and normalized scalar MRI
+    was analyzed by means of variability and ROC analysis.
+    RESULTS
+    Multi-reference-tissue normalization significantly (p<0.05) decreased variability and increased the area under the ROC curve for discriminating each
+    reference tissue combination. The ROC curves in the figure show the effect of the normalization (T2n) on the discrimination between body fat and
+    femur head tissue.
+    CONCLUSION
+    Semiautomatic multi-reference-tissue normalization shows reduced interpatient variability and may allow better quantitative discrimination between
+    tissue types.
+    CLINICAL RELEVANCE/APPLICATION
+    Multi-reference-tissue T2-weighted MRI normalization seems feasible. In combination with automatic segmentation, this could be providing clinical
+    quantitative imaging support to mpMRI diagnosis of prostate cancer. This result motivates us to continue to explore the ability of this novel method to
+    help detect and discriminate prostate cancer in mpMRI},
  optnote = {DIAG, RADIOLOGY},
}

@@ -23616,8 +29265,10 @@ @article{Stou07
  number = {3},
  pmid = {17729367},
  gsid = {14001259981424088431},
-  gscites = {41},
+  gscites = {43},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/52499},
+  ss_id = {35b0e9227e6a3141518aa6791d233272d003ce3a},
+  all_ss_ids = {['35b0e9227e6a3141518aa6791d233272d003ce3a']},
}

@phdthesis{Stou11,
@@ -23650,6 +29301,8 @@ @article{Stou12
  gsid = {3993348269768594312},
  gscites = {8},
  taverne_url = {https://repository.ubn.ru.nl/handle/2066/110666},
+  ss_id = {e556afa60fd6588be8120f74d3a270930cb9869f},
+  all_ss_ids = {['e556afa60fd6588be8120f74d3a270930cb9869f']},
}

@book{Stoy18,
@@ -23673,12 +29326,13 @@ @conference{Stud20
  year = {2020},
  optnote = {DIAG, RADIOLOGY},
  abstract = {Background & objectives: Tumour budding, and T-cells are robust prognostic biomarkers in colorectal cancer. A combined analysis is complex and can be greatly expedited and automated using deep learning. The implementation of computer-based analysis in diagnostics is challenging and necessitates extensive validation.
+
+ Methods: Randomly selected (n=61) double-stained immunohistochemical slides (AE1-AE3 pancytokeratin for tumour buds and CD8 for cytotoxic T-cells) from our pT1 cohort from 3 different institutions were used to validate the deep learning algorithms for tumour budding and CD8 T-cell detection developed by the International Budding Consortium Computational Pathology Group. Staining and scanning were performed in a single laboratory.
+
+ Results: In the visually identified tumour budding hotspot (0.785 mm2), tumour buds were manually annotated, and the output of the T-cell algorithm manually corrected by a single observer. For budding, 645 out of the 1'306 buds were correctly identified by the algorithm.
Recall and precision were 49.4% and 61.4%, respectively. For the T-cells, 89.3% were correctly detected (from a total of 16'296). The recall was 90.3% and the precision was 87.3%. Reasons for misclassified T-cells included staining intensity, suboptimal tissue recognition and slide artifacts. + + Conclusion: Our preliminary data demonstrates satisfactory results for T-cell detection. Automated budding detection is more difficult, as inter-observer variability of bud calling is high among experts. These issues merit consideration when developing reliable deep learning algorithms examining the tumour/host interface.}, +} @inproceedings{Stud22, author = {Linda Studer and John-Melle Bokhorst and Francesco Ciompi and Andreas Fischer and Heather Dawson}, @@ -23688,16 +29342,17 @@ @inproceedings{Stud22 optnote = {DIAG, RADIOLOGY}, file = {Stud22.pdf:pdf/Stud22.pdf:PDF}, abstract = {Introduction -As pT1 colorectal cancers (CRC) tend to be overtreated, we investigate the previously proposed BTS (budding-T-cell-score = (#tumor-buds+1)/(#T-cells+1)) as a predictive marker to assess patients' need for resection. BTS was shown to be a better predictor of survival and other clinical factors than individual scoring. - -Materials and Methods -We consider hotspots annotated by a pathologist according to the ITBCC guidelines on double-stained (AE1-AE3 pan-cytokeratin and CD8+) WSI from our pT1 CRC cohort (N=573). Within hotspots, tumor-buds and T-cells are automatically detected using convolutional neural networks and counted. The patients are divided into two groups based on their need for resection (no: N0 / follow-up without recurrence; yes: N1 / follow-up with recurrence). The dataset is imbalanced (89.2%/10.8%). To predict the patient group, we train a support-vector machine with data-balancing using the tumor-buds or T-cell counts individually, together, and just the BTS. We report the weighted accuracy, and sensitivity and specificity for the “yes” group. - -Results -The highest weighted accuracy (62.8土6.5%) and precision (17.6土3.7%) are achieved using the tumor-buds count. Using the BTS achieves a sensitivity of 98.3土2.9%, which outperforms the other models by more than 30%. - -Conclusion -We show that combined assessment of tumor-buds and T-cells has the potential to serve as a predictive marker for the need of resection in pT1 cancers. However, there is still much room for improvement, as the low specificity still leads to overtreatment. We aim to address this in future work by also considering the spatial relationship of tumor-buds and T-cells and other predictive factors of nodal metastasis.},} + As pT1 colorectal cancers (CRC) tend to be overtreated, we investigate the previously proposed BTS (budding-T-cell-score = (#tumor-buds+1)/(#T-cells+1)) as a predictive marker to assess patients' need for resection. BTS was shown to be a better predictor of survival and other clinical factors than individual scoring. + + Materials and Methods + We consider hotspots annotated by a pathologist according to the ITBCC guidelines on double-stained (AE1-AE3 pan-cytokeratin and CD8+) WSI from our pT1 CRC cohort (N=573). Within hotspots, tumor-buds and T-cells are automatically detected using convolutional neural networks and counted. The patients are divided into two groups based on their need for resection (no: N0 / follow-up without recurrence; yes: N1 / follow-up with recurrence). The dataset is imbalanced (89.2%/10.8%). 
To predict the patient group, we train a support-vector machine with data-balancing using the tumor-buds or T-cell counts individually, together, and just the BTS. We report the weighted accuracy, and sensitivity and specificity for the "yes" group.
+
+ Results
+ The highest weighted accuracy (62.8+-6.5%) and precision (17.6+-3.7%) are achieved using the tumor-buds count. Using the BTS achieves a sensitivity of 98.3+-2.9%, which outperforms the other models by more than 30%.
+
+ Conclusion
+ We show that combined assessment of tumor-buds and T-cells has the potential to serve as a predictive marker for the need of resection in pT1 cancers. However, there is still much room for improvement, as the low specificity still leads to overtreatment. We aim to address this in future work by also considering the spatial relationship of tumor-buds and T-cells and other predictive factors of nodal metastasis.},
}

@article{Stur19,
  author = {Sturm, Bart and Creytens, David and Cook, Martin G and Smits, Jan and van Dijk, Marcory C R F and Eijken, Erik and Kurpershoek, Eline and Kusters-Vandevelde, Heidi V N and Ooms, Ariadne H A G and Wauters, Carla and Blokx, Willeke A M and van der Laak, Jeroen A W M},
@@ -23713,7 +29368,43 @@ @article{Stur19
  optnote = {DIAG, RADIOLOGY},
  pmid = {30972225},
  gsid = {13384271727437089166},
-  gscites = {1},
+  gscites = {4},
+  ss_id = {9ff29e9553f0d93ef599c5afa90f311cc907d507},
+  all_ss_ids = {['9ff29e9553f0d93ef599c5afa90f311cc907d507']},
+}
+
+@article{Stur22,
+  author = {Sturm, Bart and Creytens, David and Smits, Jan and Ooms, Ariadne H. A. G. and Eijken, Erik and Kurpershoek, Eline and K\"{u}sters-Vandevelde, Heidi V. N. and Wauters, Carla and Blokx, Willeke A. M. and van der Laak, Jeroen A. W. M.},
+  title = {~~Computer-Aided Assessment of Melanocytic Lesions by Means of a Mitosis Algorithm},
+  doi = {10.3390/diagnostics12020436},
+  year = {2022},
+  abstract = {An increasing number of pathology laboratories are now fully digitised, using whole slide imaging (WSI) for routine diagnostics. WSI paves the road to use artificial intelligence (AI) that will play an increasing role in computer-aided diagnosis (CAD). In melanocytic skin lesions, the presence of a dermal mitosis may be an important clue for an intermediate or a malignant lesion and may indicate worse prognosis. In this study a mitosis algorithm primarily developed for breast carcinoma is applied to melanocytic skin lesions. This study aimed to assess whether the algorithm could be used in diagnosing melanocytic lesions, and to study the added value in diagnosing melanocytic lesions in a practical setting. WSI's of a set of hematoxylin and eosin (H&E) stained slides of 99 melanocytic lesions (35 nevi, 4 intermediate melanocytic lesions, and 60 malignant melanomas, including 10 nevoid melanomas), for which a consensus diagnosis was reached by three academic pathologists, were subjected to a mitosis algorithm based on AI. Two academic and six general pathologists specialized in dermatopathology examined the WSI cases two times, first without mitosis annotations and after a washout period of at least 2 months with mitosis annotations based on the algorithm. The algorithm indicated true mitosis in lesional cells, i.e., melanocytes, and non-lesional cells, i.e., mainly keratinocytes and inflammatory cells. A high number of false positive mitosis was indicated as well, comprising melanin pigment, sebaceous glands nuclei, and spindle cell nuclei such as stromal cells and neuroid differentiated melanocytes.
All but one pathologist reported more often a dermal mitosis with the mitosis algorithm, which on a regular basis, was incorrectly attributed to mitoses from mainly inflammatory cells. The overall concordance of the pathologists with the consensus diagnosis for all cases excluding nevoid melanoma (n = 89) appeared to be comparable with and without the use of AI (89% vs. 90%). However, the concordance increased by using AI in nevoid melanoma cases (n = 10) (75% vs. 68%). This study showed that in general cases, pathologists perform similarly with the aid of a mitosis algorithm developed primarily for breast cancer. In nevoid melanoma cases, pathologists perform better with the algorithm. From this study, it can be learned that pathologists need to be aware of potential pitfalls using CAD on H&E slides, e.g., misinterpreting dermal mitoses in non-melanotic cells.}, + url = {http://dx.doi.org/10.3390/diagnostics12020436}, + file = {Stur22.pdf:pdf\\Stur22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Diagnostics}, + automatic = {yes}, + citation-count = {6}, + pages = {436}, + volume = {12}, + pmid = {35204526}, +} + +@article{Stög17, + author = {St\"{o}ger, Lauran and Schaefer-Prokop, Cornelia and Geurts, Bram H.J.}, + title = {~~Imaging of nontraumatic thoracic emergencies}, + doi = {10.1097/mcp.0000000000000355}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1097/MCP.0000000000000355}, + file = {Stog17.pdf:pdf\\Stog17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Current Opinion in Pulmonary Medicine}, + automatic = {yes}, + citation-count = {0}, + pages = {184-192}, + volume = {23}, + pmid = {28009644}, } @article{Sunoqrot22a, @@ -23726,6 +29417,26 @@ @article{Sunoqrot22a url = {https://eurradiolexp.springeropen.com/articles/10.1186/s41747-022-00288-8}, abstract = {Artificial intelligence (AI) for prostate magnetic resonance imaging (MRI) is starting to play a clinical role for prostate cancer (PCa) patients. AI-assisted reading is feasible, allowing workflow reduction. A total of 3,369 multi-vendor prostate MRI cases are available in open datasets, acquired from 2003 to 2021 in Europe or USA at 3 T (n = 3,018; 89.6%) or 1.5 T (n = 296; 8.8%), 346 cases scanned with endorectal coil (10.3%), 3,023 (89.7%) with phased-array surface coils; 412 collected for anatomical segmentation tasks, 3,096 for PCa detection/classification; for 2,240 cases lesions delineation is available and 56 cases have matching histopathologic images; for 2,620 cases the PSA level is provided; the total size of all open datasets amounts to approximately 253 GB. Of note, quality of annotations provided per dataset highly differ and attention must be paid when using these datasets (e.g., data overlap). Seven grand challenges and commercial applications from eleven vendors are here considered. Few small studies provided prospective validation. More work is needed, in particular validation on large-scale multi-institutional, well-curated public datasets to test general applicability. 
Moreover, AI needs to be explored for clinical stages other than detection/characterization (e.g., follow-up, prognosis, interventions, and focal treatment).}, optnote = {DIAG, RADIOLOGY}, + ss_id = {8c739636acd9cc63a1290e73bb34cf49b605d60d}, + all_ss_ids = {['8c739636acd9cc63a1290e73bb34cf49b605d60d']}, + gscites = {17}, +} + +@article{Suzu17, + author = {Suzuki, Yuriko and Fujima, Noriyuki and Ogino, Tetsuo and Meakin, James Alastair and Suwa, Akira and Sugimori, Hiroyuki and Van Cauteren, Marc and van Osch, Matthias J. P.}, + title = {~~Acceleration of ASL-based time-resolved MR angiography by acquisition of control and labeled images in the same shot (ACTRESS)}, + doi = {10.1002/mrm.26667}, + year = {2017}, + abstract = {PurposeNoncontrast 4D-MR-angiography (MRA) using arterial spin labeling (ASL) is beneficial because high spatial and temporal resolution can be achieved. However, ASL requires acquisition of labeled and control images for each phase. The purpose of this study is to present a new accelerated 4D-MRA approach that requires only a single control acquisition, achieving similar image quality in approximately half the scan time.MethodsIn a multi-phase Look-Locker sequence, the first phase was used as the control image and the labeling pulse was applied before the second phase. By acquiring the control and labeled images within a single Look-Locker cycle, 4D-MRA was generated in nearly half the scan time of conventional ASL. However, this approach potentially could be more sensitive to off-resonance and magnetization transfer (MT) effects. To counter this, careful optimizations of the labeling pulse were performed by Bloch simulations. In in-vivo studies arterial visualization was compared between the new and conventional ASL approaches.ResultsOptimization of the labeling pulse successfully minimized off-resonance effects. Qualitative assessment showed that residual MT effects did not degrade visualization of the peripheral arteries.ConclusionThis study demonstrated that the proposed approach achieved similar image quality as conventional ASL-MRA approaches in just over half the scan time. Magn Reson Med 79:224-233, 2018. (c) 2017 The Authors Magnetic Resonance in Medicine published by Wiley Periodicals, Inc. on behalf of International Society for Magnetic Resonance in Medicine. 
This is an open access article under the terms of the Creative Commons Attribution NonCommercial License, which permits use, distribution and reproduction in any medium, provided the original work is properly cited and is not used for commercial purposes.}, + url = {http://dx.doi.org/10.1002/mrm.26667}, + file = {Suzu17.pdf:pdf\\Suzu17.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Magnetic Resonance in Medicine}, + automatic = {yes}, + citation-count = {8}, + pages = {224-233}, + volume = {79}, + pmid = {28321915}, } @inproceedings{Swid18, @@ -23738,7 +29449,9 @@ @inproceedings{Swid18 file = {Swid18.pdf:pdf\\Swid18.pdf:PDF}, optnote = {DIAG}, gsid = {196320442164707556}, - gscites = {5}, + gscites = {12}, + ss_id = {1bbafc8586925a89fb6ebe0941a9cb7e95de452b}, + all_ss_ids = {['1bbafc8586925a89fb6ebe0941a9cb7e95de452b']}, } @article{Swid19, @@ -23756,7 +29469,9 @@ @article{Swid19 optnote = {DIAG, RADIOLOGY}, pmid = {31476576}, gsid = {6264470711036776042}, - gscites = {9}, + gscites = {95}, + ss_id = {ddf3d225ccaeb05a912051c554b38f722f365a1b}, + all_ss_ids = {['ddf3d225ccaeb05a912051c554b38f722f365a1b']}, } @inproceedings{Swid20, @@ -23768,10 +29483,13 @@ @inproceedings{Swid20 doi = {10.1117/12.2549650}, series = {SPIE}, abstract = {Diffuse large B-cell lymphoma (DLBCL) is the most common type of B-cell lymphoma. It is characterized by a heterogeneous morphology, genetic changes and clinical behavior. A small specific subgroup of DLBCL, harbouring a MYC gene translocation is associated with worse patient prognosis and outcome. Typically, the MYC translocation is assessed with a molecular test (FISH), that is expensive and time-consuming. Our hypothesis is that genetic changes, such as translocations could be visible as changes in the morphology of an HE-stained specimen. However, it has not proven possible to use morphological criteria for the detection of a MYC translocation in the diagnostic setting due to lack of specificity. - In this paper, we apply a deep learning model to automate detection of the MYC translocations in DLBCL based on HE-stained specimens. The proposed method works at the whole-slide level and was developed based on a multicenter data cohort of 91 patients. All specimens were stained with HE, and the MYC translocation was confirmed using fluorescence in situ hybridization (FISH). The system was evaluated on an additional 66 patients, and obtained AUROC of 0.83 and accuracy of 0.77. The proposed method presents proof of a concept giving insights in the applicability of deep learning methods for detection of a genetic changes in DLBCL. In future work we will evaluate our algorithm for automatic pre-screen of DLBCL specimens to obviate FISH analysis in a large number of patients.}, + In this paper, we apply a deep learning model to automate detection of the MYC translocations in DLBCL based on HE-stained specimens. The proposed method works at the whole-slide level and was developed based on a multicenter data cohort of 91 patients. All specimens were stained with HE, and the MYC translocation was confirmed using fluorescence in situ hybridization (FISH). The system was evaluated on an additional 66 patients, and obtained AUROC of 0.83 and accuracy of 0.77. The proposed method presents proof of a concept giving insights in the applicability of deep learning methods for detection of a genetic changes in DLBCL. 
In future work we will evaluate our algorithm for automatic pre-screen of DLBCL specimens to obviate FISH analysis in a large number of patients.}, file = {:pdf/Swid20.pdf:PDF}, optnote = {DIAG}, year = {2020}, + ss_id = {a9dc7e9f174dbd624d5a8294ca6dc9b671e1ae97}, + all_ss_ids = {['a9dc7e9f174dbd624d5a8294ca6dc9b671e1ae97']}, + gscites = {2}, } @inproceedings{Swid20a, @@ -23787,6 +29505,9 @@ @inproceedings{Swid20a abstract = {When diagnosing and reporting lung adenocarcinoma (LAC), pathologists currently include an assessment of histologic tumor growth patterns because the predominant growth pattern has been reported to impact prognosis. However, the subjective nature of manual slide evaluation contributes to suboptimal inter-pathologist variability in tumor growth pattern assessment. We applied a deep learning approach to identify and automatically delineate areas of four tumor growth patterns (solid, acinar, micropapillary, and cribriform) and non-tumor areas in whole-slide images (WSI) from resected LAC specimens. We trained a DenseNet model using patches from 109 slides collected at two institutions. The model was tested using 56 WSIs including 20 that were collected at a third institution. Using the same slide set, the concordance between the DenseNet model and an experienced pathologist (blinded to the DenseNet results) in determining the predominant tumor growth pattern was substantial(kappa score = 0.603). Using a subset of 36 test slides that were manually annotated for tumor growth patterns, we also measured the F1-score for each growth pattern: 0.95 (solid), 0.78 (acinar), 0.76 (micropapillary), 0.28 (cribriform) and 0.97 (non-tumor). Our results suggest that DenseNet assessment of WSIs with solid, acinar, and micropapillary predominant tumor growth is more robust than for the WSIs with predominant cribriform growth which are less frequently encountered.}, file = {Swid20a.pdf:pdf\\Swid20a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {424bfbd0fcda2f8e5985acf173a5111af22f553d}, + all_ss_ids = {['424bfbd0fcda2f8e5985acf173a5111af22f553d']}, + gscites = {4}, } @article{Swid20b, @@ -23804,6 +29525,9 @@ @article{Swid20b optnote = {DIAG}, pmid = {32873856}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/225966}, + ss_id = {ec43e78642d133ac0b19192ca26d0fcb0ba14a99}, + all_ss_ids = {['ec43e78642d133ac0b19192ca26d0fcb0ba14a99']}, + gscites = {40}, } @article{Swid20c, @@ -23816,6 +29540,9 @@ @article{Swid20c abstract = {In patients with suspected lymphoma, the tissue biopsy provides lymphoma confirmation, classification, and prognostic factors, including genetic changes. We developed a deep learning algorithm to detect MYC rearrangement in scanned histological slides of diffuse large B-cell lymphoma. The H&E-stained slides of 287 cases from 11 hospitals were used for training and evaluation. The overall sensitivity to detect MYC rearrangement was 0.93 and the specificity 0.52, showing that prediction of MYC translocation based on morphology alone was possible in 93% of MYC-rearranged cases. 
This would allow a simple and fast prescreening, saving approximately 34% of genetic tests with the current algorithm.}, file = {Swid20c.pdf:pdf\\Swid20c.pdf:PDF}, optnote = {DIAG}, + ss_id = {0a79db68b14b74dfd7637ed5b8a0a7aa2956e3ea}, + all_ss_ids = {['0a79db68b14b74dfd7637ed5b8a0a7aa2956e3ea']}, + gscites = {14}, } @inproceedings{Swid20d, @@ -23827,6 +29554,26 @@ @inproceedings{Swid20d abstract = {In this study, we introduce a technique to generate synthetic histologic image data by blending parts of different images into a new image patch. The proposed approach, which we call multi-patch blending(MPB), crops parts of two histologic images of tumor growth patterns annotated with single but different labels and pastes them into a newly created image patch comprising areas with two different annotations. A Cycle-GAN model is employed in MPB to smooth out transitions between the pasted image crops and make the output image patch look realistic. The goal of implementing the MPB is to support the task of semantic segmentation of lung adenocarcinoma grown patterns in whole-slide images (WSI). We used MPB to increase the number of training patches extracted from a set of 18 WSIs with sparse annotations. De facto, MPB was implemented as a novel data augmentation strategy. U-Net trained with MPB-generated patches achieved 13% higher F1-score than the U-Net trained with original (sparsely annotated) patches in a 4-classsemantic segmentation task.}, file = {Swid20d.pdf:pdf\\Swid20d.pdf:PDF}, optnote = {DIAG}, + ss_id = {71cda147901a9122f82bb855f7e91c74e44eabbf}, + all_ss_ids = {['71cda147901a9122f82bb855f7e91c74e44eabbf']}, + gscites = {1}, +} + +@article{Swil23, + author = {Swillens, Julie E. M. and Nagtegaal, Iris D. and Engels, Sam and Lugli, Alessandro and Hermens, Rosella P. M. G. and van der Laak, Jeroen A. W. M.}, + title = {~~Pathologists' first opinions on barriers and facilitators of computational pathology adoption in oncological pathology: an international study}, + doi = {10.1038/s41388-023-02797-1}, + year = {2023}, + abstract = {AbstractComputational pathology (CPath) algorithms detect, segment or classify cancer in whole slide images, approaching or even exceeding the accuracy of pathologists. Challenges have to be overcome before these algorithms can be used in practice. We therefore aim to explore international perspectives on the future role of CPath in oncological pathology by focusing on opinions and first experiences regarding barriers and facilitators. We conducted an international explorative eSurvey and semi-structured interviews with pathologists utilizing an implementation framework to classify potential influencing factors. The eSurvey results showed remarkable variation in opinions regarding attitude, understandability and validation of CPath. Interview results showed that barriers focused on the quality of available evidence, while most facilitators concerned strengths of CPath. A lack of consensus was present for multiple factors, such as the determination of sufficient validation using CPath, the preferred function of CPath within the digital workflow and the timing of CPath introduction in pathology education. The diversity in opinions illustrates variety in influencing factors in CPath adoption. A next step would be to quantitatively determine important factors for adoption and initiate validation studies. 
Both should include clear case descriptions and be conducted among a more homogenous panel of pathologists based on sub specialization.}, + url = {http://dx.doi.org/10.1038/s41388-023-02797-1}, + file = {Swil23.pdf:pdf\\Swil23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Oncogene}, + automatic = {yes}, + citation-count = {0}, + pages = {2816-2827}, + volume = {42}, + pmid = {37587332}, } @article{Takx14, @@ -23844,7 +29591,9 @@ @article{Takx14 pmid = {25182625}, month = {9}, gsid = {2023434764815299292}, - gscites = {5}, + gscites = {7}, + ss_id = {b5233edf02fcfa9c994546ed8590246e2f432ec3}, + all_ss_ids = {['b5233edf02fcfa9c994546ed8590246e2f432ec3']}, } @article{Tamm18, @@ -23862,7 +29611,9 @@ @article{Tamm18 pmid = {30368011}, month = {2}, gsid = {13773357066499407544}, - gscites = {8}, + gscites = {34}, + ss_id = {e6a0940e90b9a661ff2accfa5e8eefe56fd35263}, + all_ss_ids = {['e6a0940e90b9a661ff2accfa5e8eefe56fd35263']}, } @inproceedings{Tan11, @@ -23879,7 +29630,9 @@ @inproceedings{Tan11 optnote = {DIAG, RADIOLOGY}, month = {3}, gsid = {15037460857605703476}, - gscites = {7}, + gscites = {8}, + ss_id = {2704781e2ff16f7f31bd97e4d393e685b9ab0875}, + all_ss_ids = {['2704781e2ff16f7f31bd97e4d393e685b9ab0875']}, } @inproceedings{Tan11c, @@ -23914,7 +29667,9 @@ @inproceedings{Tan12 optnote = {DIAG, RADIOLOGY}, month = {2}, gsid = {1527121090592760430}, - gscites = {5}, + gscites = {6}, + ss_id = {443e44a373875423637e9b519b451031635f2d35}, + all_ss_ids = {['443e44a373875423637e9b519b451031635f2d35']}, } @article{Tan12a, @@ -23934,6 +29689,8 @@ @article{Tan12a gsid = {920988454317757286}, gscites = {75}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/110664}, + ss_id = {f3b57b52566e70cc625e741ef1b2a388175f989d}, + all_ss_ids = {['f3b57b52566e70cc625e741ef1b2a388175f989d']}, } @inproceedings{Tan13, @@ -23949,6 +29706,8 @@ @inproceedings{Tan13 month = {2}, gsid = {13915316277612875833}, gscites = {4}, + ss_id = {0a0094567a4b50f0274c729c55d272608d1cdafc}, + all_ss_ids = {['0a0094567a4b50f0274c729c55d272608d1cdafc']}, } @article{Tan13a, @@ -23965,7 +29724,9 @@ @article{Tan13a pmid = {23693128}, month = {9}, gsid = {7682914345881547785}, - gscites = {70}, + gscites = {93}, + ss_id = {8e7a787fc98d409a3f6c63826e53c60210894c19}, + all_ss_ids = {['8e7a787fc98d409a3f6c63826e53c60210894c19']}, } @article{Tan13c, @@ -23983,6 +29744,8 @@ @article{Tan13c month = {12}, gsid = {17157262863361426204}, gscites = {19}, + ss_id = {534510f570af42f0affc5967dbcaefeb25948aa8}, + all_ss_ids = {['534510f570af42f0affc5967dbcaefeb25948aa8']}, } @inproceedings{Tan13d, @@ -24009,7 +29772,9 @@ @article{Tan13e pmid = {24119350}, month = {11}, gsid = {15955819716225519808}, - gscites = {26}, + gscites = {34}, + ss_id = {8139e856e3807c839ad70161f5e815552a083a64}, + all_ss_ids = {['8139e856e3807c839ad70161f5e815552a083a64']}, } @phdthesis{Tan14, @@ -24040,7 +29805,9 @@ @article{Tan15 pmid = {25832040}, month = {3}, gsid = {16482522359454166873}, - gscites = {26}, + gscites = {35}, + ss_id = {61d67f082bb776ac3a897dd2109bcb61d3660e7b}, + all_ss_ids = {['61d67f082bb776ac3a897dd2109bcb61d3660e7b']}, } @article{Tan16, @@ -24054,35 +29821,37 @@ @article{Tan16 doi = {10.1118/1.4953206}, url = {http://scitation.aip.org/content/aapm/journal/medphys/43/7/10.1118/1.4953206}, abstract = {Purpose: Automated 3D breast ultrasound (ABUS) has been proposed as a complementary screening - modality to mammography for early detection of breast cancers. 
To facilitate the interpretation - of ABUS images, automated diagnosis and detection techniques are being developed, in which - malignant lesion segmentation plays an important role. However, automated segmentation of cancer - in ABUS is challenging since lesion edges might not be well defined. In this study, the authors aim - at developing an automated segmentation method for malignant lesions in ABUS that is robust to - ill-defined cancer edges and posterior shadowing. - - Methods: A segmentation method using depth-guided dynamic programming based on spiral scanning - is proposed. The method automatically adjusts aggressiveness of the segmentation according - to the position of the voxels relative to the lesion center. Segmentation is more aggressive in the - upper part of the lesion (close to the transducer) than at the bottom (far away from the transducer), - where posterior shadowing is usually visible. The authors used Dice similarity coefficient (Dice) - for evaluation. The proposed method is compared to existing state of the art approaches such as - graph cut, level set, and smart opening and an existing dynamic programming method without depth - dependence. - - Results: In a dataset of 78 cancers, our proposed segmentation method achieved a mean Dice of - 0.73+-0.14. The method outperforms an existing dynamic programming method (0.70+-0.16) on this - task (p = 0.03) and it is also significantly (p < 0.001) better than graph cut (0.66+-0.18), level set - based approach (0.63+-0.20) and smart opening (0.65+-0.12). - - Conclusions: The proposed depth-guided dynamic programming method achieves accurate breast - malignant lesion segmentation results in automated breast ultrasound.}, + modality to mammography for early detection of breast cancers. To facilitate the interpretation + of ABUS images, automated diagnosis and detection techniques are being developed, in which + malignant lesion segmentation plays an important role. However, automated segmentation of cancer + in ABUS is challenging since lesion edges might not be well defined. In this study, the authors aim + at developing an automated segmentation method for malignant lesions in ABUS that is robust to + ill-defined cancer edges and posterior shadowing. + + Methods: A segmentation method using depth-guided dynamic programming based on spiral scanning + is proposed. The method automatically adjusts aggressiveness of the segmentation according + to the position of the voxels relative to the lesion center. Segmentation is more aggressive in the + upper part of the lesion (close to the transducer) than at the bottom (far away from the transducer), + where posterior shadowing is usually visible. The authors used Dice similarity coefficient (Dice) + for evaluation. The proposed method is compared to existing state of the art approaches such as + graph cut, level set, and smart opening and an existing dynamic programming method without depth + dependence. + + Results: In a dataset of 78 cancers, our proposed segmentation method achieved a mean Dice of + 0.73+-0.14. The method outperforms an existing dynamic programming method (0.70+-0.16) on this + task (p = 0.03) and it is also significantly (p < 0.001) better than graph cut (0.66+-0.18), level set + based approach (0.63+-0.20) and smart opening (0.65+-0.12). 
+ + Conclusions: The proposed depth-guided dynamic programming method achieves accurate breast + malignant lesion segmentation results in automated breast ultrasound.}, file = {Tan16.pdf:pdf\\Tan16.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {27370126}, month = {6}, gsid = {1950157939420291392}, - gscites = {8}, + gscites = {15}, + ss_id = {16018e9f637a1353bcf8136c4643cea5d0de8bbd}, + all_ss_ids = {['16018e9f637a1353bcf8136c4643cea5d0de8bbd']}, } @article{Tanc10, @@ -24102,6 +29871,8 @@ @article{Tanc10 gsid = {684371408799077124}, gscites = {4}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/88094}, + ss_id = {d1a97815a9fa5d25ea4b55acb51cbc582c35407e}, + all_ss_ids = {['d1a97815a9fa5d25ea4b55acb51cbc582c35407e']}, } @inproceedings{Tann11a, @@ -24131,7 +29902,9 @@ @article{Tell18 pmid = {29994086}, month = {9}, gsid = {664853976239237267}, - gscites = {45}, + gscites = {189}, + ss_id = {d188bdc40ffea984ce94b88b21fd5d3d3d9972d3}, + all_ss_ids = {['d188bdc40ffea984ce94b88b21fd5d3d3d9972d3']}, } @inproceedings{Tell18a, @@ -24146,6 +29919,9 @@ @inproceedings{Tell18a file = {Tell18a.pdf:pdf/Tell18b.pdf:PDF}, optnote = {DIAG}, month = {3}, + ss_id = {6f55cc94e6a7b1e7f54b77aecf6b57773378fc21}, + all_ss_ids = {['6f55cc94e6a7b1e7f54b77aecf6b57773378fc21']}, + gscites = {39}, } @inproceedings{Tell18b, @@ -24158,7 +29934,9 @@ @inproceedings{Tell18b file = {:pdf/Tell18b.pdf:PDF}, optnote = {DIAG}, gsid = {13709089197553633779}, - gscites = {1}, + gscites = {10}, + ss_id = {eebf6be2e906c922ee045c946b230b99dd9c86e0}, + all_ss_ids = {['eebf6be2e906c922ee045c946b230b99dd9c86e0']}, } @article{Tell19a, @@ -24176,7 +29954,9 @@ @article{Tell19a optnote = {DIAG, RADIOLOGY}, pmid = {31466046}, gsid = {9638664310854063699}, - gscites = {28}, + gscites = {350}, + ss_id = {35ee6606ec99b5bf282a0c5f400edbd16a6e22d9}, + all_ss_ids = {['35ee6606ec99b5bf282a0c5f400edbd16a6e22d9', 'd6e975989345b69f539dbb8f22cb3437f5cc5039']}, } @inproceedings{Tell20, @@ -24187,6 +29967,9 @@ @inproceedings{Tell20 abstract = {We focus on the problem of training convolutional neural networks on gigapixel histopathology images to predict image-level targets. For this purpose, we extend Neural Image Compression (NIC), an image compression framework that reduces the dimensionality of these images using an encoder network trained unsupervisedly. We propose to train this encoder using supervised multitask learning (MTL) instead. We applied the proposed MTL NIC to two histopathology datasets and three tasks. First, we obtained state-of-the-art results in the Tumor Proliferation Assessment Challenge of 2016 (TUPAC16). Second, we successfully classified histopathological growth patterns in images with colorectal liver metastasis (CLM). Third, we predicted patient risk of death by learning directly from overall survival in the same CLM data. Our experimental results suggest that the representations learned by the MTL objective are: (1) highly specific, due to the supervised training signal, and (2) transferable, since the same features perform well across different tasks. Additionally, we trained multiple encoders with different training objectives, e.g. 
unsupervised and variants of MTL, and observed a positive correlation between the number of tasks in MTL and the system performance on the TUPAC16 dataset.}, file = {Tell20.pdf:pdf\\Tell20.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + ss_id = {6441d36a0af25f29abc3a21a012daf1ea9d28594}, + all_ss_ids = {['6441d36a0af25f29abc3a21a012daf1ea9d28594']}, + gscites = {18}, } @article{Tell21, @@ -24204,6 +29987,9 @@ @article{Tell21 pmid = {31442971}, year = {2021}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/230096}, + ss_id = {fed7c0d369d8991f6b76a661dedd0d1768a402d2}, + all_ss_ids = {['fed7c0d369d8991f6b76a661dedd0d1768a402d2', '44454c9090606d0332272ff38df6c87eac15f5f7']}, + gscites = {154}, } @phdthesis{Tell21a, @@ -24211,9 +29997,9 @@ @phdthesis{Tell21a title = {Advancing computational pathology with deep learning: from patches to gigapixel image-level classification}, url = {https://repository.ubn.ru.nl/handle/2066/233752}, abstract = {The main focus of this work is to investigate novel deep learning based methodologies to improve breast cancer prognostic tools within the context of Computational Pathology. This research can be divided into three key blocks: - 1. Fundamental challenges in Computational Pathology. We address some of the issues that arise when developing deep learning based models across applications and organs. First, scaling the generation of pixel-level annotated data (Chapter 2). Second, addressing intra- and inter-center stain variation (Chapters 2 and 3). Third, developing accurate and fast models to process entire whole-slide images (Chapters 2 and 4). - 2. Automating the core component of breast cancer grading: performing mitosis detection at scale, that is, processing thousands of unseen multicenter entire whole-slide images, while deriving actionable insights for pathologists (Chapter 2). - 3. Performing whole-slide image classification. We propose a method that enables feeding entire whole-slide images to a single deep learning based model , targeting patient-level labels and outcome data such as overall survival(Chapters 4 and 5).}, + 1. Fundamental challenges in Computational Pathology. We address some of the issues that arise when developing deep learning based models across applications and organs. First, scaling the generation of pixel-level annotated data (Chapter 2). Second, addressing intra- and inter-center stain variation (Chapters 2 and 3). Third, developing accurate and fast models to process entire whole-slide images (Chapters 2 and 4). + 2. Automating the core component of breast cancer grading: performing mitosis detection at scale, that is, processing thousands of unseen multicenter entire whole-slide images, while deriving actionable insights for pathologists (Chapter 2). + 3. Performing whole-slide image classification. We propose a method that enables feeding entire whole-slide images to a single deep learning based model, targeting patient-level labels and outcome data such as overall survival (Chapters 4 and 5).}, copromotor = {F. Ciompi and G. Litjens}, file = {Tell21a.pdf:pdf\\Tell21a.pdf:PDF}, optnote = {DIAG}, @@ -24223,6 +30009,53 @@ @phdthesis{Tell21a journal = {PhD thesis}, } +@article{Terh20, + author = {Terheyden, Jan H. and Holz, Frank G. and Schmitz-Valckenberg, Steffen and L\"{u}ning, Anna and Schmid, Matthias and Rubin, Gary S. and Dunbar, Hannah and Tufail, Adnan and Crabb, David P. and Binns, Alison and S\'{a}nchez, Clara I.
and Hoyng, Carel and Margaron, Philippe and Zakaria, Nadia and Durbin, Mary and Luhmann, Ulrich and Zamiri, Parisa and Cunha-Vaz, Jos\'{e} and Martinho, Cec\'{i}lia and Leal, Sergio and Finger, Robert P. and Basile, P. and Behning, C. and Berger, M. and Binns, A. and B\"{o}ttger, M. and Bouchet, C. and Brazier, J. E. and Butt, T. and Carapezzi, C. and Carlton, J. and Charil, A. and Coimbra, R. and Nunes, S. and Crabb, D. and Cunha-Vaz, J. and Dunbar, H. and Durbin, M. and Finger, R. and Holz, F. and Hoyng, C. and Kr\"{a}tzschmar, J. and Leal, S. and Luhmann, U. and L\"{u}ning, A. and Margaron, Ph. and Martinho, C. and Mel\'{i}cio, B. and Mohand-Sa\"{i}d, S. and Rowen, D. and Rubin, G. S. and Sahel, J. and S\'{a}nchez, C. I. and Sanches Fernandes, D. and Schmid, M. and Schmitz-Valckenberg, S. and Skelly, A. and St\"{o}hr, L. and Taylor, D. and Terheyden, J. and Tufail, A. and Vieweg, L. and Wintergerst, L. and Wojek, C. and Zakaria, N. and Zamiri, P. and on behalf of the MACUSTAR consortium}, + title = {Clinical study protocol for a low-interventional study in intermediate age-related macular degeneration developing novel clinical endpoints for interventional clinical trials with a regulatory and patient access intention--MACUSTAR}, + doi = {10.1186/s13063-020-04595-6}, + year = {2020}, + abstract = {Background: There is an unmet need for treatment options in intermediate age-related macular degeneration (iAMD). However, for any new interventions to be tested in clinical trials, novel currently unavailable clinical endpoints need to be developed. Thus, the MACUSTAR study aims to develop and evaluate functional, structural, and patient-reported candidate endpoints for use in future iAMD trials. Methods: The protocol describes a low-interventional clinical multicenter study employing a novel two-part design. The cross-sectional part (total duration, 1 month) and the longitudinal part (total duration, 36 months) include participants with iAMD and control groups with early/late/no AMD. The cross-sectional part's primary objective is a technical evaluation of functional, structural, and patient-reported candidate outcomes. The longitudinal part's primary objective is to assess the prognostic power of changes in functional, structural, and patient-reported outcomes for progression from iAMD to late AMD. All data will be used to support a biomarker qualification procedure by regulatory authorities. Discussion: The MACUSTAR study characterizes and evaluates much needed novel functional, structural, and patient-reported endpoints for future clinical trials in iAMD and will improve our understanding of the natural history and prognostic markers of this condition. Trial registration: ClinicalTrials.gov NCT03349801. Registered on 22 November 2017}, + url = {http://dx.doi.org/10.1186/s13063-020-04595-6}, + file = {Terh20.pdf:pdf\\Terh20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Trials}, + automatic = {yes}, + citation-count = {20}, + volume = {21}, + pmid = {32682441}, +} + +@article{Terh21, + author = {Terheyden, Jan Henrik and on behalf of the MACUSTAR consortium and Behning, Charlotte and L\"{u}ning, Anna and Wintergerst, Ludmila and Basile, Pier G. and Tavares, Diana and Mel\'{i}cio, Beatriz A. and Leal, Sergio and Weissgerber, George and Luhmann, Ulrich F. O. and Crabb, David P. and Tufail, Adnan and Hoyng, Carel and Berger, Moritz and Schmid, Matthias and Silva, Rufino and Martinho, Cec\'{i}lia V. and Cunha-Vaz, Jos\'{e} and Holz, Frank G.
and Finger, Robert P.}, + title = {Challenges, facilitators and barriers to screening study participants in early disease stages--experience from the MACUSTAR study}, + doi = {10.1186/s12874-021-01243-8}, + year = {2021}, + abstract = {Background + Recruiting asymptomatic participants with early disease stages into studies is challenging and only little is known about facilitators and barriers to screening and recruitment of study participants. Thus we assessed factors associated with screening rates in the MACUSTAR study, a multi-centre, low-interventional cohort study of early stages of age-related macular degeneration (AMD). + + Methods + Screening rates per clinical site and per week were compiled and applicable recruitment factors were assigned to respective time periods. A generalized linear mixed-effects model including the most relevant recruitment factors identified via in-depth interviews with study personnel was fitted to the screening data. Only participants with intermediate AMD were considered. + + Results + A total of 766 individual screenings within 87 weeks were available for analysis. The mean screening rate was 0.6 +- 0.9 screenings per week among all sites. The participation at investigator teleconferences (relative risk increase 1.466, 95% CI [1.018-2.112]), public holidays (relative risk decrease 0.466, 95% CI [0.367-0.591]) and reaching 80% of the site's recruitment target (relative risk decrease 0.699, 95% CI [0.367-0.591]) were associated with the number of screenings at an individual site level. + + Conclusions + Careful planning of screening activities is necessary when recruiting early disease stages in multi-centre observational or low-interventional studies. Conducting teleconferences with local investigators can increase screening rates. When planning recruitment, seasonal and saturation effects at clinical site level need to be taken into account. + + Trial registration + ClinicalTrials.gov NCT03349801. Registered on 22 November 2017. + }, + url = {http://dx.doi.org/10.1186/s12874-021-01243-8}, + file = {Terh21.pdf:pdf\\Terh21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {BMC Medical Research Methodology}, + automatic = {yes}, + citation-count = {4}, + volume = {21}, + pmid = {33731014}, +} + @conference{Teus13, author = {M. M. Teussink and M. J. J. P. van Grinsven and B. Cense and C. B. Hoyng and J. Klevering and T. Theelen}, title = {Functional optical coherence tomography with a commercial device - a pilot study}, @@ -24282,8 +30115,6 @@ @article{Teuw14 optnote = {DIAG}, } - - @article{Teuw16b, author = {Teuwen, J.}, title = {{On the integral kernels of derivatives of the Ornstein-Uhlenbeck semigroup}}, @@ -24305,34 +30136,34 @@ @conference{Teuw17b booktitle = RSNA, year = {2017}, abstract = {PURPOSE - In this study we evaluated the potential of a computer system to select exams with low likelihood of - containing cancer. - - METHOD AND MATERIALS - We collected a representative set of 1649 referrals with different screening outcome from the Dutch - breast cancer screening. The dataset comprised 489 true positives (TP) exams and 1160 false - positive (FP) exams. In addition, we collected 1000 true negative (TN) exams from the same - screening population. All exams were automatically analyzed with Transpara v1.2.0 (ScreenPoint - Medical, Nijmegen, The Netherlands). Transpara uses deep learning algorithms to, based on - soft-tissue lesions and calcifications findings, categorize every mammogram on a 10-point scale.
This - computerized score represents the likelihood that a cancer is present in the exam at hand, where 10 - represents the highest likelihood that a cancer is present. It is defined in such a way that, in a - screening setting, the number of mammograms in each category is roughly equal. - - In this study, we determined the distribution of the computerized cancer likelihood scores for the TP, - FP and TN exams. In particular we quantified for each category the fraction of cases with a cancer - likelihood score below or equal to 5, including about 50% of the mammograms. Additionally we - evaluated the positive predictive value (PPV) of referrals in each likelihood category. - - RESULTS - 5.11% of the TPs, 20.3% of the FPs and 45.0% of the TNs were assigned to the likelihood categories - 1 to 5. This corresponds to 0.7 cancers per 1000 in the group with score 1-5 and 11.2 per 1000 with a - score higher than 5, based on the cancer detection rate of 6.5/1000 in the Dutch screening program. - The PPV was 8.00%, 8.14%, and 44.9% for cancer likelihood scores 1, 5 and 10, respectively. - - CONCLUSION - Automated identification of a fraction of screening mammograms that most likely are normal is - feasible.}, + In this study we evaluated the potential of a computer system to select exams with low likelihood of + containing cancer. + + METHOD AND MATERIALS + We collected a representative set of 1649 referrals with different screening outcome from the Dutch + breast cancer screening. The dataset comprised 489 true positives (TP) exams and 1160 false + positive (FP) exams. In addition, we collected 1000 true negative (TN) exams from the same + screening population. All exams were automatically analyzed with Transpara v1.2.0 (ScreenPoint + Medical, Nijmegen, The Netherlands). Transpara uses deep learning algorithms to, based on + soft-tissue lesions and calcifications findings, categorize every mammogram on a 10-point scale. This + computerized score represents the likelihood that a cancer is present in the exam at hand, where 10 + represents the highest likelihood that a cancer is present. It is defined in such a way that, in a + screening setting, the number of mammograms in each category is roughly equal. + + In this study, we determined the distribution of the computerized cancer likelihood scores for the TP, + FP and TN exams. In particular we quantified for each category the fraction of cases with a cancer + likelihood score below or equal to 5, including about 50% of the mammograms. Additionally we + evaluated the positive predictive value (PPV) of referrals in each likelihood category. + + RESULTS + 5.11% of the TPs, 20.3% of the FPs and 45.0% of the TNs were assigned to the likelihood categories + 1 to 5. This corresponds to 0.7 cancers per 1000 in the group with score 1-5 and 11.2 per 1000 with a + score higher than 5, based on the cancer detection rate of 6.5/1000 in the Dutch screening program. + The PPV was 8.00%, 8.14%, and 44.9% for cancer likelihood scores 1, 5 and 10, respectively. + + CONCLUSION + Automated identification of a fraction of screening mammograms that most likely are normal is + feasible.}, optnote = {DIAG}, } @@ -24344,6 +30175,62 @@ @article{Teuw18 abstract = {A general method is presented for determining the maximum electric energy in a bounded region of optical fields with given time-averaged flux of electromagnetic energy. Time-harmonic fields are considered whose plane wave expansion consists of propagating plane waves only, i.e., evanescent waves are excluded. 
The bounded region can be quite general: it can consist of finitely many points, or be a curve, a curved surface or a bounded volume. The optimum optical field is the eigenfield corresponding to the maximum eigenvalue of a compact linear integral operator which depends on the bounded region. It is explained how these optimum fields can be realized by focusing appropriate pupil fields. The special case that the region is a circular disc perpendicular to the direction of the optical axis is investigated by numerical simulations.}, optnote = {DIAG}, month = {1}, + ss_id = {da42a50fd33cb07f6419f0cd1004b2eda9bdc06a}, + all_ss_ids = {['da42a50fd33cb07f6419f0cd1004b2eda9bdc06a']}, + gscites = {0}, +} + +@article{Teuw21, + author = {Teuwen, Jonas and Moriakov, Nikita and Fedon, Christian and Caballo, Marco and Reiser, Ingrid and Bakic, Predrag and Garc\'{i}a, Eloy and Diaz, Oliver and Michielsen, Koen and Sechopoulos, Ioannis}, + title = {Deep learning reconstruction of digital breast tomosynthesis images for accurate breast density and patient-specific radiation dose estimation}, + doi = {10.1016/j.media.2021.102061}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.media.2021.102061}, + file = {Teuw21.pdf:pdf\\Teuw21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Image Analysis}, + automatic = {yes}, + citation-count = {17}, + pages = {102061}, + volume = {71}, + pmid = {33910108}, +} + +@article{Thag23, + author = {Thagaard, Jeppe and Broeckx, Glenn and Page, David B and Jahangir, Chowdhury Arif and Verbandt, Sara and Kos, Zuzana and Gupta, Rajarsi and Khiroya, Reena and Abduljabbar, Khalid and Acosta Haab, Gabriela and Acs, Balazs and Akturk, Guray and Almeida, Jonas S and Alvarado-Cabrero, Isabel and Amgad, Mohamed and Azmoudeh-Ardalan, Farid and Badve, Sunil and Baharun, Nurkhairul Bariyah and Balslev, Eva and Bellolio, Enrique R and Bheemaraju, Vydehi and Blenman, Kim RM and Botinelly Mendon\c{c}a Fujimoto, Luciana and Bouchmaa, Najat and Burgues, Octavio and Chardas, Alexandros and Chon U Cheang, Maggie and Ciompi, Francesco and Cooper, Lee AD and Coosemans, An and Corredor, Germ\'{a}n and Dahl, Anders B and Dantas Portela, Flavio Luis and Deman, Frederik and Demaria, Sandra and Dor\'{e} Hansen, Johan and Dudgeon, Sarah N and Ebstrup, Thomas and Elghazawy, Mahmoud and Fernandez-Mart\'{i}n, Claudio and Fox, Stephen B and Gallagher, William M and Giltnane, Jennifer M and Gnjatic, Sacha and Gonzalez-Ericsson, Paula I and Grigoriadis, Anita and Halama, Niels and Hanna, Matthew G and Harbhajanka, Aparna and Hart, Steven N and Hartman, Johan and Hauberg, S{\o}ren and Hewitt, Stephen and Hida, Akira I and Horlings, Hugo M and Husain, Zaheed and Hytopoulos, Evangelos and Irshad, Sheeba and Janssen, Emiel AM and Kahila, Mohamed and Kataoka, Tatsuki R and Kawaguchi, Kosuke and Kharidehal, Durga and Khramtsov, Andrey I and Kiraz, Umay and Kirtani, Pawan and Kodach, Liudmila L and Korski, Konstanty and Kov\'{a}cs, Anik\'{o} and Laenkholm, Anne-Vibeke and Lang-Schwarz, Corinna and Larsimont, Denis and Lennerz, Jochen K and Lerousseau, Marvin and Li, Xiaoxian and Ly, Amy and Madabhushi, Anant and Maley, Sai K and Manur Narasimhamurthy, Vidya and Marks, Douglas K and McDonald, Elizabeth S and Mehrotra, Ravi and Michiels, Stefan and Minhas, Fayyaz ul Amir Afsar and Mittal, Shachi and Moore, David A and Mushtaq, Shamim and Nighat, Hussain and Papathomas, Thomas and Penault-Llorca, Frederique and Perera, Rashindrie D and Pinard, Christopher J and
Pinto-Cardenas, Juan Carlos and Pruneri, Giancarlo and Pusztai, Lajos and Rahman, Arman and Rajpoot, Nasir Mahmood and Rapoport, Bernardo Leon and Rau, Tilman T and Reis-Filho, Jorge S and Ribeiro, Joana M and Rimm, David and Roslind, Anne and Vincent-Salomon, Anne and Salto-Tellez, Manuel and Saltz, Joel and Sayed, Shahin and Scott, Ely and Siziopikou, Kalliopi P and Sotiriou, Christos and Stenzinger, Albrecht and Sughayer, Maher A and Sur, Daniel and Fineberg, Susan and Symmans, Fraser and Tanaka, Sunao and Taxter, Timothy and Tejpar, Sabine and Teuwen, Jonas and Thompson, E Aubrey and Tramm, Trine and Tran, William T and van der Laak, Jeroen and van Diest, Paul J and Verghese, Gregory E and Viale, Giuseppe and Vieth, Michael and Wahab, Noorul and Walter, Thomas and Waumans, Yannick and Wen, Hannah Y and Yang, Wentao and Yuan, Yinyin and Zin, Reena Md and Adams, Sylvia and Bartlett, John and Loibl, Sibylle and Denkert, Carsten and Savas, Peter and Loi, Sherene and Salgado, Roberto and Specht Stovgaard, Elisabeth}, + title = {Pitfalls in machine learning-based assessment of tumor-infiltrating lymphocytes in breast cancer: A report of the International Immuno-Oncology Biomarker Working Group on Breast Cancer}, + doi = {10.1002/path.6155}, + year = {2023}, + abstract = {AbstractThe clinical significance of the tumor-immune interaction in breast cancer is now established, and tumor-infiltrating lymphocytes (TILs) have emerged as predictive and prognostic biomarkers for patients with triple-negative (estrogen receptor, progesterone receptor, and HER2-negative) breast cancer and HER2-positive breast cancer. How computational assessments of TILs might complement manual TIL assessment in trial and daily practices is currently debated. Recent efforts to use machine learning (ML) to automatically evaluate TILs have shown promising results. We review state-of-the-art approaches and identify pitfalls and challenges of automated TIL evaluation by studying the root cause of ML discordances in comparison to manual TIL quantification. We categorize our findings into four main topics: (1) technical slide issues, (2) ML and image analysis aspects, (3) data challenges, and (4) validation issues. The main reason for discordant assessments is the inclusion of false-positive areas or cells identified by performance on certain tissue patterns or design choices in the computational implementation. To aid the adoption of ML for TIL assessment, we provide an in-depth discussion of ML and image analysis, including validation issues that need to be considered before reliable computational reporting of TILs can be incorporated into the trial and routine clinical management of patients with triple-negative breast cancer. (c) 2023 The Authors. 
The Journal of Pathology published by John Wiley & Sons Ltd on behalf of The Pathological Society of Great Britain and Ireland.}, + url = {http://dx.doi.org/10.1002/path.6155}, + file = {Thag23.pdf:pdf\\Thag23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {The Journal of Pathology}, + citation-count = {2}, + automatic = {yes}, + pages = {498-513}, + volume = {260}, + ss_id = {13a37cf9771c8fbabc261e13c8a246346244f4ca}, + all_ss_ids = {['13a37cf9771c8fbabc261e13c8a246346244f4ca']}, + gscites = {3}, +} + +@article{Than22, + author = {Thannhauser, Jos and Nas, Joris and van der Sluijs, Koen and Zwart, Hans and de Boer, Menko-Jan and van Royen, Niels and Bonnes, Judith and Brouwer, Marc}, + title = {Pilot study on VF-waveform based algorithms for early detection of acute myocardial infarction during out-of-hospital cardiac arrest}, + doi = {10.1016/j.resuscitation.2022.03.025}, + year = {2022}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.resuscitation.2022.03.025}, + file = {Than22.pdf:pdf\\Than22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Resuscitation}, + automatic = {yes}, + citation-count = {2}, + pages = {62-67}, + volume = {174}, + pmid = {35351606}, } @article{Thee20, @@ -24355,6 +30242,26 @@ @article{Thee20 optnote = {DIAG, RADIOLOGY}, year = {2020}, month = {9}, + ss_id = {95d2ec56339169d30a774899c6f5b93540a3ce0f}, + all_ss_ids = {['95d2ec56339169d30a774899c6f5b93540a3ce0f']}, + gscites = {0}, +} + +@article{Thij23, + author = {Thijssen, Linda C.P. and de Rooij, Maarten and Barentsz, Jelle O. and Huisman, Henkjan J.}, + title = {Radiomics based automated quality assessment for T2W prostate MR images}, + doi = {10.1016/j.ejrad.2023.110928}, + year = {2023}, + abstract = {Purpose: The guidelines for prostate cancer recommend the use of MRI in the prostate cancer pathway. Due to the variability in prostate MR image quality, the reliability of this technique in the detection of prostate cancer is highly variable in clinical practice. This leads to the need for an objective and automated assessment of image quality to ensure an adequate acquisition and hereby to improve the reliability of MRI. The aim of this study is to investigate the feasibility of Blind/referenceless image spatial quality evaluator (Brisque) and radiomics in automated image quality assessment of T2-weighted (T2W) images. Method: Anonymized axial T2W images from 140 patients were scored for quality using a five-point Likert scale (low, suboptimal, acceptable, good, very good quality) in consensus by two readers. Images were dichotomized into clinically acceptable (very good, good and acceptable quality images) and clinically unacceptable (low and suboptimal quality images) in order to train and verify the model. Radiomics and Brisque features were extracted from a central cuboid volume including the prostate. A reduced feature set was used to fit a Linear Discriminant Analysis (LDA) model to predict image quality. Two hundred times repeated 5-fold cross-validation was used to train the model and test performance by assessing the classification accuracy, the discrimination accuracy as receiver operating curve - area under curve (ROC-AUC), and by generating confusion matrices. Results: Thirty-four images were classified as clinically unacceptable and 106 were classified as clinically acceptable. The accuracy of the independent test set (mean +- standard deviation) was 85.4 +- 5.5%. The ROC-AUC was 0.856 (0.851 - 0.861) (mean; 95% confidence interval).
Conclusions: Radiomics AI can automatically detect a significant portion of T2W images of suboptimal image quality. This can help improve image quality at the time of acquisition, thus reducing repeat scans and improving diagnostic accuracy.}, url = {http://dx.doi.org/10.1016/j.ejrad.2023.110928}, file = {Thij23.pdf:pdf\\Thij23.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, journal = {European Journal of Radiology}, citation-count = {0}, automatic = {yes}, ss_id = {59545ee081348d9f15f20f3e3c8d64e67039d262}, all_ss_ids = {['59545ee081348d9f15f20f3e3c8d64e67039d262']}, gscites = {0}, } @inproceedings{Timp02, @@ -24367,7 +30274,9 @@ @inproceedings{Timp02 pages = {240--242}, optnote = {DIAG, RADIOLOGY}, gsid = {17461913310296744924}, - gscites = {10}, + gscites = {11}, + ss_id = {0dba24303682aab3795c1493f5b639bd38d4358d}, + all_ss_ids = {['0dba24303682aab3795c1493f5b639bd38d4358d']}, } @article{Timp04, @@ -24386,6 +30295,8 @@ @article{Timp04 month = {4}, gsid = {1462730622144634090}, gscites = {220}, + ss_id = {475acaf8c5b6f148dd4faabc6a31d7a10605f748}, + all_ss_ids = {['475acaf8c5b6f148dd4faabc6a31d7a10605f748']}, } @article{Timp05, @@ -24404,6 +30315,8 @@ @article{Timp05 month = {7}, gsid = {2725768412742731883}, gscites = {31}, + ss_id = {d41140f0a6e06d1c94715dcd3ce6c4a9cda8ef48}, + all_ss_ids = {['d41140f0a6e06d1c94715dcd3ce6c4a9cda8ef48']}, } @article{Timp06, @@ -24422,6 +30335,8 @@ @article{Timp06 month = {2}, gsid = {7132504051186922195}, gscites = {59}, + ss_id = {0cac166d49f3f7193b4ef0ba591bc1155aa0a762}, + all_ss_ids = {['0cac166d49f3f7193b4ef0ba591bc1155aa0a762']}, } @phdthesis{Timp06a, @@ -24454,6 +30369,8 @@ @article{Timp07 month = {7}, gsid = {1885078493541058117}, gscites = {90}, + ss_id = {6151c62ea8b3f95b4b4545807cc7b0c9794e9fbf}, + all_ss_ids = {['6151c62ea8b3f95b4b4545807cc7b0c9794e9fbf']}, } @article{Timp10, @@ -24471,7 +30388,26 @@ @article{Timp10 pmid = {20403792}, month = {5}, gsid = {15819462963198853544}, - gscites = {24}, + gscites = {35}, + ss_id = {01bb3eaa558de011fe6cf83f854ed64208cf6785}, + all_ss_ids = {['01bb3eaa558de011fe6cf83f854ed64208cf6785']}, +} + +@article{Toga23, + author = {Togawa, Riku and Pfob, Andr\'{e} and B\"{u}sch, Christopher and Alwafai, Zaher and Balleyguier, Corinne and Clevert, Dirk-Andr\'{e} and Duda, Volker and Fastner, Sarah and Goncalo, Manuela and Gomez, Christina and Gruber, Ines and Hahn, Markus and Hennigs, Andr\'{e} and Kapetas, Panagiotis and Nees, Juliane and Ohlinger, Ralf and Riedel, Fabian and Rutten, Matthieu and Sch\"{a}fgen, Benedikt and Stieber, Anne and Tozaki, Mitsuhiro and Wojcinski, Sebastian and Rauch, Geraldine and Heil, J\"{o}rg and Barr, Richard and Golatta, Michael}, + title = {Potential of Lesion-to-Fat Elasticity Ratio Measured by Shear Wave Elastography to Reduce Benign Biopsies in BI-RADS 4 Breast Lesions}, + doi = {10.1002/jum.16192}, + year = {2023}, + abstract = {Objectives: We evaluated whether lesion-to-fat ratio measured by shear wave elastography in patients with Breast Imaging Reporting and Data System (BI-RADS) 3 or 4 lesions has the potential to further refine the assessment of B-mode ultrasound alone in breast cancer diagnostics. Methods: This was a secondary analysis of an international diagnostic multicenter trial (NCT02638935). Data from 1288 women with breast lesions categorized as BI-RADS 3 and 4a-c by conventional B-mode ultrasound were analyzed, whereby the focus was placed on differentiating lesions categorized as BI-RADS 3 and BI-RADS 4a.
All women underwent shear wave elastography and histopathologic evaluation functioning as reference standard. Reduction of benign biopsies as well as the number of missed malignancies after reclassification using lesion-to-fat ratio measured by shear wave elastography were evaluated. Results: Breast cancer was diagnosed in 368 (28.6%) of 1288 lesions. The assessment with conventional B-mode ultrasound resulted in 53.8% (495 of 1288) pathologically benign lesions categorized as BI-RADS 4 and therefore false positives as well as in 1.39% (6 of 431) undetected malignancies categorized as BI-RADS 3. Additional lesion-to-fat ratio in BI-RADS 4a lesions with a cutoff value of 1.85 resulted in 30.11% biopsies of benign lesions which correspond to a reduction of 44.04% of false positives. Conclusions: Adding lesion-to-fat ratio measured by shear wave elastography to conventional B-mode ultrasound in BI-RADS 4a breast lesions could help reduce the number of benign biopsies by 44.04%. At the same time, however, 1.98% of malignancies were missed, which would still be in line with American College of Radiology BI-RADS 3 definition of <2% of undetected malignancies.}, + url = {http://dx.doi.org/10.1002/jum.16192}, + file = {Toga23.pdf:pdf\\Toga23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Journal of Ultrasound in Medicine}, + automatic = {yes}, + citation-count = {0}, + pages = {1729-1736}, + volume = {42}, + pmid = {36789976}, } @conference{Tom22a, @@ -24496,7 +30432,38 @@ @inproceedings{Trom12 file = {Trom12.pdf:pdf\\Trom12.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {14231121447187167510}, - gscites = {3}, + gscites = {5}, + ss_id = {6c9c323e06155fb6d4c3adff3ae733a45c55a4e6}, + all_ss_ids = {['6c9c323e06155fb6d4c3adff3ae733a45c55a4e6']}, +} + +@article{Tura21, + author = {Turan, Ayla S. and Jenniskens, Sjoerd and Martens, Jasper M. and Rutten, Matthieu J. C. M. and Yo, Lonneke S. F. and van Strijen, Marco J. L. and Drenth, Joost P. H. and Siersema, Peter D. and van Geenen, Erwin J. M.}, + title = {Complications of percutaneous transhepatic cholangiography and biliary drainage, a multicenter observational study}, + doi = {10.1007/s00261-021-03207-4}, + year = {2021}, + abstract = {Objectives + Over 2500 percutaneous transhepatic cholangiography and biliary drainage (PTCD) procedures are yearly performed in the Netherlands. Most interventions are performed for treatment of biliary obstruction following unsuccessful endoscopic biliary cannulation. Our aim was to evaluate complication rates and risk factors for complications in PTCD patients after failed ERCP. + + Methods + We performed an observational study collecting data from a cohort that was subjected to PTCD during a 5-year period in one academic and four teaching hospitals. Primary objective was the development of infectious (sepsis, cholangitis, abscess, or cholecystitis) and non-infectious complications (bile leakage, severe hemorrhage, etc.) and mortality within 30 days of the procedure. Subsequently, risk factors for complications and mortality were analyzed with a multilevel logistic regression analysis. + + Results + A total of 331 patients underwent PTCD of whom 205 (61.9%) developed PTCD-related complications. Of the 224 patients without a pre-existent infection, 91 (40.6%) developed infectious complications, i.e., cholangitis in 26.3%, sepsis in 24.6%, abscess formation in 2.7%, and cholecystitis in 1.3%. Non-infectious complications developed in 114 of 331 patients (34.4%). 30-day mortality was 17.2% (N = 57).
Risk factors for infectious complications included internal drainage and drain obstruction, while multiple re-interventions were a risk factor for non-infectious complications. + + Conclusion + Both infectious and non-infectious complications are frequent after PTCD, most often due to biliary drain obstruction. + }, + url = {http://dx.doi.org/10.1007/s00261-021-03207-4}, + file = {Tura21.pdf:pdf\\Tura21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Abdominal Radiology}, + automatic = {yes}, + citation-count = {9}, + pages = {3338-3344}, + volume = {47}, + pmid = {34357434}, } @article{Turn21, @@ -24513,32 +30480,52 @@ @article{Turn21 pmid = {33590805}, year = {2021}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/235783}, + ss_id = {ffa308f1ec81d2c2e1e75103af34ba450c33ee25}, + all_ss_ids = {['ffa308f1ec81d2c2e1e75103af34ba450c33ee25']}, + gscites = {5}, } @article{Twil21, taverne_url = {https://repository.ubn.ru.nl/handle/2066/235582}, } +@article{Twil21a, + author = {Twilt, Jasper J. and van Leeuwen, Kicky G. and Huisman, Henkjan J. and F\"{u}tterer, Jurgen J. and de Rooij, Maarten}, + title = {Artificial Intelligence Based Algorithms for Prostate Cancer Classification and Detection on Magnetic Resonance Imaging: A Narrative Review}, + doi = {10.3390/diagnostics11060959}, + year = {2021}, + abstract = {Due to the upfront role of magnetic resonance imaging (MRI) for prostate cancer (PCa) diagnosis, a multitude of artificial intelligence (AI) applications have been suggested to aid in the diagnosis and detection of PCa. In this review, we provide an overview of the current field, including studies between 2018 and February 2021, describing AI algorithms for (1) lesion classification and (2) lesion detection for PCa. Our evaluation of 59 included studies showed that most research has been conducted for the task of PCa lesion classification (66%) followed by PCa lesion detection (34%). Studies showed large heterogeneity in cohort sizes, ranging between 18 and 499 patients (median = 162) combined with different approaches for performance validation. Furthermore, 85% of the studies reported on the stand-alone diagnostic accuracy, whereas 15% demonstrated the impact of AI on diagnostic thinking efficacy, indicating limited proof for the clinical utility of PCa AI applications. In order to introduce AI within the clinical workflow of PCa assessment, robustness and generalizability of AI applications need to be further validated utilizing external validation and clinical workflow experiments.}, + url = {http://dx.doi.org/10.3390/diagnostics11060959}, + file = {Twil21a.pdf:pdf\\Twil21a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Diagnostics}, + automatic = {yes}, + citation-count = {35}, + pages = {959}, + volume = {11}, + pmid = {34073627}, +} + @conference{Twil23a, - author = {Jasper Twilt and Anindo Saha and Joeran S.
Bosma and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Henkjan Huisman and Maarten de Rooij}, title = {Diagnostic Value of Dynamic Contrast-Enhanced MRI for the Detection of Clinically Significant Prostate Cancer in a Multi-Reader Study: Preliminary Results from the PI-CAI Consortium}, booktitle = ECR, year = {2023}, abstract = {PURPOSE: The study aimed to compare the detection performance of ISUP grade group (GG) >= 2 using bi-parametric (bp)MRI, and multi-parametric (mp)MRI (which includes dynamic contrast IV administration), in an international reader study. METHODS: This retrospective reader study uses a cohort of 400 prostate mpMRI exams acquired between 2012 and 2021 at three Dutch centres and one Norwegian centre. Patients suspected of GG>=2 cancers and without prior prostate treatment or prior GG>=2 findings in an MRI-first approach are included. 65 radiologists (18 countries, 43 centres) with 1-23 years (median: 8) prostate reading experience were enlisted. Readers and cases are divided into blocks of 100 cases. For each case, readers assessed bpMRI and mpMRI in sequence to mimic the clinical routine. Suspected GG>=2 cancer findings were assigned a PI-RADS 3-5 score. Additionally, a patient-level suspicion score (0-100) of harbouring GG>=2 was indicated. Multi-reader multi-case (MRMC) analysis was used to compare the patient-level added value of mpMRI.
RESULTS: Preliminary results from the first 14 readers with 2-15 years (median: 9) of experience indicate that overall, there is little improvement in GG>=2 detections between bpMRI and mpMRI readings with AUROCs of 0.857 (95% CI: 0.83, 0.89) and 0.860 (95% CI: 0.83, 0.89), respectively. For individual readers, absolute differences in AUROC ranged between 0.00 and 0.03 (95% CI: 0.00, 0.01). CONCLUSION: MRI assessments in bpMRI had similar GG>=2 detections to mpMRI assessments at a per-case level. Multivariable influencers such as experience, workflow, image quality and protocol familiarity must be evaluated. LIMITATIONS: Preliminary results are limited by the sample size. mpMRI readings of the original data were used to guide histologic verification. 13 out of 14 readers had high expertise as per 2020 ESUR/ESUI consensus statements.}, optnote = {DIAG, RADIOLOGY}, } @conference{Twil23b, author = {Jasper Twilt and Anindo Saha and Joeran S. Bosma and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Henkjan Huisman and Maarten de Rooij}, title = {EAU Plenary Gamechanging Session - Artificial Intelligence and Radiologists at Prostate Cancer Detection in MRI: Preliminary Results from the PI-CAI Challenge}, booktitle = EAU, year = {2023}, abstract = {PURPOSE: The PI-CAI (Prostate Imaging: Cancer AI) Challenge is an international study, with over 10,000 carefully-curated prostate MRI exams to validate modern AI algorithms and estimate radiologists' performance at csPCa detection and diagnosis. PI-CAI primarily consists of two sub-studies: an AI study (Grand Challenge) and a reader study. In the end, PI-CAI will benchmark state-of-the-art AI algorithms developed in the Grand Challenge against prostate radiologists participating in the reader study. The aim is to present the study design and share the preliminary results of both sub-studies. METHODS: For the AI study, an annotated multi-center, multi-vendor dataset of 1500 bpMRI exams (including their basic clinical and acquisition variables) was made publicly available for all participating AI teams and the research community at large. Teams used this dataset to develop AI models, and at the end of this open development phase, all algorithms were ranked based on their performance on a hidden testing cohort of 1000 unseen scans. In the ongoing closed testing phase, organizers will retrain the top-ranking 5 AI algorithms using a larger dataset of 9107 bpMRI scans (including additional training scans from a private dataset). Finally, their performance will be re-evaluated on the hidden testing cohort. For the reader study, 59 international prostate radiologists assessed a subset of 400 scans from the hidden testing cohort. Readers and cases were divided into blocks of 100 cases. For each case, readers assessed bpMRI and mpMRI in sequence to mimic clinical routine. Suspected GG>=2 cancer findings were assigned a PI-RADS 3-5 score. Additionally, a patient-level suspicion score (0-100) of harboring GG>=2 was indicated. Multi-reader multi-case (MRMC) analysis was used to compare the patient-level added value of mpMRI. RESULTS: From the AI study, the ranked results from the open development phase will be presented. From the reader study, preliminary results from the first 14 readers will be presented.
Readers with 2-15 years (median: 9) of experience indicate that overall, there is little improvement in GG>=2 detections between bpMRI and mpMRI readings with AUROCs of 0.857 (95% CI: 0.83, 0.89) and 0.860 (95% CI: 0.83, 0.89), respectively. For individual readers, absolute differences in AUROC ranged between 0.00 and 0.03 (95% CI: 0.00, 0.01). CONCLUSION: The top 5 results from the open development phase of the AI study of the PI-CAI Challenge will be presented.
Preliminary results from the PI-CAI reader study show that bpMRI had similar GG>=2 detection to mpMRI assessments at a per-case level. Multivariable influencers such as experience, workflow, image quality and protocol familiarity need to be evaluated. LIMITATIONS: Preliminary results are limited by the sample size. mpMRI readings of the original data were used to guide histologic verification. 13 out of 14 readers had high expertise as per 2020 ESUR/ESUI consensus statements.}, optnote = {DIAG, RADIOLOGY}, } @conference{Twil23c, author = {Jasper Twilt and Anindo Saha and Joeran S. Bosma and Bram van Ginneken and Derya Yakar and Mattijs Elschot and Jeroen Veltman and Jurgen Futterer and Henkjan Huisman and Maarten de Rooij}, title = {International Comparative Study of Artificial Intelligence and Radiologists in Clinically Significant Prostate Cancer Detection: Results From The PI-CAI Consortium}, booktitle = {Annual Meeting of the Society for Advanced Body Imaging}, year = {2023}, @@ -24667,7 +30654,7 @@ @mastersthesis{Vala19 author = {Valacchi, Lorenzo}, title = {Analysis and endotracheal tube detection in chest x-rays using deep learning}, abstract = {The following work focuses on the development of two deep learning models applied to chest x-rays. The first model, Imagesorter, provides a solution for sorting chest x-ray images where metadata is not available or is unreliable. This is frequently the case when accessing large collections of radiographs and can result in very time-consuming procedures to obtain reliable data. Specifically, the algorithm returns four properties of the image: the type of image presented, rotation (whether the image is rotated), inversion (whether the grayscale level of the radiograph is inverted) and orientation (whether a lateral chest x-ray is mirrored). Nearly 30,000 radiographs were gathered and used to train, validate and test a deep convolutional neural network. For this purpose, a ResNet50 network pretrained on ImageNet and finetuned on the chest x-ray dataset was used. Moreover, the network architecture was modified to return all four features at once. The model achieved very good results over the test set and can be considered a valid tool to efficiently explore and sort large x-ray collections. The second model, Endotracheal-Tube, detects the presence of an endotracheal tube in a chest x-ray. Many automated methods require gathering chest x-rays where an endotracheal tube is present. The presented algorithm can help gather reliable data from large collections in a short amount of time. A large dataset was created for the project and a preprocessing method to crop a square area of the image where the tube lies is presented. Four models are trained, validated and tested over the same dataset to determine which performs best. In the end, an InceptionV3 network pretrained on ImageNet and finetuned on the dataset achieved the best results (AUC = 0.993).
- Both projects are part of OpenCXR, an open source library developed by the Chest X-Ray teams at the Dignostic Image Analysis Group at the Radboud University Medical Center, Nijmegen, The Netherlands.}, + Both projects are part of OpenCXR, an open source library developed by the Chest X-Ray teams at the Diagnostic Image Analysis Group at the Radboud University Medical Center, Nijmegen, The Netherlands.}, file = {Vala19.pdf:pdf\\Vala19.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, school = {Universit\`{a} di Siena}, @@ -24680,16 +30667,16 @@ @conference{Valk19 booktitle = ARVO, title = {Familial discordance in disease phenotype in siblings with Stargardt disease}, abstract = {Purpose: - To investigate intersibling discordance of the Stargardt disease (STGD1) phenotype. - - Methods: - We performed a retrospective cohort study among siblings with genetically confirmed STGD1 and at least one available fundus autofluorescence (FAF) image of both eyes.
We compared age of onset within families using the youngest patient as the reference and a predetermined threshold value of 10 years for significant differences. Disease duration was matched to investigate differences in best-corrected visual acuity, and we determined and compared the survival time for reaching severe visual impairment (SVI; <20/200 Snellen or >1.3 Logarithm of the Minimal Angle of Resolution (LogMAR)). Central retinal atrophy surface area was quantified and compared by two independent graders using the semi-automated EyeNED software. Additionally, both graders performed qualitative assessment of FAF patterns to identify phenotypic differences and commonalities. Main outcome measures included differences in age of onset, best-corrected visual acuity (BCVA), time to develop legal blindness, FAF atrophy surface area and autofluorescence patterns. + + Results: + Significant differences in age of onset were present in 5/17 families, ranging from 13 to 39 years. BCVA was matched in 12/17 families and the median difference was 0.41 (0 - 1.10) LogMAR for the right and 0.41 (0 - 1.08) LogMAR for the left eye, and we found extreme differences in five families ranging from 0.58 to 1.1 LogMAR. The median age at which patients developed SVI was 14 years. We observed significant differences in time to develop SVI in three out of 12 families with matched survival times, ranging from 14 to 29 years. Median central retinal atrophy surface area was 11.38 mm2 in the right (range 1.98 - 44.78 mm2) and 10.59 mm2 in the left (range 1.61 - 40.59 mm2) eyes and was highly comparable between siblings, with the exception of family one. Qualitative FAF phenotypes were comparable in all sibling pairs. + + Conclusions: + Phenotypic discordance between siblings with STGD1 disease carrying the same ABCA4 variants is a prevalent phenomenon. Functional outcomes can differ substantially despite highly comparable FAF phenotypes, which complicates sibling-based prognosis. While environmental factors are likely to modify the disease course, the relatively young median age at which patients develop SVI indicates an important role for genetic factors as disease modifiers.}, optnote = {DIAG, RADIOLOGY}, year = {2019}, } @@ -24706,28 +30693,31 @@ @article{Valk19a doi = {10.1016/j.ophtha.2019.07.010}, url = {https://www.sciencedirect.com/science/article/pii/S0161642019306578?via%3Dihub}, abstract = {Purpose - To investigate intersibling phenotypic concordance in Stargardt disease (STGD1). - - Design - Retrospective cohort study. - - Participants - Siblings with genetically confirmed STGD1 and at least 1 available fundus autofluorescence (FAF) image of both eyes. - - Methods - We compared age at onset within families. Disease duration was matched to investigate differences in best-corrected visual acuity (BCVA) and compared the survival time for reaching severe visual impairment (<20/200 Snellen or >1.0 logarithm of the minimum angle of resolution [logMAR]). Central retinal atrophy area was quantified independently by 2 experienced graders using semiautomated software and compared between siblings. Both graders performed qualitative assessment of FAF and spectral-domain (SD) OCT images to identify phenotypic differences. - - Main Outcome Measures - Differences in age at onset, disease duration-matched BCVA, time to severe visual impairment development, FAF atrophy area, FAF patterns, and genotypes. - - Results - Substantial differences in age at onset were present in 5 of 17 families, ranging from 13 to 39 years.
Median BCVA at baseline was 0.60 logMAR (range, -0.20 to 2.30 logMAR; Snellen equivalent, 20/80 [range, 20/12-hand movements]) in the right eye and 0.50 logMAR (range, -0.20 to 2.30 logMAR; Snellen equivalent, 20/63 [range, 20/12-hand movements]) in the left eye. Disease duration-matched BCVA was investigated in 12 of 17 families, and the median difference was 0.41 logMAR (range, 0.00-1.10 logMAR) for the right eye and 0.41 logMAR (range, 0.00-1.08 logMAR) for the left eye. We observed notable differences in time to severe visual impairment development in 7 families, ranging from 1 to 29 years. Median central retinal atrophy area was 11.38 mm2 in the right eye (range, 1.98-44.78 mm2) and 10.59 mm2 in the left eye (range, 1.61-40.59 mm2) and highly comparable between siblings. Similarly, qualitative FAF and SD OCT phenotypes were highly comparable between siblings. - - Conclusions - Phenotypic discordance between siblings with STGD1 carrying the same ABCA4 variants is a prevalent phenomenon. Although the FAF phenotypes are highly comparable between siblings, functional outcomes differ substantially. This complicates both sibling-based prognosis and genotype-phenotype correlations and has important implications for patient care and management.}, + To investigate intersibling phenotypic concordance in Stargardt disease (STGD1). + + Design + Retrospective cohort study. + + Participants + Siblings with genetically confirmed STGD1 and at least 1 available fundus autofluorescence (FAF) image of both eyes. + + Methods + We compared age at onset within families. Disease duration was matched to investigate differences in best-corrected visual acuity (BCVA) and compared the survival time for reaching severe visual impairment (<20/200 Snellen or >1.0 logarithm of the minimum angle of resolution [logMAR]). Central retinal atrophy area was quantified independently by 2 experienced graders using semiautomated software and compared between siblings. Both graders performed qualitative assessment of FAF and spectral-domain (SD) OCT images to identify phenotypic differences. + + Main Outcome Measures + Differences in age at onset, disease duration-matched BCVA, time to severe visual impairment development, FAF atrophy area, FAF patterns, and genotypes. + + Results + Substantial differences in age at onset were present in 5 of 17 families, ranging from 13 to 39 years. Median BCVA at baseline was 0.60 logMAR (range, -0.20 to 2.30 logMAR; Snellen equivalent, 20/80 [range, 20/12-hand movements]) in the right eye and 0.50 logMAR (range, -0.20 to 2.30 logMAR; Snellen equivalent, 20/63 [range, 20/12-hand movements]) in the left eye. Disease duration-matched BCVA was investigated in 12 of 17 families, and the median difference was 0.41 logMAR (range, 0.00-1.10 logMAR) for the right eye and 0.41 logMAR (range, 0.00-1.08 logMAR) for the left eye. We observed notable differences in time to severe visual impairment development in 7 families, ranging from 1 to 29 years. Median central retinal atrophy area was 11.38 mm2 in the right eye (range, 1.98-44.78 mm2) and 10.59 mm2 in the left eye (range, 1.61-40.59 mm2) and highly comparable between siblings. Similarly, qualitative FAF and SD OCT phenotypes were highly comparable between siblings. + + Conclusions + Phenotypic discordance between siblings with STGD1 carrying the same ABCA4 variants is a prevalent phenomenon. Although the FAF phenotypes are highly comparable between siblings, functional outcomes differ substantially. 
This complicates both sibling-based prognosis and genotype-phenotype correlations and has important implications for patient care and management.}, file = {Valk19a.pdf:pdf\\Valk19a.pdf:PDF}, optnote = {DIAG}, pmid = {31522899}, + ss_id = {edb057ef58b113686d38a747196234aa2409d517}, + all_ss_ids = {['edb057ef58b113686d38a747196234aa2409d517']}, + gscites = {16}, } @inproceedings{Vare03, @@ -24745,6 +30735,8 @@ @inproceedings{Vare03 month = {5}, gsid = {1721350618346932046}, gscites = {2}, + ss_id = {25296bf46f2f414dcb6a595df5cbd178db33760f}, + all_ss_ids = {['25296bf46f2f414dcb6a595df5cbd178db33760f']}, } @article{Vare05, @@ -24762,7 +30754,9 @@ @article{Vare05 pmid = {15890483}, month = {11}, gsid = {13242018771544321665}, - gscites = {46}, + gscites = {53}, + ss_id = {dd41e6d7a1923e0b947c4a7368c9a23b22570ec1}, + all_ss_ids = {['dd41e6d7a1923e0b947c4a7368c9a23b22570ec1']}, } @article{Vare06, @@ -24781,6 +30775,25 @@ @article{Vare06 month = {1}, gsid = {11079177198401411462}, gscites = {103}, + ss_id = {35ad1f356614dfd1fa89f11720f00e25546eb996}, + all_ss_ids = {['35ad1f356614dfd1fa89f11720f00e25546eb996']}, +} + +@article{Veen21, + author = {Veenhuizen, Stefanie G. A. and de Lange, St\'{e}phanie V. and Bakker, Marije F. and Pijnappel, Ruud M. and Mann, Ritse M. and Monninkhof, Evelyn M. and Emaus, Marleen J. and de Koekkoek-Doll, Petra K. and Bisschops, Robertus H. C. and Lobbes, Marc B. I. and de Jong, Mathijn D. F. and Duvivier, Katya M. and Veltman, Jeroen and Karssemeijer, Nico and de Koning, Harry J. and van Diest, Paul J. and Mali, Willem P. T. M. and van den Bosch, Maurice A. A. J. and van Gils, Carla H. and Veldhuis, Wouter B. and van Gils, C. H. and Bakker, M. F. and de Lange, S. V. and Veenhuizen, S. G. A. and Veldhuis, W. B. and Pijnappel, R. M. and Emaus, M. J. and Peeters, P. H. M. and Monninkhof, E. M. and Fernandez-Gallardo, M. A. and Mali, W. P. T. M. and van den Bosch, M. A. A. J. and van Diest, P. J. and Mann, R. M. and Mus, R. and Imhof-Tas, M. and Karssemeijer, N. and Loo, C. E. and de Koekkoek-Doll, P. K. and Winter-Warnars, H. A. O. and Bisschops, R. H. C. and Kock, M. C. J. M. and Storm, R. K. and van der Valk, P. H. M. and Lobbes, M. B. I. and Gommers, S. and Lobbes, M. B. I. and de Jong, M. D. F. and Rutten, M. J. C. M. and Duvivier, K. M. and de Graaf, P. and Veltman, J. and Bourez, R. L. J. H. and de Koning, H. J. 
and For the DENSE Trial Study Group}, + title = {Supplemental Breast MRI for Women with Extremely Dense Breasts: Results of the Second Screening Round of the DENSE Trial}, + doi = {10.1148/radiol.2021203633}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1148/radiol.2021203633}, + file = {Veen21.pdf:pdf\\Veen21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Radiology}, + automatic = {yes}, + citation-count = {52}, + pages = {278-286}, + volume = {299}, + pmid = {33724062}, +} @article{Veld00, @@ -24799,6 +30812,8 @@ @article{Veld00 month = {11}, gsid = {13216335598903608465}, gscites = {133}, + ss_id = {0d8168e66dd34b4039f31cc5f47482262c0be316}, + all_ss_ids = {['0d8168e66dd34b4039f31cc5f47482262c0be316']}, } @article{Veld00a, @@ -24816,6 +30831,8 @@ @article{Veld00a month = {7}, gsid = {5530446128328601250}, gscites = {88}, + ss_id = {a89e968c04e1a53c00ade6242c95151f7b0306a1}, + all_ss_ids = {['a89e968c04e1a53c00ade6242c95151f7b0306a1']}, } @phdthesis{Veld00b, @@ -24847,6 +30864,8 @@ @inproceedings{Veld99a month = {5}, gsid = {12269146113897155166}, gscites = {30}, + ss_id = {c928564a62aecc7fa44d7df8351dddb4205f5a2b}, + all_ss_ids = {['c928564a62aecc7fa44d7df8351dddb4205f5a2b']}, } @article{Vele21, @@ -24888,7 +30907,9 @@ @inproceedings{Veli08 abstract = {{T}he goal of breast cancer screening programs is to detect cancers at an early (preclinical) stage, by using periodic mammographic examinations in asymptomatic women. {I}n evaluating cases, mammographers insist on reading multiple images (at least two) of each breast as a cancerous lesion tends to be observed in different breast projections (views). {M}ost computer-aided detection ({CAD}) systems, on the other hand, only analyze single views independently, and thus fail to account for the interaction between the views. {I}n this paper, we propose a {B}ayesian framework for exploiting multi-view dependencies between the suspected regions detected by a single-view {CAD} system. {T}he results from experiments with real-life data show that our approach outperforms the single-view {CAD} system in distinguishing between normal and abnormal cases.
{S}uch a system can support screening radiologists to improve the evaluation of breast cancer cases.}, optnote = {DIAG, RADIOLOGY}, gsid = {18112098624718871737}, - gscites = {15}, + gscites = {16}, + ss_id = {284f37574c118db59ec67a3b1580faa4b9eb6c7e}, + all_ss_ids = {['284f37574c118db59ec67a3b1580faa4b9eb6c7e']}, } @inproceedings{Veli08a, @@ -24916,7 +30937,9 @@ @inproceedings{Veli08d file = {Veli08d.pdf:pdf\\Veli08d.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {2016380398303743237}, - gscites = {4}, + gscites = {6}, + ss_id = {77d011099d6238973e9650c55120bb45da1606c9}, + all_ss_ids = {['77d011099d6238973e9650c55120bb45da1606c9']}, } @article{Veli09, @@ -24935,6 +30958,8 @@ @article{Veli09 month = {1}, gsid = {17762538587165435446}, gscites = {62}, + ss_id = {035397ac5e8e55ade890c76064260a11bccd20a2}, + all_ss_ids = {['035397ac5e8e55ade890c76064260a11bccd20a2']}, } @inproceedings{Veli09a, @@ -24949,7 +30974,9 @@ @inproceedings{Veli09a file = {Veli09a.pdf:pdf\\Veli09a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {16954029226551321177}, - gscites = {2}, + gscites = {3}, + ss_id = {eeb232556baa984bf39d678b2619e58d47ee700d}, + all_ss_ids = {['eeb232556baa984bf39d678b2619e58d47ee700d']}, } @inbook{Veli10, @@ -24985,6 +31012,8 @@ @inproceedings{Veli10a pmid = {20841893}, gsid = {9105855608591043142}, gscites = {1}, + ss_id = {5afc31c4d231f5f54b5ee029573b80ee72ead116}, + all_ss_ids = {['5afc31c4d231f5f54b5ee029573b80ee72ead116']}, } @article{Veli12, @@ -25001,7 +31030,9 @@ @article{Veli12 pmid = {22326491}, month = {5}, gsid = {10983236706673950383}, - gscites = {30}, + gscites = {34}, + ss_id = {6e1eac7fb957bdd979dca2a234a9e02ee4656245}, + all_ss_ids = {['6e1eac7fb957bdd979dca2a234a9e02ee4656245']}, } @article{Veli13, @@ -25018,7 +31049,9 @@ @article{Veli13 pmid = {23395008}, month = {1}, gsid = {15041417062850503864}, - gscites = {36}, + gscites = {45}, + ss_id = {73de8d539bce0fed3f1d23153b169b00f35b4861}, + all_ss_ids = {['73de8d539bce0fed3f1d23153b169b00f35b4861']}, } @article{Velt08, @@ -25082,7 +31115,9 @@ @article{Velz20 pmid = {32043947}, month = {4}, gsid = {5150322759857162575}, - gscites = {10}, + gscites = {123}, + ss_id = {13c0cf335efd48efcb2f35187c5392e4a6e213b5}, + all_ss_ids = {['13c0cf335efd48efcb2f35187c5392e4a6e213b5']}, } @conference{Ven10b, @@ -25118,7 +31153,9 @@ @inproceedings{Ven11a file = {Ven11a.pdf:pdf\\Ven11a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, gsid = {837574192387609828}, - gscites = {8}, + gscites = {12}, + ss_id = {448cddc81736e13b617d55f2550ae3665dfb447e}, + all_ss_ids = {['448cddc81736e13b617d55f2550ae3665dfb447e']}, } @inproceedings{Ven13, @@ -25136,6 +31173,8 @@ @inproceedings{Ven13 month = {3}, gsid = {5774283271335261272}, gscites = {6}, + ss_id = {ef79b35cc95dd6c64d928f5043bc219be89d349f}, + all_ss_ids = {['ef79b35cc95dd6c64d928f5043bc219be89d349f']}, } @article{Ven13b, @@ -25153,7 +31192,9 @@ @article{Ven13b pmid = {23138386}, month = {11}, gsid = {13243942138839988061}, - gscites = {40}, + gscites = {45}, + ss_id = {9662f207e1ded0224a86f85f40dbaf2fd28dadd2}, + all_ss_ids = {['9662f207e1ded0224a86f85f40dbaf2fd28dadd2']}, } @article{Ven13c, @@ -25190,6 +31231,23 @@ @conference{Ven14 gscites = {3}, } +@article{Ven15, + author = {van de Ven, Wendy J. M. and Hu, Yipeng and Barentsz, Jelle O. 
and Karssemeijer, Nico and Barratt, Dean and Huisman, Henkjan J.}, + title = {Biomechanical modeling constrained surface-based image registration for prostate MR guided TRUS biopsy}, + doi = {10.1118/1.4917481}, + year = {2015}, + abstract = {Purpose: Adding magnetic resonance (MR)-derived information to standard transrectal ultrasound (TRUS) images for guiding prostate biopsy is of substantial clinical interest. A tumor visible on MR images can be projected on ultrasound (US) by using MR-US registration. A common approach is to use surface-based registration. The authors hypothesize that biomechanical modeling will better control deformation inside the prostate than a regular nonrigid surface-based registration method. The authors developed a novel method by extending a nonrigid surface-based registration algorithm with biomechanical finite element (FE) modeling to better predict internal deformations of the prostate. Methods: Data were collected from ten patients and the MR and TRUS images were rigidly registered to anatomically align prostate orientations. The prostate was manually segmented in both images and corresponding surface meshes were generated. Next, a tetrahedral volume mesh was generated from the MR image. Prostate deformations due to the TRUS probe were simulated using the surface displacements as the boundary condition. A three-dimensional thin-plate spline deformation field was calculated by registering the mesh vertices. The target registration errors (TREs) of 35 reference landmarks determined by surface and volume mesh registrations were compared. Results: The median TRE of a surface-based registration with biomechanical regularization was 2.76 (0.81-7.96) mm. This was significantly different from the median TRE of 3.47 (1.05-7.80) mm for regular surface-based registration without biomechanical regularization. Conclusions: Biomechanical FE modeling has the potential to improve the accuracy of multimodal prostate registration when comparing it to a regular nonrigid surface-based registration algorithm and can help to improve the effectiveness of MR guided TRUS biopsy procedures.}, + url = {http://dx.doi.org/10.1118/1.4917481}, + file = {Ven15.pdf:pdf\\Ven15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Medical Physics}, + automatic = {yes}, + citation-count = {17}, + pages = {2470-2481}, + volume = {42}, + pmid = {25979040}, +} @phdthesis{Ven16, author = {van de Ven, W.}, title = {MRI guided TRUS prostate biopsy - a viable alternative?}, @@ -25216,22 +31274,24 @@ @article{Ven16a doi = {10.1016/j.clinimag.2016.02.005}, url = {http://dx.doi.org/10.1016/j.clinimag.2016.02.005}, abstract = {Objectives - To determine TRUS visibility of MR lesions. - - Methods - Data from 34 patients with 56 MR lesions and prostatectomy was used. Five observers localized and determined TRUS visibility during retrospective fusion.
Visibility was correlated to PIRADS and Gleason scores. + + Results + TRUS visibility occurred in 43% of all MR lesions and 62% of PIRADS 5 lesions. Visible lesions had a significantly lower localization variability. On prostatectomy, 58% of the TRUS visible lesions had a Gleason 4 or 5 component. + + Conclusions + Almost half of the MR lesions were visible on TRUS. TRUS visible lesions were more aggressive than TRUS invisible lesions.}, file = {Ven16a.pdf:pdf\\Ven16a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {25979040}, publisher = {Elsevier {BV}}, gsid = {18254238654977133234}, - gscites = {15}, + gscites = {21}, + ss_id = {d267d21a0b48d6019c2fda657e64b4134b64a956}, + all_ss_ids = {['d267d21a0b48d6019c2fda657e64b4134b64a956']}, } @article{Ven16f, @@ -25246,7 +31306,9 @@ @article{Ven16f optnote = {DIAG, RADIOLOGY}, pmid = {27068817}, gsid = {177659092002416014}, - gscites = {6}, + gscites = {7}, + ss_id = {bceba1685bbddd5368222e849db3237cb7c25745}, + all_ss_ids = {['bceba1685bbddd5368222e849db3237cb7c25745']}, } @article{Vend17c, @@ -25259,18 +31321,20 @@ @article{Vend17c pages = {1849-1855}, doi = {10.1007/s00345-017-2085-6}, abstract = {PURPOSE: To compare clinically significant prostate cancer (csPCa) detection rates between magnetic resonance imaging (MRI)-transrectal ultrasound (TRUS) fusion-guided prostate biopsy (FGB) and direct in-bore MRI-guided biopsy (MRGB). - METHODS: - We performed a comparison of csPCa detection rates between FGB and MRGB. Included patients had (1) at least one prior negative TRUS biopsy; (2) a Prostate Imaging Reporting and Data System (PI-RADS) 4 or 5 lesion and (3) a lesion size of >=8 mm measured in at least one direction. We considered a Gleason score >=7 to be csPCa. Descriptive statistics with 95% confidence intervals (CI) were used to determine any differences. - RESULTS: - We included 51 patients with FGB (59% PI-RADS 4 and 41% PI-RADS 5) and 227 patients with MRGB (34% PI-RADS 4 and 66% PI-RADS 5). Included patients had a median age of 69 years (IQR, 65-72) and a median PSA level of 11.0 ng/ml (IQR, 7.4-15.1) and a median age of 67 years (IQR, 61-70), the median PSA 12.8 ng/ml (IQR, 9.1-19.0) within the FGB and the MRGB group, respectively. Detection rates of csPCa did not differ significantly between FGB and MRGB, 49 vs. 61%, respectively.
+ CONCLUSION: + We did not detect significant differences between FGB and MRGB in the detection of csPCa. The differences in detection ratios between both biopsy techniques are narrow with an increasing lesion size. This study warrants further studies to optimize selection of the best biopsy modality.}, file = {Vend17c.pdf:pdf\\Vend17c.pdf:PDF}, optnote = {DIAG, MAGIC, RADIOLOGY}, pmid = {28871396}, month = {9}, gsid = {9029962091723191841}, - gscites = {18}, + gscites = {36}, + ss_id = {b51d0edf0370b89b246e7e67f58640ffebd2e224}, + all_ss_ids = {['b51d0edf0370b89b246e7e67f58640ffebd2e224']}, } @article{Vend18, @@ -25280,17 +31344,30 @@ @article{Vend18 pages = {219-227}, volume = {4}, abstract = {CONTEXT: The main difference between the available magnetic resonance imaging-transrectal ultrasound (MRI-TRUS) fusion platforms for prostate biopsy is the method of image registration being either rigid or elastic. As elastic registration compensates for possible deformation caused by the introduction of an ultrasound probe for example, it is expected that it would perform better than rigid registration. - OBJECTIVE: The aim of this meta-analysis is to compare rigid with elastic registration by calculating the detection odds ratio (OR) for both subgroups. The detection OR is defined as the ratio of the odds of detecting clinically significant prostate cancer (csPCa) by MRI-TRUS fusion biopsy compared with systematic TRUS biopsy. Secondary objectives were the OR for any PCa and the OR after pooling both registration techniques.
+ EVIDENCE ACQUISITION: The electronic databases PubMed, Embase, and Cochrane were systematically searched for relevant studies according to the Preferred Reporting Items for Systematic Review and Meta-analysis Statement. Studies comparing MRI-TRUS fusion and systematic TRUS-guided biopsies in the same patient were included. The quality assessment of included studies was performed using the Quality Assessment of Diagnostic Accuracy Studies version 2. + EVIDENCE SYNTHESIS: Eleven papers describing elastic and 10 describing rigid registration were included. Meta-analysis showed an OR of csPCa for elastic and rigid registration of 1.45 (95% confidence interval [CI]: 1.21-1.73, p<0.0001) and 1.40 (95% CI: 1.13-1.75, p=0.002), respectively. No significant difference was seen between the subgroups (p=0.83). Pooling subgroups resulted in an OR of 1.43 (95% CI: 1.25-1.63, p<0.00001). + CONCLUSIONS: No significant difference was identified between rigid and elastic registration for MRI-TRUS fusion-guided biopsy in the detection of csPCa; however, both techniques detected more csPCa than TRUS-guided biopsy alone. + PATIENT SUMMARY: We did not identify any significant differences in prostate cancer detection between two distinct magnetic resonance imaging-transrectal ultrasound fusion systems which vary in their method of compensating for prostate deformation.}, file = {:pdf/Vend18.pdf:PDF}, journal = EUF, optnote = {DIAG, MAGIC, RADIOLOGY}, pmid = {28753777}, year = {2018}, month = {3}, + all_ss_ids = {['2a846d2cba5a8f0db089e7f2af275a535b15d82f', 'acaba0aa6a8a55104ae2cb09f093aa30621ffb47', '974e8ae2bfa594ba157da2794a3254d12eb7bf26']}, + gscites = {54}, +} + +@inproceedings{Vend23, + author = {Vendittelli, Pierpaolo and Bokhorst, John-Melle and Smeets, Esther Markus and Kryklyva, Valentyna and Brosens, Lodewijk and Verbeke, Caroline and Litjens, Geert}, + booktitle = MIDL, + title = {Automatic quantification of {TSR} as a prognostic marker for pancreatic cancer.}, + url = {https://openreview.net/forum?id=Dtz_iaUpGc}, + abstract = {The current diagnostic and outcome prediction methods for pancreatic cancer lack prognostic power. As such, identifying novel biomarkers using machine learning has become of increasing interest. In this study, we introduce a novel method for estimating the tumor-stroma ratio (TSR) in whole slide images (WSIs) of pancreatic tissue and assess its potential as a prognostic biomarker. A multi-step strategy for estimating TSR is proposed, including epithelium segmentation based on an immunohistochemical reference standard, a coarse pancreatic cancer segmentation, and a post-processing pipeline for TSR quantification. The resultant segmentation models are validated on external test sets using the Dice coefficient, and additionally, the TSR's potential as a prognostic factor is assessed using survival analysis, resulting in a C-index of 0.61.}, + file = {:pdf/Vend23.pdf:PDF}, + optnote = {DIAG, PATHOLOGY, RADIOLOGY}, + year = {2023}, } @conference{Venh15a, @@ -25300,6 +31377,8 @@ @conference{Venh15a year = {2015}, abstract = {Purpose: Central serous chorioretinopathy ({CSC}) is an ocular disorder characterized by serous retinal detachment and associated with fluid accumulation beneath the retina. Obtaining accurate measures on the size and volume of the fluid deposit may be an important biomarker to assess disease progression and treatment outcome. We developed a system for automatic volumetric quantification of subretinal fluid in optical coherence tomography ({OCT}). 
Methods: OCT images obtained from 15 patients with varying presence of subretinal fluid were selected from the clinic. A 3D region-growing-based algorithm was developed to segment the fluid after selecting an arbitrary seed point located in the fluid deposit. The obtained total volume and the extent of the segmented fluid volume were compared to manual delineations made by two experienced human graders. Results: A high intra-class correlation coefficient ({ICC}) value (0.997) was obtained when comparing the fluid volume calculated by the proposed method with the volume delineated by the two graders. Similarly, the spatial overlap agreement on the obtained volumes, measured with the Dice similarity coefficient ({DC}), between the manual delineations and the software output was high ({DC}=0.87) and comparable to the overlap agreement between observers' delineations ({DC}=0.85). In addition, the quantification time was reduced substantially by a factor of 5 compared to manual assessment. The quantified values obtained by the algorithm were shown to be highly reproducible, obtaining a {DC} value of 0.99 and an ICC value of 0.98 when varying the seed point used for initializing the algorithm. Conclusions: An image analysis algorithm for the automatic quantification of subretinal fluid in {OCT} images of {CSC} patients was developed. The proposed algorithm is able to accurately quantify the extent of fluid deposits in a fast and reproducible manner, allowing accurate assessment of disease progression and treatment outcome.}, optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {a11db962a303ac6cb6ff4216659189c2ed378c21}, + gscites = {0}, } @inproceedings{Venh15b, @@ -25316,7 +31395,9 @@ @inproceedings{Venh15b number = {94141I}, month = {3}, gsid = {6530553375448056275}, - gscites = {45}, + gscites = {72}, + ss_id = {ae6efa478f3ecc99f2273effad86f7a8a78a333a}, + all_ss_ids = {['ae6efa478f3ecc99f2273effad86f7a8a78a333a']}, } @conference{Venh15c, @@ -25327,6 +31408,8 @@ @conference{Venh15c abstract = {Major causes of blindness in developed countries are retinal vascular diseases, essentially neovascular age-related macular degeneration ({AMD}), retinal vein occlusion ({RVO}) and diabetic maculopathy ({DMP}). Automated computer-aided detection and diagnosis~({CAD}) systems capable of detecting, classifying and quantifying the characteristics of the pathology associated with these retinal diseases are highly beneficial in the diagnosis, treatment prediction and the assessment of treatment progression. Among others, the presence of retinal cysts is an important biomarker in AMD and RVO, thus their detection and segmentation are beneficial to clinical disease analysis. Optical Coherence Tomography ({OCT}) imaging has the ability to visualize and analyze the morphology of the retina, as well as its abnormalities. Due to the technological advances in OCT imaging with regard to imaging speed, image quality, and functional analysis, OCT is rapidly becoming one of the main imaging modalities used in clinical practice, and for the quantification and analysis of disease-specific biomarkers such as cyst volume. In this work we propose a fully automatic CAD system for retinal cyst segmentation in OCT volume scans.
The system is capable of detecting cysts in OCT volumes acquired with OCT scanners from different vendors, for which the amount of noise, image quality and contrast varies strongly.}, file = {venh15a.pdf:pdf\\venh15a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, + all_ss_ids = {1d3c69edf9e573412de0c758b3db1b8f9996f2c1}, + gscites = {13}, } @conference{Venh16a, @@ -25336,6 +31419,8 @@ @conference{Venh16a abstract = {Purpose: In age-related macular degeneration (AMD) the presence of intraretinal cysts is an important prognostic biomarker. Optical coherence tomography imaging has been shown to be capable of accurately visualizing and characterizing the three-dimensional shape and extent of cysts. The detection and quantification of intraretinal cysts is highly beneficial for the prediction of treatment outcome and the assessment of the treatment progression. To aid the clinician with quantified information regarding cysts, we developed a fully automated system capable of detecting and delineating intraretinal cysts in 3D optical coherence tomography images. Methods: For this study a total of 30 OCT volumes acquired using four different brands of OCT scanners were provided by the OPTIMA cyst segmentation challenge, containing a wide variety of retinal cysts together with manual cyst delineations. A pixel classifier based on a multiscale convolutional neural network (CNN) was developed to predict if an image pixel belongs to a cyst or to the background by considering a small neighborhood around the pixel of interest. The CNN follows a two-stage approach, where in the first stage, multiple CNNs are used in parallel to obtain a segmentation at different image scales. In the second stage, the individual segmentations are merged, combining local information obtained with the lower scale network with contextual information obtained from the higher scale networks. After providing the neural network with enough training samples, the network can automatically detect and segment cysts in OCT volumes. The obtained segmentations were compared to manual delineations made by two experienced human graders. Results: The spatial overlap agreement on the obtained volumes, measured by the Dice similarity coefficient, between the manual delineations and the software output was 0.55, which is substantial considering the difficulty of the task. In addition, the quantification time was reduced dramatically, and takes only a few seconds for a complete OCT volume. Conclusions: An image analysis algorithm for the automatic quantification of intraretinal cysts in OCT images was developed. The proposed algorithm is able to detect and quantify the three-dimensional shape and extent of cysts in a fast and reproducible manner, allowing accurate assessment of disease progression and treatment outcome.}, optnote = {DIAG, RADIOLOGY}, year = {2016}, + all_ss_ids = {a1b328c04b54decb972bf83458f9df4ab5608af3}, + gscites = {11}, } @conference{Venh17, @@ -25343,32 +31428,32 @@ @conference{Venh17 booktitle = ARVO, title = {Fully automated detection of hyperreflective foci in optical coherence tomography}, abstract = {Purpose: Diabetic macular edema ({DME}) is a retinal disorder characterized by a buildup of cystoidal fluid in the retina. - The typical treatment consists of monthly intravitreal anti vascular endothelial growth factor (anti-VEGF) injections. - However, the efficacy of this treatment varies strongly.
- Recent studies have indicated that the presence and number of hyperreflective foci can possibly be considered a prognostic biomarker for treatment response in {DME}. - As the detection of foci is difficult and time-consuming manual foci quantification seems infeasible. - We therefore developed a fully automated system capable of detecting and quantifying foci in optical coherence tomography ({OCT}) images. - Methods: - 119 fovea centered B-scans obtained from 49 patients with {DME} were selected from a clinical database. - The data was divided in a training set of 96 {B}-scans from 40 patients, and a test set containing 23 {B}-scans from 9 patients. - A convolutional neural network ({CNN}) was developed to predict if an image pixel belongs to a hyperreflective focus by considering a small neighborhood around the pixel of interest. - The {CNN} consists of 7 convolutional layers and 2 max pooling layers. - After providing the system with enough training samples, the network automatically detects pixels with a high probability of being part of a hyperreflective focus. - Connected detections are considered as a single detection. - The obtained results were compared to manual annotations made by two experienced human graders in consensus for the central 3 mm surrounding the fovea. - Hyperreflective foci were only annotated in the layers ranging from the inner plexiform layer ({IPL}) to the outer nuclear layer ({ONL}) as manual detection is challenging in the other layers. - When a detection is overlapping with an annotated focus it is considered a true positive, otherwise it is counted as a false positive. - - Results: - - In the independent test set a sensitivity of 0.83 was obtained. - At this level of sensitivity, an average of 8.3 false positives per {B}-scan were detected. - False positives were mainly caused by detections outside the selected range ({ILP} to {ONL}) and misdetections by the graders. - - Conclusions: - - An image analysis algorithm for the automatic detection and quantification of hyperreflective foci in {OCT} {B}-scans was developed. - The experiments show promising results to obtain quantitative foci based biomarkers that can be used for the prediction of treatment response in {DME}.}, + The typical treatment consists of monthly intravitreal anti-vascular endothelial growth factor (anti-VEGF) injections. + However, the efficacy of this treatment varies strongly. + Recent studies have indicated that the presence and number of hyperreflective foci can possibly be considered a prognostic biomarker for treatment response in {DME}. + As the detection of foci is difficult and time-consuming, manual foci quantification seems infeasible. + We therefore developed a fully automated system capable of detecting and quantifying foci in optical coherence tomography ({OCT}) images. + Methods: + 119 fovea-centered B-scans obtained from 49 patients with {DME} were selected from a clinical database. + The data was divided into a training set of 96 {B}-scans from 40 patients, and a test set containing 23 {B}-scans from 9 patients. + A convolutional neural network ({CNN}) was developed to predict if an image pixel belongs to a hyperreflective focus by considering a small neighborhood around the pixel of interest. + The {CNN} consists of 7 convolutional layers and 2 max pooling layers. + After providing the system with enough training samples, the network automatically detects pixels with a high probability of being part of a hyperreflective focus.
+ Connected detections are considered a single detection. + The obtained results were compared to manual annotations made by two experienced human graders in consensus for the central 3 mm surrounding the fovea. + Hyperreflective foci were only annotated in the layers ranging from the inner plexiform layer ({IPL}) to the outer nuclear layer ({ONL}) as manual detection is challenging in the other layers. + When a detection overlaps with an annotated focus, it is considered a true positive, otherwise it is counted as a false positive. + + Results: + + In the independent test set a sensitivity of 0.83 was obtained. + At this level of sensitivity, an average of 8.3 false positives per {B}-scan were detected. + False positives were mainly caused by detections outside the selected range ({IPL} to {ONL}) and misdetections by the graders. + + Conclusions: + + An image analysis algorithm for the automatic detection and quantification of hyperreflective foci in {OCT} {B}-scans was developed. + The experiments show promising results to obtain quantitative foci-based biomarkers that can be used for the prediction of treatment response in {DME}.}, optnote = {DIAG, RADIOLOGY}, year = {2017}, } @article{Venh17a, @@ -25383,24 +31468,26 @@ @article{Venh17a pages = {2318-2328}, doi = {10.1167/iovs.16-20541}, abstract = {Purpose: To evaluate a machine learning algorithm that automatically grades age-related macular degeneration (AMD) severity stages from optical coherence tomography (OCT) scans. - Methods: A total of 3265 {OCT} scans from 1016 patients with either no signs of {AMD} or with signs of early, intermediate, or advanced {AMD} were randomly selected from a large European multicenter database. - A machine learning system was developed to automatically grade unseen {OCT} scans into different {AMD} severity stages without requiring retinal layer segmentation. + Methods: A total of 3265 {OCT} scans from 1016 patients with either no signs of {AMD} or with signs of early, intermediate, or advanced {AMD} were randomly selected from a large European multicenter database. + A machine learning system was developed to automatically grade unseen {OCT} scans into different {AMD} severity stages without requiring retinal layer segmentation.
+ The ability of the system to identify high-risk {AMD} stages and to assign the correct severity stage was determined by using receiver operating characteristic ({ROC}) analysis and {C}ohen's Kappa statistics, respectively. + The results were compared to those of two human observers. + Reproducibility was assessed in an independent, publicly available data set of 384 {OCT} scans. + Results: + The system achieved an area under the {ROC} curve of 0.980 with a sensitivity of 98.2% at a specificity of 91.2%. + This compares favorably with the performance of human observers who achieved sensitivities of 97.0% and 99.4% at specificities of 89.7% and 87.2%, respectively. + A good level of agreement with the reference was obtained (Kappa = 0.713) and was in concordance with the human observers (Kappa = 0.775 and Kappa = 0.755, respectively). + Conclusions: + A machine learning system capable of automatically grading {OCT} scans into {AMD} severity stages was developed and showed similar performance to human observers. + The proposed automatic system allows for a quick and reliable grading of large quantities of {OCT} scans, which could increase the efficiency of large-scale AMD studies and pave the way for {AMD} screening using {OCT}.}, file = {Venh17a.pdf:pdf\\Venh17a.pdf:PDF}, optnote = {DIAG}, pmid = {28437528}, month = {4}, gsid = {8934997778421574924}, - gscites = {37}, + gscites = {91}, + ss_id = {e29513095896df886ebef0e1c8f8e40b6990cd70}, + all_ss_ids = {['e29513095896df886ebef0e1c8f8e40b6990cd70']}, } @article{Venh17b, @@ -25413,16 +31500,18 @@ @article{Venh17b pages = {3292-3316}, doi = {10.1364/BOE.8.003292}, abstract = {We developed a fully automated system using a convolutional neural network ({CNN}) for total retina segmentation in optical coherence tomography ({OCT}) that is robust to the presence of severe retinal pathology. - A generalized U-net network architecture was introduced to include the large context needed to account for large retinal changes. - The proposed algorithm outperformed two available algorithms both qualitatively and quantitatively. - The algorithm accurately estimated macular thickness with an error of 14.0 +- 22.1 micrometer, substantially lower than the error obtained using the other algorithms (42.9 +- 22.1 micrometer and 27.1 +- 69.3 micrometer, respectively).
+ These results highlighted the proposed algorithm's capability of modeling the wide variability in retinal appearance and obtained a robust and reliable retina segmentation even in severe pathological cases.}, file = {Venh17b.pdf:pdf\\Venh17b.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {28717568}, month = {6}, gsid = {14231747073311085579}, - gscites = {59}, + gscites = {114}, + ss_id = {855320ff1a3bda22505a8f9da2c24f56c5ee7712}, + all_ss_ids = {['855320ff1a3bda22505a8f9da2c24f56c5ee7712']}, } @article{Venh18, @@ -25439,6 +31528,9 @@ @article{Venh18 optnote = {DIAG, RADIOLOGY}, pmid = {29675301}, month = {3}, + ss_id = {c4c99492376b43d7120067f806aa2d000fbc0b2f}, + all_ss_ids = {['374f4a7676183c95f901e655f2caf170cdd9ec9d', 'c4c99492376b43d7120067f806aa2d000fbc0b2f']}, + gscites = {112}, } @phdthesis{Venh19, @@ -25470,6 +31562,8 @@ @article{Veni04 month = {6}, gsid = {9566812802239413729}, gscites = {53}, + ss_id = {62f44c20139e5638ca3f9b67bb446ebc8c4bac25}, + all_ss_ids = {['62f44c20139e5638ca3f9b67bb446ebc8c4bac25']}, } @conference{Venk20, @@ -25495,6 +31589,9 @@ @article{Venk21 optnote = {DIAG, RADIOLOGY}, pmid = {34003056}, year = {2021}, + ss_id = {80ec09e2cb8244cab245f1108496af731d160ebf}, + all_ss_ids = {['80ec09e2cb8244cab245f1108496af731d160ebf']}, + gscites = {51}, } @conference{Venk21a, @@ -25502,10 +31599,10 @@ @conference{Venk21a booktitle = RSNA, title = {Integration Of A Deep Learning Algorithm Into The Clinically Established PanCan Model For Malignancy Risk Estimation Of Screen-detected Pulmonary Nodules In First Screening CT}, abstract = {PURPOSE: To quantify the added value of integrating a deep learning algorithm (DLA)'s output to the existing Pan-Canadian Early Detection of Lung Cancer Study (PanCan) models for estimating malignancy risk of screen-detected pulmonary nodules. - METHODS AND MATERIALS: Our DLA was trained on a cohort of 14,828 benign and 1,249 malignant nodules from the National Lung Screening Trial. In the present study, we derived a new multivariable logistic regression model on the PanCan data that included the DLA risk score and the original variables from the PanCan model 2b except for "nodule type" and "spiculation" as these are already encoded in the DLA risk score. The new model was externally validated on baseline nodules from the Danish Lung Cancer Screening Trial (DLCST). For comparison, the performances of the existing PanCan model 2b and of our DLA stand-alone were also calculated. - RESULTS: 6024 benign and 86 malignant nodules from the PanCan data were included as the development set, and 818 benign and 34 malignant nodules from the Danish Lung Cancer Screening Trial (DLCST) were included as the validation set. The area under the receiver operating characteristic curve (AUC) for the DLA, PanCan model 2b, and the new model in the PanCan cohort were 0.944 (95% confidence interval = 0.917 - 0.968), 0.941 (0.908 - 0.969), and 0.944 (0.909 - 0.975), respectively. In the DLCST cohort, the AUCs were 0.917 (0.851 - 0.968), 0.896 (0.841 - 0.944), and 0.927 (0.878 - 0.969), respectively. - CONCLUSIONS: Using our DLA risk score to derive a new multivariable logistic regression model on the PanCan data does not appear to significantly improve the predictive performance in high-risk screening participants, but may serve as a replacement for the "nodule type" and "spiculation" parameters that are known to have substantial interobserver variability. 
- CLINICAL RELEVANCE / APPLICATION: Our DLA has a comparable nodule malignancy risk estimation performance to the PanCan models. This may help to make the computation of nodule risk scores easier and less subjective.}, + METHODS AND MATERIALS: Our DLA was trained on a cohort of 14,828 benign and 1,249 malignant nodules from the National Lung Screening Trial. In the present study, we derived a new multivariable logistic regression model on the PanCan data that included the DLA risk score and the original variables from the PanCan model 2b except for "nodule type" and "spiculation" as these are already encoded in the DLA risk score. The new model was externally validated on baseline nodules from the Danish Lung Cancer Screening Trial (DLCST). For comparison, the performances of the existing PanCan model 2b and of our DLA stand-alone were also calculated. + RESULTS: 6024 benign and 86 malignant nodules from the PanCan data were included as the development set, and 818 benign and 34 malignant nodules from the Danish Lung Cancer Screening Trial (DLCST) were included as the validation set. The area under the receiver operating characteristic curve (AUC) for the DLA, PanCan model 2b, and the new model in the PanCan cohort were 0.944 (95% confidence interval = 0.917 - 0.968), 0.941 (0.908 - 0.969), and 0.944 (0.909 - 0.975), respectively. In the DLCST cohort, the AUCs were 0.917 (0.851 - 0.968), 0.896 (0.841 - 0.944), and 0.927 (0.878 - 0.969), respectively. + CONCLUSIONS: Using our DLA risk score to derive a new multivariable logistic regression model on the PanCan data does not appear to significantly improve the predictive performance in high-risk screening participants, but may serve as a replacement for the "nodule type" and "spiculation" parameters that are known to have substantial interobserver variability. + CLINICAL RELEVANCE / APPLICATION: Our DLA has a comparable nodule malignancy risk estimation performance to the PanCan models. This may help to make the computation of nodule risk scores easier and less subjective.}, optnote = {DIAG, RADIOLOGY}, year = {2021}, } @@ -25515,37 +31612,73 @@ @conference{Venk22 booktitle = ECR, title = {Deep learning for estimating pulmonary nodule malignancy risk using prior CT examinations in lung cancer screening}, abstract = {Purpose or Learning Objective: Nodule size, morphology, and growth are important factors for accurately estimating nodule malignancy risk in lung cancer screening CT examinations. In this work, we aimed to develop a deep learning (DL) algorithm that uses a current and a prior CT examination to estimate the malignancy risk of pulmonary nodules. - - Methods or Background: We developed a dual time-point DL algorithm by stacking the nodules from the current and prior CT examinations in the input channels of convolutional neural networks. We used 3,011 nodules (286 malignant) and 994 nodules (73 malignant) as development and hold-out test cohorts from the National Lung Screening Trial, respectively. The reference standard was set by histopathologic confirmation or CT follow-up of more than two years. We compared the performance of the algorithm against PanCan model 2b and a previously published single time-point DL algorithm that only processed a single CT examination. We used the area under the receiver operating characteristic curve (AUC) to measure discrimination performance and a standard permutation test with 10,000 random permutations to compute p-values. 
- - Results or Findings: The dual time-point DL algorithm achieved an AUC of 0.94 (95% CI: 0.91 - 0.97) on the hold-out test cohort. The algorithm outperformed the single time-point DL algorithm and the PanCan model, which had AUCs of 0.92 (95% CI: 0.89 - 0.95; p = 0.055) and 0.88 (95% CI: 0.85 - 0.91; p < 0.001), respectively. - - Conclusion: Deep learning algorithms using current and prior CT examinations have the potential to accurately estimate the malignancy risk of pulmonary nodules. - - Limitations: External validation is needed on other screening datasets to generate further evidence. - - Ethics committee approval: Institutional review board approval was obtained at each of the 33 centers involved in the NLST. - - Funding for this study: Research grant from MeVis Medical Solutions AG.}, + + Methods or Background: We developed a dual time-point DL algorithm by stacking the nodules from the current and prior CT examinations in the input channels of convolutional neural networks. We used 3,011 nodules (286 malignant) and 994 nodules (73 malignant) as development and hold-out test cohorts from the National Lung Screening Trial, respectively. The reference standard was set by histopathologic confirmation or CT follow-up of more than two years. We compared the performance of the algorithm against PanCan model 2b and a previously published single time-point DL algorithm that only processed a single CT examination. We used the area under the receiver operating characteristic curve (AUC) to measure discrimination performance and a standard permutation test with 10,000 random permutations to compute p-values. + + Results or Findings: The dual time-point DL algorithm achieved an AUC of 0.94 (95% CI: 0.91 - 0.97) on the hold-out test cohort. The algorithm outperformed the single time-point DL algorithm and the PanCan model, which had AUCs of 0.92 (95% CI: 0.89 - 0.95; p = 0.055) and 0.88 (95% CI: 0.85 - 0.91; p < 0.001), respectively. + + Conclusion: Deep learning algorithms using current and prior CT examinations have the potential to accurately estimate the malignancy risk of pulmonary nodules. + + Limitations: External validation is needed on other screening datasets to generate further evidence. + + Ethics committee approval: Institutional review board approval was obtained at each of the 33 centers involved in the NLST. + + Funding for this study: Research grant from MeVis Medical Solutions AG.}, optnote = {DIAG, RADIOLOGY}, year = {2022}, } +@article{Venk23, + author = {Venkadesh, Kiran Vaidhya and Aleef, Tajwar Abrar and Scholten, Ernst T. and Saghir, Zaigham and Silva, Mario and Sverzellati, Nicola and Pastorino, Ugo and van Ginneken, Bram and Prokop, Mathias and Jacobs, Colin}, + title = {Prior CT Improves Deep Learning for Malignancy Risk Estimation of Screening-detected Pulmonary Nodules}, + doi = {10.1148/radiol.223308}, + url = {http://dx.doi.org/10.1148/radiol.223308}, + volume = {308}, + number = {2}, + algorithm = {https://grand-challenge.org/algorithms/temporal-nodule-analysis/}, + abstract = {Background + Prior chest CT provides valuable temporal information (eg, changes in nodule size or appearance) to accurately estimate malignancy risk. + + Purpose + To develop a deep learning (DL) algorithm that uses a current and prior low-dose CT examination to estimate 3-year malignancy risk of pulmonary nodules. 
+ + Materials and Methods + In this retrospective study, the algorithm was trained using National Lung Screening Trial data (collected from 2002 to 2004), wherein patients were imaged at most 2 years apart, and evaluated with two external test sets from the Danish Lung Cancer Screening Trial (DLCST) and the Multicentric Italian Lung Detection Trial (MILD), collected in 2004-2010 and 2005-2014, respectively. Performance was evaluated using area under the receiver operating characteristic curve (AUC) on cancer-enriched subsets with size-matched benign nodules imaged 1 and 2 years apart from DLCST and MILD, respectively. The algorithm was compared with a validated DL algorithm that only processed a single CT examination and the Pan-Canadian Early Lung Cancer Detection Study (PanCan) model. + + Results + The training set included 10 508 nodules (422 malignant) in 4902 trial participants (mean age, 64 years +- 5 [SD]; 2778 men). The size-matched external test sets included 129 nodules (43 malignant) and 126 nodules (42 malignant). The algorithm achieved AUCs of 0.91 (95% CI: 0.85, 0.97) and 0.94 (95% CI: 0.89, 0.98). It significantly outperformed the DL algorithm that only processed a single CT examination (AUC, 0.85 [95% CI: 0.78, 0.92; P = .002]; and AUC, 0.89 [95% CI: 0.84, 0.95; P = .01]) and the PanCan model (AUC, 0.64 [95% CI: 0.53, 0.74; P < .001]; and AUC, 0.63 [95% CI: 0.52, 0.74; P < .001]). + + Conclusion + A DL algorithm using current and prior low-dose CT examinations was more effective at estimating 3-year malignancy risk of pulmonary nodules than established models that only use a single CT examination.}, + citation-count = {0}, + file = {Venk23.pdf:pdf\Venk23.pdf:PDF}, + journal = {Radiology}, + pages = {e223308}, + pmid = {37526548}, + optnote = {DIAG, RADIOLOGY}, + year = {2023}, + ss_id = {f9dd350b17a4b0bce9beef6c372a375315361aad}, + all_ss_ids = {['f9dd350b17a4b0bce9beef6c372a375315361aad']}, + gscites = {1}, +} + @conference{Vent20, author = {de Vente, Coen and van Grinsven, Mark and De Zanet, Sandro and Mosinska, Agata and Sznitman, Raphael and Klaver, Caroline and S\'{a}nchez, Clara I.}, booktitle = ARVO, title = {Estimating Uncertainty of Deep Neural Networks for Age-related Macular Degeneration Grading using Optical Coherence Tomography}, abstract = {Purpose: Deep convolutional neural networks (CNNs) are increasingly being used for eye disease screening and diagnosis. Especially the best performing variants, however, are generally overconfident in their predictions. For usefulness in clinical practice and increasing clinicians' trust on the estimated diagnosis, well-calibrated uncertainty estimates are necessary. We present a method for providing confidence scores of CNNs for age-related macular degeneration (AMD) grading in optical coherence tomography (OCT). - - - Methods: 1,264 OCT volumes from 633 patients from the European Genetic Database (EUGENDA) were graded as one of five stages of AMD (No AMD, Early AMD, Intermediate AMD, Advanced AMD: GA, and Advanced AMD: CNV). Ten different 3D DenseNet-121 models that take a full OCT volume as input were used to predict the corresponding AMD stage. These networks were all trained on the same dataset. However, each of these networks were initialized differently. The class with the maximum average softmax output of these models was used as the final prediction. The confidence measure was the normalized average softmax output for that class. 
- - Results: The algorithm achieved an area under the Receiver Operating Characteristic curve of 0.9785 and a quadratic-weighted kappa score of 0.8935. The mean uncertainty, calculated as 1 - the mean confidence score, for incorrect predictions was 1.9 times as high as the mean uncertainty for correct predictions. When only using the probability output of a single network, this ratio was 1.4. Another measure for uncertainty estimation performance is the Expected Calibration Error (ECE), where a lower value is better. When comparing the method to the probability output of a single network, the ECE improved from 0.0971 to 0.0324. Figure 1 shows examples of both confident and unconfident predictions. - - Conclusions: We present a method for improving uncertainty estimation for AMD grading in OCT, by combining the output of multiple individually trained CNNs. This increased reliability of system confidences can contribute to building trust in CNNs for retinal disease screening. Furthermore, this technique is a first step towards selective prediction in retinal disease screening, where only cases with high uncertainty predictions need to be referred for expert evaluation.}, + + + Methods: 1,264 OCT volumes from 633 patients from the European Genetic Database (EUGENDA) were graded as one of five stages of AMD (No AMD, Early AMD, Intermediate AMD, Advanced AMD: GA, and Advanced AMD: CNV). Ten different 3D DenseNet-121 models that take a full OCT volume as input were used to predict the corresponding AMD stage. These networks were all trained on the same dataset. However, each of these networks was initialized differently. The class with the maximum average softmax output of these models was used as the final prediction. The confidence measure was the normalized average softmax output for that class. + + Results: The algorithm achieved an area under the Receiver Operating Characteristic curve of 0.9785 and a quadratic-weighted kappa score of 0.8935. The mean uncertainty, calculated as 1 - the mean confidence score, for incorrect predictions was 1.9 times as high as the mean uncertainty for correct predictions. When only using the probability output of a single network, this ratio was 1.4. Another measure for uncertainty estimation performance is the Expected Calibration Error (ECE), where a lower value is better. When comparing the method to the probability output of a single network, the ECE improved from 0.0971 to 0.0324. Figure 1 shows examples of both confident and unconfident predictions. + + Conclusions: We present a method for improving uncertainty estimation for AMD grading in OCT, by combining the output of multiple individually trained CNNs. This increased reliability of system confidences can contribute to building trust in CNNs for retinal disease screening.
Furthermore, this technique is a first step towards selective prediction in retinal disease screening, where only cases with high uncertainty predictions need to be referred for expert evaluation.}, optnote = {DIAG, RADIOLOGY}, year = {2020}, month = {6}, + all_ss_ids = {fc12f80e0fe56243c26f628d311577507f34b39c}, + gscites = {2}, } @article{Vent21, @@ -25562,6 +31695,8 @@ @article{Vent21 file = {Vent21.pdf:pdf\\Vent21.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/251973}, + all_ss_ids = {['41f386d38567e38132525cad9bdc7da1ad6e8f1c', '441d6e8ba2cc2314d7d44425a158c25e27f9cb96']}, + gscites = {16}, } @conference{Vent21a, @@ -25570,26 +31705,57 @@ @conference{Vent21a url = {https://iovs.arvojournals.org/article.aspx?articleid=2775505}, title = {Making AI Transferable Across OCT Scanners from Different Vendors}, abstract = {Purpose: Deep neural networks (DNNs) for optical coherence tomography (OCT) classification have been proven to work well on images from scanners that were used during training. However, since the appearance of OCT scans can differ greatly between vendors, these DNNs often fail when they are applied to scans from different manufacturers. We propose a DNN architecture for age-related macular degeneration (AMD) grading that maintains performance on OCTs from vendors not included during training. + + Methods: 2,598 and 680 Heidelberg Spectralis OCT scans from the European Genetic Database were used for development and testing, respectively. We tested transferability with 339 AMD-enriched Topcon OCTs from the Rotterdam Study. AMD severity classification was determined manually in accordance with the Cologne Image Reading Center and Laboratory and Rotterdam Classification, respectively. Classifications were harmonized for the evaluation of the DNNs. The proposed DNN considers each B-scan separately using a 2D ResNet-18, and internally combines the intermediate outputs related to each B-scan using a multiple instance learning approach. Even though the proposed DNN provides both B-scan level and OCT-volume level decisions, the architecture is trained end-to-end using only full volume gradings. This specific architecture makes our method robust to the variability of scanning protocols across vendors, as it is invariant to B-scan spacing. We compare this approach to a baseline that classifies the full OCT scan directly using a 3D ResNet-18. + + Results: The quadratic weighted kappa (QWK) for the baseline method dropped from 0.852 on the Heidelberg Spectralis dataset to 0.523 on the Topcon dataset. This QWK drop was smaller (p = 0.001) for our approach, which dropped from 0.849 to 0.717. The difference in area under the Receiver Operating Characteristic (AUC) drop was also smaller (p < 0.001) for our approach (0.969 to 0.906, -6.5%) than for the baseline method (0.971 to 0.806, -17.0%). + + Conclusions: We present a DNN for AMD classification on OCT scans that transfers well to scans from vendors that were not used for development. This alleviates the need for retraining on data from these scanner types, which is an expensive process in terms of data acquisition, model development, and human annotation time. 
Furthermore, this increases the applicability of AI for OCT classification in broader scopes than the settings in which they were developed.}, + optnote = {DIAG, RADIOLOGY}, + year = {2021}, +} - Methods: 2,598 and 680 Heidelberg Spectralis OCT scans from the European Genetic Database were used for development and testing, respectively. We tested transferability with 339 AMD-enriched Topcon OCTs from the Rotterdam Study. AMD severity classification was determined manually in accordance with the Cologne Image Reading Center and Laboratory and Rotterdam Classification, respectively. Classifications were harmonized for the evaluation of the DNNs. The proposed DNN considers each B-scan separately using a 2D ResNet-18, and internally combines the intermediate outputs related to each B-scan using a multiple instance learning approach. Even though the proposed DNN provides both B-scan level and OCT-volume level decisions, the architecture is trained end-to-end using only full volume gradings. This specific architecture makes our method robust to the variability of scanning protocols across vendors, as it is invariant to B-scan spacing. We compare this approach to a baseline that classifies the full OCT scan directly using a 3D ResNet-18. - - Results: The quadratic weighted kappa (QWK) for the baseline method dropped from 0.852 on the Heidelberg Spectralis dataset to 0.523 on the Topcon dataset. This QWK drop was smaller (p = 0.001) for our approach, which dropped from 0.849 to 0.717. The difference in area under the Receiver Operating Characteristic (AUC) drop was also smaller (p < 0.001) for our approach (0.969 to 0.906, -6.5%) than for the baseline method (0.971 to 0.806, -17.0%). +@article{Vent23, + author = {De Vente, Coen and Vermeer, Koenraad A. and Jaccard, Nicolas and Wang, He and Sun, Hongyi and Khader, Firas and Truhn, Daniel and Aimyshev, Temirgali and Zhanibekuly, Yerkebulan and Le, Tien-Dung and Galdran, Adrian and Ballester, Miguel \'{A}ngel Gonz\'{a}lez and Carneiro, Gustavo and Devika, R G and Hrishikesh, P S and Puthussery, Densen and Liu, Hong and Yang, Zekang and Kondo, Satoshi and Kasai, Satoshi and Wang, Edward and Durvasula, Ashritha and Heras, J\'{o}nathan and Zapata, Miguel \'{A}ngel and Ara\'{u}jo, Teresa and Aresta, Guilherme and Bogunovi\'{c}, Hrvoje and Arikan, Mustafa and Lee, Yeong Chan and Cho, Hyun Bin and Choi, Yoon Ho and Qayyum, Abdul and Razzak, Imran and Van Ginneken, Bram and Lemij, Hans G. and S\'{a}nchez, Clara I.}, + title = {AIROGS: Artificial Intelligence for RObust Glaucoma Screening Challenge}, + doi = {10.1109/tmi.2023.3313786}, + year = {2023}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/TMI.2023.3313786}, + file = {Vent23.pdf:pdf\Vent23.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {IEEE Transactions on Medical Imaging}, + citation-count = {0}, + automatic = {yes}, + pages = {1-1}, + ss_id = {7981606bf8110ec6cc64baa22d694096f7862939}, + all_ss_ids = {['7981606bf8110ec6cc64baa22d694096f7862939']}, + gscites = {6}, +} - Conclusions: We present a DNN for AMD classification on OCT scans that transfers well to scans from vendors that were not used for development. This alleviates the need for retraining on data from these scanner types, which is an expensive process in terms of data acquisition, model development, and human annotation time. 
Furthermore, this increases the applicability of AI for OCT classification in broader scopes than the settings in which they were developed.},
 optnote = {DIAG, RADIOLOGY},
 year = {2021},
}

+@article{Vent23a,
+ author = {de Vente, Coen and van Ginneken, Bram and Hoyng, Carel B. and Klaver, Caroline C. W. and S\'{a}nchez, Clara I.},
+ title = {Uncertainty-Aware Multiple-Instance Learning for Reliable Classification: Application to Optical Coherence Tomography},
+ doi = {10.48550/ARXIV.2302.03116},
+ year = {2023},
+ abstract = {Deep learning classification models for medical image analysis often perform well on data from scanners that were used during training. However, when these models are applied to data from different vendors, their performance tends to drop substantially. Artifacts that only occur within scans from specific scanners are major causes of this poor generalizability. We aimed to improve the reliability of deep learning classification models by proposing Uncertainty-Based Instance eXclusion (UBIX). This technique, based on multiple-instance learning, reduces the effect of corrupted instances on the bag-classification by seamlessly integrating out-of-distribution (OOD) instance detection during inference. Although UBIX is generally applicable to different medical images and diverse classification tasks, we focused on staging of age-related macular degeneration in optical coherence tomography. After being trained using images from one vendor, UBIX showed a reliable behavior, with a slight decrease in performance (a decrease of the quadratic weighted kappa ($\kappa$\textsubscript{w}) from 0.861 to 0.708), when applied to images from different vendors containing artifacts; while a state-of-the-art 3D neural network suffered from a significant detriment of performance ($\kappa$\textsubscript{w} from 0.852 to 0.084) on the same test set. We showed that instances with unseen artifacts can be identified with OOD detection and their contribution to the bag-level predictions can be reduced, improving reliability without the need for retraining on new data. This potentially increases the applicability of artificial intelligence models to data from other scanners than the ones for which they were developed.},
+ url = {https://arxiv.org/abs/2302.03116},
+ file = {Vent23a.pdf:pdf\\Vent23a.pdf:PDF},
 optnote = {DIAG, RADIOLOGY},
 journal = {arXiv:2302.03116},
 automatic = {yes},
}

@mastersthesis{Verb21,
 author = {Jeroen Verboom},
 title = {Deep Learning for Fracture Detection in the Radius and Ulna on Conventional Radiographs},
 abstract = {This work gives a compartmentalized overview of a fracture detection tool to detect and localize fractures in the radius and ulna on conventional radiographs using deep learning.
 - This contrasts earlier studies in that it proposes a more efficient object detector, demonstrates the generalizability of fracture detection models to data from a different hospital, and employs more advanced class activation mapping methods for fracture localization.
 - Both RadboudUMC and the Jeroen Bosch Ziekenhuis provided data to create a multi-institutional dataset.
 - The two data sources enabled me to demonstrate how fracture detection classifiers trained on data from only one institution perform significantly worse when tested on data from another institution.
- Moreover, this study demonstrates a more efficient bone localization method that yields adequate performance to be used for cropping regions of interest, and a newer fracture localization method (ScoreCAM) that outperforms its predecessors in terms of highlighting less redundant information.
 - I conclude that the algorithms presented in this work show the potential to be incorporated in a clinically usable fracture detection tool.
 - However, more research needs to be conducted using multi-institutional data for training fracture detection classifiers.},
 + This contrasts earlier studies in that it proposes a more efficient object detector, demonstrates the generalizability of fracture detection models to data from a different hospital, and employs more advanced class activation mapping methods for fracture localization.
 + Both RadboudUMC and the Jeroen Bosch Ziekenhuis provided data to create a multi-institutional dataset.
 + The two data sources enabled me to demonstrate how fracture detection classifiers trained on data from only one institution perform significantly worse when tested on data from another institution.
 + Moreover, this study demonstrates a more efficient bone localization method that yields adequate performance to be used for cropping regions of interest, and a newer fracture localization method (ScoreCAM) that outperforms its predecessors in terms of highlighting less redundant information.
 + I conclude that the algorithms presented in this work show the potential to be incorporated in a clinically usable fracture detection tool.
 + However, more research needs to be conducted using multi-institutional data for training fracture detection classifiers.},
 file = {Verb21.pdf:pdf/Verb21.pdf:PDF},
 optnote = {DIAG},
 school = {Tilburg University},
@@ -25597,7 +31763,24 @@ @mastersthesis{Verb21
 journal = {Master thesis},
}

-@inproceedings{Verm21,
+@article{Verh18,
+ author = {Verhagen, Martijn V. and Smets, Anne M.J.B. and van Schuppen, Joost and Deurloo, Eline E. and Schaefer-Prokop, Cornelia},
+ title = {The impact of reconstruction techniques on observer performance for the detection and characterization of small pulmonary nodules in chest CT of children under 13 years},
+ doi = {10.1016/j.ejrad.2018.01.015},
+ year = {2018},
+ abstract = {Abstract unavailable},
+ url = {http://dx.doi.org/10.1016/j.ejrad.2018.01.015},
+ file = {Verh18.pdf:pdf\\Verh18.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {European Journal of Radiology},
+ automatic = {yes},
+ citation-count = {6},
+ pages = {142-146},
+ volume = {100},
+ pmid = {29496073},
+}
+
+@inproceedings{Verm21,
 author = {Vermazeren, Jeroen and van Eekelen, Leander and Meesters, Luca and Looijen-Salamon, Monika and Vos, Shoko and Munari, Enrico and Mercan, Caner and Ciompi, Francesco},
 booktitle = MIDL,
 title = {muPEN: Multi-class PseudoEdgeNet for PD-L1 assessment},
@@ -25607,6 +31790,23 @@
 year = {2021},
}

+@article{Vers20,
+ author = {Versteegh, VE and Welvaart, WN and Oberink-Gustafsson, EEM and Lindenholz, A and Staaks, GHA and Schaefer-Prokop, CM},
+ title = {Non-traumatic complications of a solitary rib osteochondroma; an unusual cause of hemoptysis and pneumothorax},
+ doi = {10.1259/bjrcr.20200015},
+ year = {2020},
+ abstract = { Osteochondromas are a very common and usually asymptomatic entity which may originate anywhere in the appendicular and axial skeleton. However, the ribs are a rare site of origin and here they may prove symptomatic for mechanical reasons.
In this case report, we describe an unusual case of a symptomatic osteochondroma of the rib secondary to its location and unique shape, ultimately requiring surgical intervention. }, + url = {http://dx.doi.org/10.1259/bjrcr.20200015}, + file = {Vers20.pdf:pdf\\Vers20.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {BJR|case reports}, + automatic = {yes}, + citation-count = {1}, + pages = {20200015}, + volume = {6}, + pmid = {32922844}, +} + @article{Veta18, author = {Mitko Veta and Yujing J. Heng and Nikolas Stathonikos and Babak Ehteshami Bejnordi and Francisco Beca and Thomas Wollmann and Karl Rohr and Manan A. Shah and Dayong Wang and Mikael Rousson and Martin Hedlund and David Tellez and Francesco Ciompi and Erwan Zerhouni and David Lanyi and Matheus Viana and Vassili Kovalev and Vitali Liauchuk and Hady Ahmady Phoulady and Talha Qaiser and Simon Graham and Nasir Rajpoot and Erik Sjoblom and Jesper Molin and Kyunghyun Paeng and Sangheum Hwang and Sunggyun Park and Zhipeng Jia and Eric I-Chao Chang and Yan Xu and Andrew H. Beck and Paul J. van Diest and Josien P. W. Pluim}, title = {Predicting breast tumor proliferation from whole-slide images: the {TUPAC16} challenge}, @@ -25622,7 +31822,9 @@ @article{Veta18 pmid = {30861443}, month = {5}, gsid = {11612591462765985317}, - gscites = {56}, + gscites = {207}, + ss_id = {c377304af802f1ea67c6d696806a69f516a4472f}, + all_ss_ids = {['c377304af802f1ea67c6d696806a69f516a4472f']}, } @inproceedings{Vijv16, @@ -25639,6 +31841,57 @@ @inproceedings{Vijv16 month = {3}, gsid = {1469412038484421444}, gscites = {3}, + ss_id = {259805d3206e98af32a66eaf8fc0dfe73f595c4d}, + all_ss_ids = {['259805d3206e98af32a66eaf8fc0dfe73f595c4d']}, +} + +@article{Vina22, + author = {Vinayahalingam, Shankeeth and van Nistelrooij, Niels and van Ginneken, Bram and Bressem, Keno and Troltzsch, Daniel and Heiland, Max and Flugge, Tabea and Gaudin, Robert}, + title = {Detection of mandibular fractures on panoramic radiographs using deep learning.}, + doi = {10.1038/s41598-022-23445-w}, + issue = {1}, + number = {1}, + pages = {19596}, + volume = {12}, + abstract = {Mandibular fractures are among the most frequent facial traumas in oral and maxillofacial surgery, accounting for 57% of cases. An accurate diagnosis and appropriate treatment plan are vital in achieving optimal re-establishment of occlusion, function and facial aesthetics. This study aims to detect mandibular fractures on panoramic radiographs (PR) automatically. 1624 PR with fractures were manually annotated and labelled as a reference. A deep learning approach based on Faster R-CNN and Swin-Transformer was trained and validated on 1640 PR with and without fractures. Subsequently, the trained algorithm was applied to a test set consisting of 149 PR with and 171 PR without fractures. The detection accuracy and the area-under-the-curve (AUC) were calculated. The proposed method achieved an F1 score of 0.947 and an AUC of 0.977. 
Deep learning-based assistance of clinicians may reduce the misdiagnosis and hence the severe complications.}, + file = {Vina22.pdf:pdf\\Vina22.pdf:PDF}, + journal = {Scientific reports}, + optnote = {DIAG, RADIOLOGY}, + pmid = {36379971}, + publisher = {Springer Science and Business Media {LLC}}, + year = {2022}, + ss_id = {6df1dd59f69d05d00e0b41b3220781b614584362}, + all_ss_ids = {['6df1dd59f69d05d00e0b41b3220781b614584362']}, + gscites = {4}, +} + +@article{Vina23, + author = {Vinayahalingam, Shankeeth and Kempers, Steven and Schoep, Julian and Hsu, Tzu-Ming Harry and Moin, David Anssari and van Ginneken, Bram and Fl\"{u}gge, Tabea and Hanisch, Marcel and Xi, Tong}, + title = {Intra-oral scan segmentation using deep learning}, + doi = {10.1186/s12903-023-03362-8}, + url = {http://dx.doi.org/10.1186/s12903-023-03362-8}, + volume = {23}, + abstract = {Abstract + Objective + Intra-oral scans and gypsum cast scans (OS) are widely used in orthodontics, prosthetics, implantology, and orthognathic surgery to plan patient-specific treatments, which require teeth segmentations with high accuracy and resolution. Manual teeth segmentation, the gold standard up until now, is time-consuming, tedious, and observer-dependent. This study aims to develop an automated teeth segmentation and labeling system using deep learning. + + Material and methods + As a reference, 1750 OS were manually segmented and labeled. A deep-learning approach based on PointCNN and 3D U-net in combination with a rule-based heuristic algorithm and a combinatorial search algorithm was trained and validated on 1400 OS. Subsequently, the trained algorithm was applied to a test set consisting of 350 OS. The intersection over union (IoU), as a measure of accuracy, was calculated to quantify the degree of similarity between the annotated ground truth and the model predictions. + + Results + The model achieved accurate teeth segmentations with a mean IoU score of 0.915. The FDI labels of the teeth were predicted with a mean accuracy of 0.894. The optical inspection showed excellent position agreements between the automatically and manually segmented teeth components. Minor flaws were mostly seen at the edges. + + Conclusion + The proposed method forms a promising foundation for time-effective and observer-independent teeth segmentation and labeling on intra-oral scans. + + Clinical significance + Deep learning may assist clinicians in virtual treatment planning in orthodontics, prosthetics, implantology, and orthognathic surgery. 
The impact of using such models in clinical practice should be explored.}, + citation-count = {0}, + file = {Vina23.pdf:pdf\Vina23.pdf:PDF}, + journal = {BMC Oral Health}, + optnote = {DIAG, RADIOLOGY}, + pmid = {37670290}, + year = {2023}, } @article{Vink88, @@ -25655,6 +31908,9 @@ @article{Vink88 number = {5}, pmid = {3177896}, month = {8}, + ss_id = {8f9a31c0e676328f5beb217a9d0a17773eb663ab}, + all_ss_ids = {['8f9a31c0e676328f5beb217a9d0a17773eb663ab']}, + gscites = {19}, } @article{Viss12, @@ -25673,6 +31929,8 @@ @article{Viss12 month = {11}, gsid = {8488911132953566154}, gscites = {19}, + ss_id = {cebcb3fcae6f033310f2b02000109d815f58ce4f}, + all_ss_ids = {['cebcb3fcae6f033310f2b02000109d815f58ce4f']}, } @article{Vlie22, @@ -25689,6 +31947,9 @@ @article{Vlie22 volume = {27}, number = {10}, pages = {818--833}, + ss_id = {08c1b02b22c53480dcf792b8346a5ae416a7bc00}, + all_ss_ids = {['08c1b02b22c53480dcf792b8346a5ae416a7bc00']}, + gscites = {13}, } @article{Voge05, @@ -25705,8 +31966,10 @@ @article{Voge05 pmid = {15690224}, month = {11}, gsid = {7276742739793309998}, - gscites = {5}, + gscites = {6}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/47875}, + ss_id = {f215d3cfc9eca1506102addb38c3556bbfbcebfb}, + all_ss_ids = {['f215d3cfc9eca1506102addb38c3556bbfbcebfb']}, } @article{Voge07, @@ -25724,8 +31987,10 @@ @article{Voge07 pmid = {17504865}, month = {6}, gsid = {5484873770485079748}, - gscites = {43}, + gscites = {45}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/51475}, + ss_id = {3abdd710e114c17d1575d5f3342cf6a941dc9264}, + all_ss_ids = {['3abdd710e114c17d1575d5f3342cf6a941dc9264']}, } @inproceedings{Vos07, @@ -25739,6 +32004,8 @@ @inproceedings{Vos07 optnote = {DIAG, RADIOLOGY}, gsid = {13280836963282463277}, gscites = {7}, + ss_id = {795bde2ea7cf38546ce966acc3899fbef2bdeeb9}, + all_ss_ids = {['795bde2ea7cf38546ce966acc3899fbef2bdeeb9']}, } @article{Vos08, @@ -25756,8 +32023,10 @@ @article{Vos08 pmid = {18404925}, month = {2}, gsid = {13212291345759536000}, - gscites = {103}, + gscites = {105}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/71452}, + ss_id = {f83088af99113dbba4978eea89b86d853b6e6a4c}, + all_ss_ids = {['f83088af99113dbba4978eea89b86d853b6e6a4c']}, } @inproceedings{Vos08a, @@ -25772,6 +32041,8 @@ @inproceedings{Vos08a month = {3}, gsid = {10763355599207768493}, gscites = {12}, + ss_id = {4a7d40986eb610dcc4ad156951d40518370e8682}, + all_ss_ids = {['4a7d40986eb610dcc4ad156951d40518370e8682']}, } @inproceedings{Vos09, @@ -25787,7 +32058,9 @@ @inproceedings{Vos09 optnote = {DIAG, RADIOLOGY}, pmid = {20426189}, gsid = {16369511798735227224}, - gscites = {14}, + gscites = {15}, + ss_id = {fb2365989a762a4e4bf749a126266d6a1cc1de68}, + all_ss_ids = {['fb2365989a762a4e4bf749a126266d6a1cc1de68']}, } @article{Vos10, @@ -25807,6 +32080,8 @@ @article{Vos10 gsid = {8022713700082473019}, gscites = {116}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/89494}, + ss_id = {c27e6795e38902084b5b4d113ff6fff3927c1873}, + all_ss_ids = {['c27e6795e38902084b5b4d113ff6fff3927c1873']}, } @conference{Vos10a, @@ -25856,8 +32131,10 @@ @article{Vos12a pmid = {22391091}, month = {3}, gsid = {13455779673289424103}, - gscites = {119}, + gscites = {120}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/110343}, + ss_id = {aa4b53329efa2fa1bc2aae5ab34baa945945125b}, + all_ss_ids = {['aa4b53329efa2fa1bc2aae5ab34baa945945125b']}, } @article{Vos13, @@ -25873,8 +32150,10 @@ @article{Vos13 pmid = {23751135}, month = {9}, gsid = {115460116805624360}, - gscites = 
{150}, + gscites = {164}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/117989}, + ss_id = {7c673c88257a140be530d9f6503e4737c06d8a7a}, + all_ss_ids = {['7c673c88257a140be530d9f6503e4737c06d8a7a']}, } @conference{Vos14c, @@ -25911,6 +32190,9 @@ @article{Vos19 doi = {10.1109/TMI.2019.2899534}, optnote = {DIAG, RADIOLOGY}, file = {Vos19.pdf:pdf\\Vos19.pdf:PDF}, + ss_id = {825003ce25765c8b21815246ebf739eafcce5621}, + all_ss_ids = {['825003ce25765c8b21815246ebf739eafcce5621']}, + gscites = {73}, } @article{Vos21, @@ -25927,6 +32209,9 @@ @article{Vos21 file = {Vos21.pdf:pdf\\Vos21.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/235733}, + ss_id = {f29eb9082f4fd1b632eeea5f668671e6ca685d29}, + all_ss_ids = {['f29eb9082f4fd1b632eeea5f668671e6ca685d29']}, + gscites = {5}, } @conference{Vree15, @@ -25944,19 +32229,19 @@ @conference{Vree15a booktitle = RSNA, year = {2015}, abstract = {PURPOSE - The purpose of this study was to evaluate the visibility of MR screen detected cancers on prior MR examinations in a population with an elevated risk for breast cancer. - - METHOD AND MATERIALS - An IRB approved, retrospective review of patient files from women screened with breast MRI between 2003 and 2013 was conducted at our academic center. We selected all cases detected in MRI with a prior negative MR examination performed between 6 and 24 months before a cancer was revealed (mean: 12.8 A-A?A 1/2 3.7 months). This yielded 43 cancers (3 invasive lobular-, 33 invasive ductal carcinomas, 5 ductal carcinoma in situ and 2 others) in 41 patients (age: 49 A-A?A 1/2 9.8 years, 21 BRCA patients). The MR scans where the cancers were detected (diagnostic MR scan) and the prior MR scans were evaluated side-by-side in consensus by two dedicated breast radiologists. The visibility of the cancers on prior scans was rated as: visible (BIRADS 4/5), minimal sign (BIRADS 2/3), or invisible (BIRADS 1). Chi-square tests were used to test the correlation between patient and cancer characteristics, image quality (IQ), background parenchymal enhancement (BPE), and visibility of the tumor in the prior MR scan. - - RESULTS - All lesions were retrospectively evident on the diagnostic MR scan. Review of the prior examinations of the 43 cancers detected in follow-up rounds revealed that 11 lesions (26%) were visible in the prior MRI and should have been recalled at the time of this scan. 15 lesions (35%) showed a minimal sign in the prior MRI. Only 17 lesions (40%) were completely invisible. High grade, ER negative, and PR negative tumors were more often invisible in the prior scan (p=0.016, p=0.005, and p=0.002). Moreover, tumors in BRCA patients were more likely to be invisible in the prior scan, than in non-BRCA carriers (p=0.025). IQ and BPE were not significantly related to the visibility of tumors in the prior scan. - - CONCLUSION - About 26% of the breast cancers could have been recalled earlier and only 40% of the breast cancers were invisible in retrospect. - - CLINICAL RELEVANCE/APPLICATION - To prevent screening errors regular auditing of clinical practice is indicated. Moreover, like in mammography, structural double reading of MRI screening examinations may be recommended.}, + The purpose of this study was to evaluate the visibility of MR screen detected cancers on prior MR examinations in a population with an elevated risk for breast cancer. 
+ + METHOD AND MATERIALS + An IRB approved, retrospective review of patient files from women screened with breast MRI between 2003 and 2013 was conducted at our academic center. We selected all cases detected in MRI with a prior negative MR examination performed between 6 and 24 months before a cancer was revealed (mean: 12.8 +/- 3.7 months). This yielded 43 cancers (3 invasive lobular-, 33 invasive ductal carcinomas, 5 ductal carcinoma in situ and 2 others) in 41 patients (age: 49 +/- 9.8 years, 21 BRCA patients). The MR scans where the cancers were detected (diagnostic MR scan) and the prior MR scans were evaluated side-by-side in consensus by two dedicated breast radiologists. The visibility of the cancers on prior scans was rated as: visible (BIRADS 4/5), minimal sign (BIRADS 2/3), or invisible (BIRADS 1). Chi-square tests were used to test the correlation between patient and cancer characteristics, image quality (IQ), background parenchymal enhancement (BPE), and visibility of the tumor in the prior MR scan.
 +
 + RESULTS
 + All lesions were retrospectively evident on the diagnostic MR scan. Review of the prior examinations of the 43 cancers detected in follow-up rounds revealed that 11 lesions (26%) were visible in the prior MRI and should have been recalled at the time of this scan. 15 lesions (35%) showed a minimal sign in the prior MRI. Only 17 lesions (40%) were completely invisible. High grade, ER negative, and PR negative tumors were more often invisible in the prior scan (p=0.016, p=0.005, and p=0.002). Moreover, tumors in BRCA patients were more likely to be invisible in the prior scan, than in non-BRCA carriers (p=0.025). IQ and BPE were not significantly related to the visibility of tumors in the prior scan.
 +
 + CONCLUSION
 + About 26% of the breast cancers could have been recalled earlier and only 40% of the breast cancers were invisible in retrospect.
 +
 + CLINICAL RELEVANCE/APPLICATION
 + To prevent screening errors regular auditing of clinical practice is indicated. Moreover, like in mammography, structural double reading of MRI screening examinations may be recommended.},
 optnote = {DIAG, RADIOLOGY},
}

@conference{Vree15b,
 booktitle = RSNA,
 year = {2015},
 abstract = {PURPOSE
 - Breast cancer screening in women at elevated risk is performed with yearly MRI and mammography. This includes women with BRCA mutations and women at elevated risk for other causes (mainly family history). The purpose of this study was to assess differences between BRCA mutation carriers and non-BRCA patients in a longitudinal MRI screening program in terms of recall rate, positive predictive value, and detection.
 -
 - METHOD AND MATERIALS
 - An IRB approved, retrospective review of patient files from women screened with breast MRI between 2003 and 2013 was performed at our academic center. We analysed 9,504 screening MR examinations in 2843 women (age: 45 +/- 12.09 years), including 761 BRCA patients, and 2082 non-BRCA patients. Recall rate (RR), positive predictive value (PPV), and cancer detection rate (CDR) were evaluated for first round examinations and follow-up examinations separately. BRCA patients were compared with non-BRCA patients. Chi-square tests were used to determine statistical significance.
 -
 - RESULTS
 - The RR for BRCA patients in the first round of screening was 86.07 per 1000 examinations and 52.58 per 1000 examinations in non-BRCA patients (p<0.001).
The PPV for BRCA patients in the first round of screening was found to be 0.44, compared to 0.50 in non-BRCA patients (p=0.013). The CDR was 38.25 per 1000 examinations for BRCA patients and 26.53 per 1000 examinations for non-BRCA patients (p<0.001). In follow up, the RR was found to be 24.92 per 1000 examinations for BRCA patients and 22.81 per 1000 examinations for non-BRCA patients (p<0.001). The PPV was 0.46 for BRCA patients and 0.21 for non-BRCA patients (p<0.001). CDR was 11.42 per 1000 examinations for BRCA patients and 4.86 per 1000 examinations for non-BRCA patients (p<0.001).
 -
 - CONCLUSION
 - RR and CDR are high for all patients in the first round. RR and CDR significantly decreased in follow-up rounds (p<0.001). PPV remained at an acceptable level for both patient groups, and remains particularly high in BRCA carriers. RR, PPV, and CDR differed significantly between BRCA and non-BRCA patients in both first and follow up rounds.
 -
 - CLINICAL RELEVANCE/APPLICATION
 - These results underline that MRI is an excellent tool for screening high risk patients. Cancer detection is very high in the first round in all patients, but remains high only in BRCA carriers in follow up rounds.},
 + Breast cancer screening in women at elevated risk is performed with yearly MRI and mammography. This includes women with BRCA mutations and women at elevated risk for other causes (mainly family history). The purpose of this study was to assess differences between BRCA mutation carriers and non-BRCA patients in a longitudinal MRI screening program in terms of recall rate, positive predictive value, and detection.
 +
 + METHOD AND MATERIALS
 + An IRB approved, retrospective review of patient files from women screened with breast MRI between 2003 and 2013 was performed at our academic center. We analysed 9,504 screening MR examinations in 2843 women (age: 45 +/- 12.09 years), including 761 BRCA patients, and 2082 non-BRCA patients. Recall rate (RR), positive predictive value (PPV), and cancer detection rate (CDR) were evaluated for first round examinations and follow-up examinations separately. BRCA patients were compared with non-BRCA patients. Chi-square tests were used to determine statistical significance.
 +
 + RESULTS
 + The RR for BRCA patients in the first round of screening was 86.07 per 1000 examinations and 52.58 per 1000 examinations in non-BRCA patients (p<0.001). The PPV for BRCA patients in the first round of screening was found to be 0.44, compared to 0.50 in non-BRCA patients (p=0.013). The CDR was 38.25 per 1000 examinations for BRCA patients and 26.53 per 1000 examinations for non-BRCA patients (p<0.001). In follow up, the RR was found to be 24.92 per 1000 examinations for BRCA patients and 22.81 per 1000 examinations for non-BRCA patients (p<0.001). The PPV was 0.46 for BRCA patients and 0.21 for non-BRCA patients (p<0.001). CDR was 11.42 per 1000 examinations for BRCA patients and 4.86 per 1000 examinations for non-BRCA patients (p<0.001).
 +
 + CONCLUSION
 + RR and CDR are high for all patients in the first round. RR and CDR significantly decreased in follow-up rounds (p<0.001). PPV remained at an acceptable level for both patient groups, and remains particularly high in BRCA carriers. RR, PPV, and CDR differed significantly between BRCA and non-BRCA patients in both first and follow up rounds.
 +
 + CLINICAL RELEVANCE/APPLICATION
 + These results underline that MRI is an excellent tool for screening high risk patients.
Cancer detection is very high in the first round in all patients, but remains high only in BRCA carriers in follow up rounds.},
 optnote = {DIAG, RADIOLOGY},
}

@conference{Vree16,
 booktitle = ISMRM,
 year = {2016},
 abstract = {Synopsis: Women at increased risk for breast cancer require annual mammography and MRI. The purpose of this study is to evaluate cancers detected in MRI screening and assess the visibility on prior MRI-examinations. MRI-scans of breast cancers detected in our MRI screening program were re-evaluated and lesions on the diagnostic MRI and prior MRI were scored according to Breast Imaging Reporting and Data (BI-RADS) MR-lexicon. The visibility of the lesions on the prior MRI was rated as visible, minimal sign and invisible. Our results show that almost one third of the breast cancers should have been recalled based on consensus review.
 - Purpose:
 - Breast cancer is a main cause of cancer death, especially in women at increased risk for breast cancer. This risk is defined as a cumulative lifetime risk of more than 20%, and can be as high as 57% at the age of 70 in BRCA1-carriers.1 Screening with only mammography is insufficient in these women and supplemental annual breast MRI is currently required.2 In mammography screening it is regular practice to evaluate breast cancers detected in incident screening rounds and cancers detected in between screening round (interval cancers), and assess whether these cancers could have been detected earlier.3,4 This is rare for MRI screening. The purpose of this study is to evaluate breast cancers detected in an intermediate and high risk screening program, and assess the visibility of these cancers on prior MRI examinations. To detect possible causes for non-recall, we investigated imaging, patient, and cancer characteristics.
 - Methods:
 - This retrospective study was approved by our local institutional board and the requirement for informed consent was waived. We collected all breast MRI screening examinations in the period from January 2003 - January 2014. To find all malignant lesions in this population, corresponding patient files were linked to the Netherlands Cancer Registry (NCR). For each patient with breast cancer detected on an MRI-screen or interval cancer, we identified whether another MRI-screen in 6 - 24 months before cancer detection was available (prior MRI). These MRI-scans were re-evaluated together with the MRI-scan in which cancer was detected (diagnostic MRI) in consensus by two radiologists with 8 and 12 years experience. The review was performed on an in-house developed breast MRI workstation, which provided T1-weighted images for all time points for both current and prior DCE-MRI, subtraction images, and their maximum intensity projection. Images were motion corrected using an algorithm described in Gubern-M\'{e}rida et al.5 No T2-weighted images or diffusion-weighted images were shown. On the diagnostic MRI morphological and enhancement characteristics of the known cancer were scored according to the Breast Imaging Reporting and Data (BI-RADS) MR-lexicon.6 In addition, background parenchymal enhancement (BPE) was scored as minimal (<25%), mild (25-50%), moderate (50-75%) or marked (>75%), and image quality (IQ) was scored as perfect, sufficient or bad. Thereafter, the prior MRI was analyzed. The visibility of the lesion, previously identified in the diagnostic MRI, was rated as visible (BI-RADS 4/5), minimally visible (BI-RADS 2/3), or invisible (BI-RADS 1) (Fig.1).
In lesions classified as visible or minimally visible morphology and enhancement characteristics were scored. Pearson's chi-square tests were used to test if imaging, patient, and cancer characteristics affect the visibility of the tumor on the prior MRI. Statistics were performed in SPSS.
 - Results:
 - From January 2003 - January 2014, 10120 MRI-examinations were performed in 2797 women, including 807 BRCA-mutation carriers. In total, 153 cancers were found. For 69 screen-detected tumors a prior MRI was available (36 tumors in patients with a BRCA mutation). In retrospect, 20 (29%) tumors were visible on the prior MRI, 26 (38%) showed a minimal sign, and 23 (33%) were invisible. Furthermore, prior MRIs were also available for 12 interval cancers (6 tumors in patients with a BRCA mutation); 3 (25%) were visible, 4 (33%) showed a minimal sign, and 5 (33%) were not visible on the prior MRI. Tumors in BRCA patients, small tumors, tumors of high grade and hormone-negative tumors were more likely to be invisible on the prior MRI (p<0.001, p=0.039, p<0.001, p<0.001, respectively). The lack of detection of lesions scored as visible on the prior MRI was not related to BPE or IQ.
 - Discussion:
 - A successful MRI screening program is based on the balance between the early detection and the false positive findings that result in unnecessary biopsies and anxiety. This might explain why not all visible lesions get recalled. However, in our study we show that almost one third of cancers were already visible on the prior MRI scan in retrospect and should have been recalled according to our consensus review. This fraction was similar for screen detected and interval cancers. A possible reason for the non-recall could be that the visible lesions were already present at an earlier time point and were regarded stable over time. Non-recall was not related to BPE or IQ.
 - Conclusion:
 - It was seen that 28% of breast cancers should have been recalled earlier based on consensus review. Only 35% was completely invisible in retrospect. This indicates that even highly specialized breast cancer screening programs can still be improved and that regular evaluation of screening practice is essential.
 - References:
 - 1.S. Chen et al. Meta-analysis of BRCA1 and BRCA2 penetrance. JCO (2007), 25(11):1329-33
 - 2.D. Saslow et al. American Cancer Society guidelines for breast screening with MRI as an adjunct to mammography. CA (2007), 57:75-89
 - 3.D.M. Ikeda et al. Analysis of 172 subtle findings on prior normal mammograms in women with breast cancer detected at follow-up screening. Radiology (2003), 226(2):494-503
 - 4.A.J. Maxwell et al. A study of breast cancers detected in the incident round of the UK NHS Breast Screening Programme: the importance of early detection and treatment of ductal carcinoma in situ. Breast (2001), 10(5):392-8
 - 5.A. Gubern-Merida et al. Automated localization of breast cancer in DCE-MRI. Med Imag Anal (2015),20(1):265-74
 - 6.BI-RADS Atlas, 5th ed 2013
 -
 - Acknowledgements: European Union's 7FP (Grant 601040)},
 + Purpose:
 + Breast cancer is a main cause of cancer death, especially in women at increased risk for breast cancer.
This risk is defined as a cumulative lifetime risk of more than 20%, and can be as high as 57% at the age of 70 in BRCA1-carriers.1 Screening with only mammography is insufficient in these women and supplemental annual breast MRI is currently required.2 In mammography screening it is regular practice to evaluate breast cancers detected in incident screening rounds and cancers detected in between screening round (interval cancers), and assess whether these cancers could have been detected earlier.3,4 This is rare for MRI screening. The purpose of this study is to evaluate breast cancers detected in an intermediate and high risk screening program, and assess the visibility of these cancers on prior MRI examinations. To detect possible causes for non-recall, we investigated imaging, patient, and cancer characteristics.
 + Methods:
 + This retrospective study was approved by our local institutional board and the requirement for informed consent was waived. We collected all breast MRI screening examinations in the period from January 2003 - January 2014. To find all malignant lesions in this population, corresponding patient files were linked to the Netherlands Cancer Registry (NCR). For each patient with breast cancer detected on an MRI-screen or interval cancer, we identified whether another MRI-screen in 6 - 24 months before cancer detection was available (prior MRI). These MRI-scans were re-evaluated together with the MRI-scan in which cancer was detected (diagnostic MRI) in consensus by two radiologists with 8 and 12 years experience. The review was performed on an in-house developed breast MRI workstation, which provided T1-weighted images for all time points for both current and prior DCE-MRI, subtraction images, and their maximum intensity projection. Images were motion corrected using an algorithm described in Gubern-M\'{e}rida et al.5 No T2-weighted images or diffusion-weighted images were shown. On the diagnostic MRI morphological and enhancement characteristics of the known cancer were scored according to the Breast Imaging Reporting and Data (BI-RADS) MR-lexicon.6 In addition, background parenchymal enhancement (BPE) was scored as minimal (<25%), mild (25-50%), moderate (50-75%) or marked (>75%), and image quality (IQ) was scored as perfect, sufficient or bad. Thereafter, the prior MRI was analyzed. The visibility of the lesion, previously identified in the diagnostic MRI, was rated as visible (BI-RADS 4/5), minimally visible (BI-RADS 2/3), or invisible (BI-RADS 1) (Fig.1). In lesions classified as visible or minimally visible morphology and enhancement characteristics were scored. Pearson's chi-square tests were used to test if imaging, patient, and cancer characteristics affect the visibility of the tumor on the prior MRI. Statistics were performed in SPSS.
 + Results:
 + From January 2003 - January 2014, 10120 MRI-examinations were performed in 2797 women, including 807 BRCA-mutation carriers. In total, 153 cancers were found. For 69 screen-detected tumors a prior MRI was available (36 tumors in patients with a BRCA mutation). In retrospect, 20 (29%) tumors were visible on the prior MRI, 26 (38%) showed a minimal sign, and 23 (33%) were invisible. Furthermore, prior MRIs were also available for 12 interval cancers (6 tumors in patients with a BRCA mutation); 3 (25%) were visible, 4 (33%) showed a minimal sign, and 5 (33%) were not visible on the prior MRI.
Tumors in BRCA patients, small tumors, tumors of high grade and hormone-negative tumors were more likely to be invisible on the prior MRI (p<0.001, p=0.039, p<0.001, p<0.001, respectively). The lack of detection of lesions scored as visible on the prior MRI was not related to BPE or IQ.
 + Discussion:
 + A successful MRI screening program is based on the balance between the early detection and the false positive findings that result in unnecessary biopsies and anxiety. This might explain why not all visible lesions get recalled. However, in our study we show that almost one third of cancers were already visible on the prior MRI scan in retrospect and should have been recalled according to our consensus review. This fraction was similar for screen detected and interval cancers. A possible reason for the non-recall could be that the visible lesions were already present at an earlier time point and were regarded stable over time. Non-recall was not related to BPE or IQ.
 + Conclusion:
 + It was seen that 28% of breast cancers should have been recalled earlier based on consensus review. Only 35% was completely invisible in retrospect. This indicates that even highly specialized breast cancer screening programs can still be improved and that regular evaluation of screening practice is essential.
 + References:
 + 1.S. Chen et al. Meta-analysis of BRCA1 and BRCA2 penetrance. JCO (2007), 25(11):1329-33
 + 2.D. Saslow et al. American Cancer Society guidelines for breast screening with MRI as an adjunct to mammography. CA (2007), 57:75-89
 + 3.D.M. Ikeda et al. Analysis of 172 subtle findings on prior normal mammograms in women with breast cancer detected at follow-up screening. Radiology (2003), 226(2):494-503
 + 4.A.J. Maxwell et al. A study of breast cancers detected in the incident round of the UK NHS Breast Screening Programme: the importance of early detection and treatment of ductal carcinoma in situ. Breast (2001), 10(5):392-8
 + 5.A. Gubern-Merida et al. Automated localization of breast cancer in DCE-MRI. Med Imag Anal (2015),20(1):265-74
 + 6.BI-RADS Atlas, 5th ed 2013
 +
 + Acknowledgements: European Union's 7FP (Grant 601040)},
 optnote = {DIAG, RADIOLOGY},
}

@conference{Vree16a,
 booktitle = EBCC,
 year = {2016},
 abstract = {Background: Intensive screening with annual mammography and MRI is offered to women at high risk for the development of breast cancer. Although most cancers are screen detected, screening does not prevent breast cancers from occurring and some are still detected between screening rounds (true interval cancers). Consequently, some women opt for prophylactic mastectomy rather than intensive screening since this reduces the incidence of breast cancer. Unfortunately, detection of cancer in a prophylactic mastectomy specimen (incident cancers) is not a rare occurrence. It is unsure whether these cancers should be considered as interval cancers. This study evaluates the prognostic factors of cancers stratified by the mode of tumor detection in these women.
 - Material and methods: Review of our intermediate and high risk screening program from 2003 to 2013 identified 177 cancers. Of these, 136 were detected in screening, 15 cancers were true interval carcinomas detected due to symptoms, and 26 cancers were detected in prophylactic mastectomy specimens.
Patient- and cancer characteristics (invasive versus in-situ disease, grade, pT-stage, age, menopausal state, cancer receptor status and pN-stage) between these three groups were compared using a Pearson's chi-square test for categorical variables or one-way ANOVA for continuous variables. - Results: The fraction of invasive disease was 8/26 (30.8%), 109/136 (80.1%) and 15/15 (100%) for cancers in prophylactic mastectomy specimens, screen detected cancers and interval cancers, respectively (p<0.001). The fraction of cancers larger than two centimeters was 1/26 (3.8%), 24/136 (17.6%) and 3/15 (20.0%), respectively. A similar increase was observed for the overall pT-stage (p<0.001). Moreover, tumor grade was higher in true interval cancers than in cancers detected in prophylactic mastectomy specimens (p=0.001). Most cancers were node negative (p=0.233). There were no significant differences in patient age, menopausal state, cancer receptor status, and pN-stage between true interval cancers and prophylactic mastectomy specimens. - Conclusions: True interval cancers are more often invasive, generally larger, and commonly of higher grade than screen detected cancers or cancers in prophylactic mastectomy specimens. The prognosis of cancers detected in prophylactic mastectomy specimens is particularly good as most of these lesions are in situ cancers only. Therefore, these incident cancers should not be regarded as interval cancers.}, + Material and methods: Review of our intermediate and high risk screening program from 2003 to 2013 identified 177 cancers. Of these, 136 were detected in screening, 15 cancers were true interval carcinomas detected due to symptoms, and 26 cancers were detected in prophylactic mastectomy specimens. Patient- and cancer characteristics (invasive versus in-situ disease, grade, pT-stage, age, menopausal state, cancer receptor status and pN-stage) between these three groups were compared using a Pearson's chi-square test for categorical variables or one-way ANOVA for continuous variables. + Results: The fraction of invasive disease was 8/26 (30.8%), 109/136 (80.1%) and 15/15 (100%) for cancers in prophylactic mastectomy specimens, screen detected cancers and interval cancers, respectively (p<0.001). The fraction of cancers larger than two centimeters was 1/26 (3.8%), 24/136 (17.6%) and 3/15 (20.0%), respectively. A similar increase was observed for the overall pT-stage (p<0.001). Moreover, tumor grade was higher in true interval cancers than in cancers detected in prophylactic mastectomy specimens (p=0.001). Most cancers were node negative (p=0.233). There were no significant differences in patient age, menopausal state, cancer receptor status, and pN-stage between true interval cancers and prophylactic mastectomy specimens. + Conclusions: True interval cancers are more often invasive, generally larger, and commonly of higher grade than screen detected cancers or cancers in prophylactic mastectomy specimens. The prognosis of cancers detected in prophylactic mastectomy specimens is particularly good as most of these lesions are in situ cancers only. Therefore, these incident cancers should not be regarded as interval cancers.}, optnote = {DIAG, RADIOLOGY}, } @@ -26028,22 +32313,22 @@ @conference{Vree16b booktitle = ECR, year = {2016}, abstract = {Purpose: Women at increased risk for breast cancer are screened with annual MRI and mammography. However, - despite this intensive surveillance interval cancers still occur. 
The purpose of this study is to evaluate prognostic
 - factors of interval carcinomas and compare these to prognostic factors of screen detected cancers.
 - Methods and Materials: In a review of our intermediate and high risk screening program from 2003 to 2013, 170
 - cancers in 159 women were identified. Of these, 14 cancers were true interval carcinomas presenting with symptoms,
 - and 132 were detected in screening. Twenty-four further cancers were detected in prophylactic mastectomy
 - specimens, and were excluded from this study. Patient- and cancer characteristics of screen detected cancers and
 - interval cancers were compared using a Pearson's chi-squared test for categorical variables and a Student's t-test for
 - continuous variables.
 - Results: Interval cancers occurred in younger patients (p=0.001), had a higher pT-stage (p=0.046), and were more
 - often ER-negative and PR-negative (p=0.002, and p=0.002, respectively). Tumor grade appeared worse in interval
 - carcinomas and were more often invasive, but this did not reach statistical significance (p=0.062, and p=0.063,
 - respectively). HER2-status was not significantly different. Fortunately, no difference was observed in pN-stage or
 - presence of metastatic disease.
 - Conclusion: Interval cancers occurring in women participating in intensive surveillance programs are of more
 - aggressive nature than screen detected cancers. However, our results suggest that interval cancers are detected when
 - the disease is local. This still results in a relatively good prognosis for patients with interval cancer.},
 + despite this intensive surveillance interval cancers still occur. The purpose of this study is to evaluate prognostic
 + factors of interval carcinomas and compare these to prognostic factors of screen detected cancers.
 + Methods and Materials: In a review of our intermediate and high risk screening program from 2003 to 2013, 170
 + cancers in 159 women were identified. Of these, 14 cancers were true interval carcinomas presenting with symptoms,
 + and 132 were detected in screening. Twenty-four further cancers were detected in prophylactic mastectomy
 + specimens, and were excluded from this study. Patient- and cancer characteristics of screen detected cancers and
 + interval cancers were compared using a Pearson's chi-squared test for categorical variables and a Student's t-test for
 + continuous variables.
 + Results: Interval cancers occurred in younger patients (p=0.001), had a higher pT-stage (p=0.046), and were more
 + often ER-negative and PR-negative (p=0.002, and p=0.002, respectively). Tumor grade appeared worse in interval
 + carcinomas and were more often invasive, but this did not reach statistical significance (p=0.062, and p=0.063,
 + respectively). HER2-status was not significantly different. Fortunately, no difference was observed in pN-stage or
 + presence of metastatic disease.
 + Conclusion: Interval cancers occurring in women participating in intensive surveillance programs are of more
 + aggressive nature than screen detected cancers. However, our results suggest that interval cancers are detected when
 + the disease is local.
This still results in a relatively good prognosis for patients with interval cancer.},
 optnote = {DIAG, RADIOLOGY},
}

@conference{Vree16c,
 booktitle = ECR,
 year = {2016},
 abstract = {Purpose: Breast MRI background parenchymal enhancement (BPE) has been identified as a risk factor for breast
 - cancer and has been associated to certain tumor characteristics. However, it is not known whether its presence is
 - related to tumor aggressiveness in high risk screening patients. The purpose of this study is to evaluate this
 - association between BPE and tumor grade in high risk screen detected breast cancers.
 - Methods and Materials: Review of our intermediate and high risk screening program from 2003-2013 identified MRI scans
 - of 80 cancers in 79 patients (48 +/- 9.8 years) with biopsy proven unilateral cancer and no previous breast cancer.
 - The level of BPE in the contralateral breast was scored as minimal, mild, moderate, and marked by two readers (one
 - 5th year resident (R1) and one experienced radiologist (R2)). Odds ratios (OR) were calculated for grade in relation to
 - BPE. Observer variability was computed using kappa statistics.
 - Results: A significant association was found between tumor grade and level of BPE in the contralateral breast for both
 - readers (the OR for high grade tumor was 0.394 (p=0.007) for R1 and 0.310 (p=0.002) for R2). After adjusting for
 - significant factors, the OR for high grade cancers was 0.924 for R1 and 2.066 for R2. Kappa value for BPE
 - assessment between readers was K=0.592.
 - Conclusion: Lower BPE might be associated to higher tumor grade, when only evaluating BPE. However, our results
 - suggest that other factors play a major role in this association. This limits the usefulness of BPE as a parameter for
 - therapy stratification.},
 + cancer and has been associated to certain tumor characteristics. However, it is not known whether its presence is
 + related to tumor aggressiveness in high risk screening patients. The purpose of this study is to evaluate this
 + association between BPE and tumor grade in high risk screen detected breast cancers.
 + Methods and Materials: Review of our intermediate and high risk screening program from 2003-2013 identified MRI scans
 + of 80 cancers in 79 patients (48 +/- 9.8 years) with biopsy proven unilateral cancer and no previous breast cancer.
 + The level of BPE in the contralateral breast was scored as minimal, mild, moderate, and marked by two readers (one
 + 5th year resident (R1) and one experienced radiologist (R2)). Odds ratios (OR) were calculated for grade in relation to
 + BPE. Observer variability was computed using kappa statistics.
 + Results: A significant association was found between tumor grade and level of BPE in the contralateral breast for both
 + readers (the OR for high grade tumor was 0.394 (p=0.007) for R1 and 0.310 (p=0.002) for R2). After adjusting for
 + significant factors, the OR for high grade cancers was 0.924 for R1 and 2.066 for R2. Kappa value for BPE
 + assessment between readers was K=0.592.
 + Conclusion: Lower BPE might be associated to higher tumor grade, when only evaluating BPE. However, our results
 + suggest that other factors play a major role in this association.
This limits the usefulness of BPE as a parameter for
+ therapy stratification.},
  optnote = {DIAG, RADIOLOGY},
}

@@ -26086,7 +32371,9 @@ @article{Vree17
  optnote = {DIAG},
  pmid = {28463932},
  gsid = {8986280950670397521},
- gscites = {21},
+ gscites = {40},
+ ss_id = {f0dff8ace53e7e0c2bd64dd5f18996eedf10e4cb},
+ all_ss_ids = {['f0dff8ace53e7e0c2bd64dd5f18996eedf10e4cb']},
}

@article{Vree18,
@@ -26104,7 +32391,9 @@ @article{Vree18
  pmid = {29351560},
  month = {1},
  gsid = {5339776334232974796},
- gscites = {11},
+ gscites = {17},
+ ss_id = {6492ea59487a07b4a2c380daeebebb26a6ff8bc2},
+ all_ss_ids = {['6492ea59487a07b4a2c380daeebebb26a6ff8bc2']},
}

@phdthesis{Vree18a,
@@ -26136,7 +32425,9 @@ @article{Vree18b
  optnote = {DIAG},
  pmid = {29383629},
  gsid = {7496291364902253050},
- gscites = {14},
+ gscites = {24},
+ ss_id = {cd47d4eae40407bb844a7322d417cb2fe667e6eb},
+ all_ss_ids = {['cd47d4eae40407bb844a7322d417cb2fe667e6eb']},
}

@article{Vree18c,
@@ -26154,7 +32445,9 @@ @article{Vree18c
  optnote = {DIAG},
  pmid = {30075794},
  gsid = {10033587502840045193},
- gscites = {14},
+ gscites = {34},
+ ss_id = {7c3b43419293d766b7b6a69c4606d43cbbc13cd2},
+ all_ss_ids = {['7c3b43419293d766b7b6a69c4606d43cbbc13cd2']},
}

@article{Vree18d,
@@ -26172,7 +32465,26 @@ @article{Vree18d
  optnote = {DIAG},
  pmid = {29040037},
  gsid = {4123505121536739933},
- gscites = {25},
+ gscites = {45},
+ ss_id = {37dd5df4c173aac442ea358dd6135100e21c4451},
+ all_ss_ids = {['37dd5df4c173aac442ea358dd6135100e21c4451']},
+}
+
+@article{Vree19,
+ author = {Vreemann, Suzan and Dalmis, Mehmet U. and Bult, Peter and Karssemeijer, Nico and Broeders, Mireille J. M. and Gubern-M\'{e}rida, Albert and Mann, Ritse M.},
+ title = {~~Amount of fibroglandular tissue (FGT) and background parenchymal enhancement (BPE) in relation to breast cancer risk and false positives in a breast MRI screening program},
+ doi = {10.1007/s00330-019-06020-2},
+ year = {2019},
+ abstract = {The purpose of this study is to evaluate the predictive value of the amount of fibroglandular tissue (FGT) and background parenchymal enhancement (BPE), measured at baseline on breast MRI, for breast cancer development and risk of false-positive findings in women at increased risk for breast cancer. Negative baseline MRI scans of 1533 women participating in a screening program for women at increased risk for breast cancer between January 1, 2003, and January 1, 2014, were selected. Automated tools based on deep learning were used to obtain quantitative measures of FGT and BPE. Logistic regression using forward selection was used to assess relationships between FGT, BPE, cancer detection, false-positive recall, and false-positive biopsy. Sixty cancers were detected in follow-up. FGT was only associated with short-term cancer risk; BPE was not associated with cancer risk. High FGT and BPE did lead to more false-positive recalls at baseline (OR 1.259, p = 0.050, and OR 1.475, p = 0.003) and to more frequent false-positive biopsies at baseline (OR 1.315, p = 0.049, and OR 1.807, p = 0.002), but were not predictive for false-positive findings in subsequent screening rounds. FGT and BPE, measured on baseline MRI, are not predictive for overall breast cancer development in women at increased risk. High FGT and BPE lead to more false-positive findings at baseline. * Amount of fibroglandular tissue is only predictive for short-term breast cancer risk in women at increased risk.
* Background parenchymal enhancement measured on baseline MRI is not predictive for breast cancer development in women at increased risk. * High amount of fibroglandular tissue and background parenchymal enhancement lead to more false-positive findings at baseline MRI.},
+ url = {http://dx.doi.org/10.1007/s00330-019-06020-2},
+ file = {Vree19.pdf:pdf\\Vree19.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {European Radiology},
+ automatic = {yes},
+ citation-count = {23},
+ pages = {4678-4690},
+ volume = {29},
+ pmid = {30796568},
+}

@inproceedings{Vug18,
@@ -26186,6 +32498,9 @@ @inproceedings{Vug18
  optnote = {DIAG, RADIOLOGY},
  year = {2019},
  month = {3},
+ ss_id = {bbb3afacdbf1275e48ff46f21d9ac37574f281a6},
+ all_ss_ids = {['bbb3afacdbf1275e48ff46f21d9ac37574f281a6']},
+ gscites = {0},
}

@inproceedings{Vuka08,
@@ -26245,6 +32560,8 @@ @article{Vuka10
  month = {1},
  gsid = {3053704105909674321},
  gscites = {51},
+ ss_id = {4ea221e633c584c7abce455e17071d4d95b5fa9c},
+ all_ss_ids = {['4ea221e633c584c7abce455e17071d4d95b5fa9c']},
}

@inproceedings{Vuka11,
@@ -26261,7 +32578,9 @@ @inproceedings{Vuka11
  optnote = {DIAG, RADIOLOGY},
  month = {3},
  gsid = {13919107544468215275},
- gscites = {4},
+ gscites = {7},
+ ss_id = {c072c0566f86cc9bbd967741584f0b5f2d70f0d4},
+ all_ss_ids = {['c072c0566f86cc9bbd967741584f0b5f2d70f0d4']},
}

@article{Vuka12,
@@ -26278,7 +32597,9 @@ @article{Vuka12
  pmid = {21614484},
  month = {5},
  gsid = {4492406858408739423},
- gscites = {24},
+ gscites = {32},
+ ss_id = {3e95abdc1bec8622e45fc2258e27f31820754c7b},
+ all_ss_ids = {['3e95abdc1bec8622e45fc2258e27f31820754c7b']},
}

@mastersthesis{Vyaw22,
@@ -26292,6 +32613,17 @@ @mastersthesis{Vyaw22
  journal = {Master thesis},
}

+@mastersthesis{Vyaw23,
+ author = {Vyawahare, Sanyog and Venkadesh, Kiran Vaidhya and Jacobs, Colin},
+ title = {Automated segmentation of subsolid pulmonary nodules in CT scans using deep learning},
+ abstract = {Lung cancer is the second most diagnosed cancer and is the leading cause of cancer-related deaths globally. A pulmonary nodule can turn into cancer, leading to fatal outcomes if left undetected. Compared to other types of pulmonary nodules, subsolid nodules (SSN) pose a higher risk of malignancy. Subsolid nodules can be categorized into two subtypes: ground-glass opacities (GGOs) or part-solid nodules (PSNs). The assessment of SSNs by physicians on cancer risk and the stage is highly dependent on the size and volume of the nodule. Therefore, accurate segmentations are crucial for volumetric calculations when dealing with SSNs. Currently, semi-automated methods are deployed to segment the boundaries of SSNs. This requires a radiologist's manual input and fine-tuning and could lead to sub-optimal results. Furthermore, there is no study to date that focuses on evaluating the performance of deep learning in SSNs. Over the past decade, deep learning has made significant strides in medical imaging segmentation, and networks like nnUNet have demonstrated great potential in adapting to new datasets. In this research, nnUNet was used to build a fully-automated segmentation model. However, the successful application of the model requires a high-quality dataset with accurate annotations, particularly for the segmentation of SSNs, which has been an area of limited research. To address this, our research focused on creating a carefully curated dataset with annotations provided by an experienced radiologist using a dedicated lung screening workstation.
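Editor's note: the thesis abstract above stresses that accurate segmentations matter because of the volumetric calculations they feed. A minimal sketch of that calculation, deriving nodule volume from a binary mask and the scan's voxel spacing; the mask and spacing values are toy assumptions, not thesis data:

```python
import numpy as np

mask = np.zeros((40, 40, 40), dtype=bool)
mask[10:22, 8:20, 15:27] = True                 # hypothetical nodule voxels

spacing_mm = (0.7, 0.7, 1.0)                    # (x, y, z) voxel size in mm
voxel_volume_mm3 = float(np.prod(spacing_mm))
volume_mm3 = mask.sum() * voxel_volume_mm3
print(f"Nodule volume: {volume_mm3:.0f} mm^3")  # 12*12*12 voxels * 0.49 mm^3
```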
The model achieved a Dice similarity coefficient of 83.3% for the GGOs, and 77.6% and 76% for the non-solid component and solid core, respectively, for the PSNs on an external validation dataset. The model provides satisfactory segmentation results in minimal time, without any external input. It is able to learn the behaviour of the semi-automated method and produce similar segmentations. The model has shown promising potential to generate accurate and objective segmentations of subsolid nodules without human input. The proposed model acts as a good benchmark in the segmentation of subsolid pulmonary nodules.},
+ file = {Vyaw23.pdf:pdf\\Vyaw23.pdf:PDF},
+ journal = {Master thesis},
+ optnote = {DIAG},
+ school = {Radboud University Medical Center},
+ year = {2023},
+}
+
@article{Waal15,
  author = {{van der Waal}, Dani{\"{e}}lle and Emaus, Marleen J. and Bakker, Marije F. and {den Heeten}, Gerard J. and Karssemeijer, Nico and Pijnappel, Ruud M. and Veldhuis, Wouter B. and Verbeek, Andr{\'{e}} L M. and {van Gils}, Carla H. and Broeders, Mireille J M.},
  title = {Geographic variation in volumetric breast density between screening regions in the Netherlands},
@@ -26308,7 +32640,9 @@ @article{Waal15
  optnote = {DIAG, RADIOLOGY},
  pmid = {26134996},
  gsid = {87862472582705823},
- gscites = {13},
+ gscites = {17},
+ ss_id = {1ed7b8605b403640ecb3443ac8097d8cabf60fe0},
+ all_ss_ids = {['1ed7b8605b403640ecb3443ac8097d8cabf60fe0']},
}

@article{Wal14a,
@@ -26342,9 +32676,9 @@ @conference{Wand15
  booktitle = ECR,
  year = {2015},
  abstract = {Purpose: We examined to what extent mammographic density affects screening performance when using full field digital mammography (FFDM).
- Methods and Materials: We collected a consecutive series of 69,874 FFDM examinations (2003-2009) from one screening unit of the Dutch biennial screening program (50-75 years).
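Editor's note: the Vyaw23 thesis above evaluates segmentations with the Dice similarity coefficient. A minimal sketch of how Dice is computed for binary masks; the toy volumes below are illustrative, not thesis data:

```python
import numpy as np

def dice(pred: np.ndarray, ref: np.ndarray) -> float:
    """Dice = 2|A intersect B| / (|A| + |B|) for binary masks."""
    pred, ref = pred.astype(bool), ref.astype(bool)
    denom = pred.sum() + ref.sum()
    return 2.0 * np.logical_and(pred, ref).sum() / denom if denom else 1.0

reference = np.zeros((32, 32, 32), dtype=bool)
reference[10:20, 10:20, 10:20] = True           # toy "nodule"
prediction = np.zeros_like(reference)
prediction[11:21, 10:20, 10:20] = True          # slightly shifted prediction
print(f"Dice: {dice(prediction, reference):.3f}")   # 0.900
```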
Volumetric mammographic density was automatically assessed with Volpara version 1.5.0 (Matakina, New Zealand). Recall and breast cancer detection information was obtained from the screening registration system. Interval cancers were identified through linkage with the Netherlands Cancer Registry. Within four density categories, comparable to ACR breast density categories, we determined screening performance measures and linear trends with a Chi-Square linear trend test.
+ Results: 19.7% of the examinations were categorised as density category 1 ('almost entirely fatty'), 43.1% as category 2, 29.4% as category 3 and 7.7% as category 4 ('extremely dense'). In total 421 screen-detected and 150 interval tumours were identified. Cancer detection rates were 3.7‰, 6.4‰, 6.6‰ and 6.3‰ in categories 1 to 4 respectively (p=0.005). Interval cancer rates increased with increasing density categories: 0.7‰, 1.9‰, 3.0‰ and 4.5‰, respectively (p<0.001). As a result, the sensitivity (proportion of screen-detected tumours of screen-detected and interval tumours) was lower in higher density categories: 85.0%, 77.6%, 69.0% and 58.6% respectively (p<0.001). The number of false positives was higher in women with dense breasts: 11.4‰, 14.1‰, 18.3‰ and 28.6‰ for categories 1 to 4, respectively (p<0.001).
+ Conclusion: Also when FFDM is used in breast cancer screening, higher interval cancer and false-positive rates are observed in women with mammographically dense breasts.},
  optnote = {DIAG, RADIOLOGY},
}

@@ -26378,9 +32712,9 @@ @conference{Wand16
  booktitle = {Annual conference of the International Agency for Research on Cancer},
  year = {2016},
  abstract = {Purpose: In light of breast density legislation and discussions about supplemental screening, it is important to know not only one's risk of breast cancer, but particularly the risk of a tumor that is not detected through mammographic screening. We investigated the relationship between volumetric breast density and the risk of screen-detected and interval cancer within a digital mammography (DM) screening program.
- Methods: Mammographic density was automatically assessed with Volpara version 1.5.0 (Matakina, New Zealand) on the first available digital mammogram of 43,211 women (50-75 years) participating in the Dutch biennial breast cancer screening program (2003-2009). Screen-detected and interval breast cancer information was obtained from the screening registration system and through linkage with the Netherlands Cancer Registry. We estimated risks of screen-detected and interval cancers in relation to breast density using multinomial logistic regression analysis (adjusted for age). No other confounders were available in this routine screening database.
- Results: 413 screen-detected and 150 interval tumors were identified. Screen-detected breast cancer risk was significantly higher in the higher breast density categories compared to the lowest (OR: 1.65, 95% CI: 1.21-2.24, OR: 1.78, 95% CI: 1.29-2.47, OR: 1.69, 95% CI: 1.08-2.63, for density categories 2 to 4 respectively compared to 1). Interval cancer risk increased with increasing breast density (OR: 2.45, 95% CI: 1.20-4.99, OR: 5.24, 95% CI: 2.59-10.59 and OR: 6.86, 95% CI: 3.12-15.11, for density categories 2 to 4 respectively compared to 1). The relationship with interval cancers was statistically significantly stronger than with screen-detected cancers (p<0.01) for density categories 3 and 4.
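Editor's note: the Wand16 abstract above estimates screen-detected and interval cancer risks jointly with an age-adjusted multinomial logistic regression. A minimal sketch of such a model with statsmodels; the outcome coding, covariates, and simulated data are assumptions for illustration only:

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
n = 2000
density = rng.integers(1, 5, size=n).astype(float)   # density category 1-4
age = rng.normal(60, 7, size=n)
# 0 = no cancer, 1 = screen-detected cancer, 2 = interval cancer.
outcome = rng.choice([0, 1, 2], size=n, p=[0.98, 0.013, 0.007])

X = sm.add_constant(np.column_stack([density, age]))
fit = sm.MNLogit(outcome, X).fit(disp=0)
# Odds ratios per covariate, relative to the "no cancer" baseline category.
print(np.exp(fit.params))
```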
- Conclusions: Although higher breast density is related to a higher risk of a screen-detected breast cancer, it is particularly strongly related to the risk of a breast cancer that is not detected through mammographic screening (interval cancer).}, + Methods: Mammographic density was automatically assessed with Volpara version 1.5.0 (Matakina, New Zealand) on the first available digital mammogram of 43,211 women (50-75 years) participating in the Dutch biennial breast cancer screening program (2003-2009). Screen-detected and interval breast cancer information was obtained from the screening registration system and through linkage with the Netherlands Cancer Registry. We estimated risks of screen-detected and interval cancers in relation to breast density using multinomial logistic regression analysis (adjusted for age). No other confounders were available in this routine screening database. + Results: 413 screen-detected and 150 interval tumors were identified. Screen-detected breast cancer risk was significantly higher in the higher breast density categories compared to the lowest (OR: 1.65, 95% CI: 1.21-2.24, OR: 1.78, 95% CI: 1.29-2.47, OR: 1.69, 95% CI: 1.08-2.63, for density categories 2 to 4 respectively compared to 1). Interval cancer risk increased with increasing breast density (OR: 2.45, 95% CI: 1.20-4.99, OR: 5.24, 95% CI: 2.59-10.59 and OR: 6.86, 95% CI: 3.12-15.11, for density categories 2 to 4 respectively compared to 1). The relationship with interval cancers was statistically significantly stronger than with screen-detected cancers (p<0.01) for density categories 3 and 4. + Conclusions: Although higher breast density is related to a higher risk of a screen-detected breast cancer, it is particularly strongly related to the risk of a breast cancer that is not detected through mammographic screening (interval cancer).}, optnote = {DIAG, RADIOLOGY}, } @@ -26400,7 +32734,9 @@ @article{Wand17 pmid = {28012087}, month = {12}, gsid = {1629763110736326637}, - gscites = {57}, + gscites = {103}, + ss_id = {f450e015384d3f06fb1b1885536cdec1d634c785}, + all_ss_ids = {['f450e015384d3f06fb1b1885536cdec1d634c785']}, } @article{Wand17a, @@ -26413,15 +32749,49 @@ @article{Wand17a pages = {67}, doi = {10.1186/s13058-017-0859-9}, abstract = {Background In the light of the breast density legislation in the USA, it is important to know a woman's breast cancer risk, but particularly her risk of a tumor that is not detected through mammographic screening (interval cancer). Therefore, we examined the associations of automatically measured volumetric breast density with screen-detected and interval cancer risk, separately. - Methods Volumetric breast measures were assessed automatically using Volpara version 1.5.0 (Matakina, New Zealand) for the first available digital mammography (DM) examination of 52,814 women (age 50-75 years) participating in the Dutch biennial breast cancer screening program between 2003 and 2011. Breast cancer information was obtained from the screening registration system and through linkage with the Netherlands Cancer Registry. We excluded all screen-detected breast cancers diagnosed as a result of the first digital screening examination. During a median follow-up period of 4.2 (IQR 2.0-6.2) years, 523 women were diagnosed with breast cancer of which 299 were screen-detected and 224 were interval breast cancers. The associations between volumetric breast measures and breast cancer risk were determined using Cox proportional hazards analyses. 
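Editor's note: the Wand17a methods just quoted determine associations between volumetric breast measures and cancer risk with Cox proportional hazards analyses. A minimal sketch using the lifelines package (the entry does not state which software was used); the data frame below is synthetic:

```python
import numpy as np
import pandas as pd
from lifelines import CoxPHFitter

rng = np.random.default_rng(1)
n = 500
df = pd.DataFrame({
    "years_followup": rng.exponential(4.0, size=n),  # time to event/censoring
    "event": rng.integers(0, 2, size=n),             # 1 = breast cancer
    "dense_volume_cat": rng.integers(1, 5, size=n),  # VDG-like category
    "age": rng.normal(60, 7, size=n),
})
cph = CoxPHFitter().fit(df, duration_col="years_followup", event_col="event")
cph.print_summary()   # hazard ratios = exp(coef) per covariate
```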
- Results Percentage dense volume was found to be positively associated with both interval and screen-detected breast cancers (hazard ratio (HR) 8.37 (95% CI 4.34-16.17) and HR 1.39 (95% CI 0.82-2.36), respectively, for Volpara density grade category (VDG) 4 compared to VDG1 (p for heterogeneity < 0.001)). Dense volume (DV) was also found to be positively associated with both interval and screen-detected breast cancers (HR 4.92 (95% CI 2.98-8.12) and HR 2.30 (95% CI 1.39-3.80), respectively, for VDG-like category (C)4 compared to C1 (p for heterogeneity = 0.041)). The association between percentage dense volume categories and interval breast cancer risk (HR 8.37) was not significantly stronger than the association between absolute dense volume categories and interval breast cancer risk (HR 4.92). - Conclusions Our results suggest that both absolute dense volume and percentage dense volume are strong markers of breast cancer risk, but that they are even stronger markers for predicting the occurrence of tumors that are not detected during mammography breast cancer screening.}, + Methods Volumetric breast measures were assessed automatically using Volpara version 1.5.0 (Matakina, New Zealand) for the first available digital mammography (DM) examination of 52,814 women (age 50-75 years) participating in the Dutch biennial breast cancer screening program between 2003 and 2011. Breast cancer information was obtained from the screening registration system and through linkage with the Netherlands Cancer Registry. We excluded all screen-detected breast cancers diagnosed as a result of the first digital screening examination. During a median follow-up period of 4.2 (IQR 2.0-6.2) years, 523 women were diagnosed with breast cancer of which 299 were screen-detected and 224 were interval breast cancers. The associations between volumetric breast measures and breast cancer risk were determined using Cox proportional hazards analyses. + Results Percentage dense volume was found to be positively associated with both interval and screen-detected breast cancers (hazard ratio (HR) 8.37 (95% CI 4.34-16.17) and HR 1.39 (95% CI 0.82-2.36), respectively, for Volpara density grade category (VDG) 4 compared to VDG1 (p for heterogeneity < 0.001)). Dense volume (DV) was also found to be positively associated with both interval and screen-detected breast cancers (HR 4.92 (95% CI 2.98-8.12) and HR 2.30 (95% CI 1.39-3.80), respectively, for VDG-like category (C)4 compared to C1 (p for heterogeneity = 0.041)). The association between percentage dense volume categories and interval breast cancer risk (HR 8.37) was not significantly stronger than the association between absolute dense volume categories and interval breast cancer risk (HR 4.92). + Conclusions Our results suggest that both absolute dense volume and percentage dense volume are strong markers of breast cancer risk, but that they are even stronger markers for predicting the occurrence of tumors that are not detected during mammography breast cancer screening.}, file = {:pdf/Wand17a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {28583146}, month = {6}, gsid = {897339558877213208}, - gscites = {28}, + gscites = {54}, + all_ss_ids = {['10123038b80006871abc7396810bed15bfa29387', 'd677e08922a1c646ef739f06bae679cc795ba362']}, +} + +@article{Wand18, + author = {Wanders, Johanna O. P. and van Gils, Carla H. and Karssemeijer, Nico and Holland, Katharina and Kallenberg, Michiel and Peeters, Petra H. M. 
and Nielsen, Mads and Lillholm, Martin}, + title = {~~The combined effect of mammographic texture and density on breast cancer risk: a cohort study}, + doi = {10.1186/s13058-018-0961-7}, + year = {2018}, + abstract = {Texture patterns have been shown to improve breast cancer risk segregation in addition to area-based mammographic density. The additional value of texture pattern scores on top of volumetric mammographic density measures in a large screening cohort has never been studied. Volumetric mammographic density and texture pattern scores were assessed automatically for the first available digital mammography (DM) screening examination of 51,400 women (50-75 years of age) participating in the Dutch biennial breast cancer screening program between 2003 and 2011. The texture assessment method was developed in a previous study and validated in the current study. Breast cancer information was obtained from the screening registration system and through linkage with the Netherlands Cancer Registry. All screen-detected breast cancers diagnosed at the first available digital screening examination were excluded. During a median follow-up period of 4.2 (interquartile range (IQR) 2.0-6.2) years, 301 women were diagnosed with breast cancer. The associations between texture pattern scores, volumetric breast density measures and breast cancer risk were determined using Cox proportional hazard analyses. Discriminatory performance was assessed using c-indices. The median age of the women at the time of the first available digital mammography examination was 56 years (IQR 51-63). Texture pattern scores were positively associated with breast cancer risk (hazard ratio (HR) 3.16 (95% CI 2.16-4.62) (p value for trend <0.001), for quartile (Q) 4 compared to Q1). The c-index of texture was 0.61 (95% CI 0.57-0.64). Dense volume and percentage dense volume showed positive associations with breast cancer risk (HR 1.85 (95% CI 1.32-2.59) (p value for trend <0.001) and HR 2.17 (95% CI 1.51-3.12) (p value for trend <0.001), respectively, for Q4 compared to Q1). When adding texture measures to models with dense volume or percentage dense volume, c-indices increased from 0.56 (95% CI 0.53-0.59) to 0.62 (95% CI 0.58-0.65) (p < 0.001) and from 0.58 (95% CI 0.54-0.61) to 0.60 (95% CI 0.57-0.63) (p = 0.054), respectively. Deep-learning-based texture pattern scores, measured automatically on digital mammograms, are associated with breast cancer risk, independently of volumetric mammographic density, and augment the capacity to discriminate between future breast cancer and non-breast cancer cases.}, + url = {http://dx.doi.org/10.1186/s13058-018-0961-7}, + file = {Wand18.pdf:pdf\\Wand18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Breast Cancer Research}, + automatic = {yes}, + citation-count = {28}, + volume = {20}, + pmid = {29720220}, +} + +@article{Wand22, + author = {Wanders, Alexander J. T. and Mees, Willem and Bun, Petra A.M. and Janssen, Natasja and Rodr\'{i}guez-Ruiz, Alejandro and Dalm\i\c{s}, Mehmet Ufuk and Karssemeijer, Nico and van Gils, Carla H. and Sechopoulos, Ioannis and Mann, Ritse M. 
and van Rooden, Cornelis Jan},
+ title = {~~Interval Cancer Detection Using a Neural Network and Breast Density in Women with Negative Screening Mammograms},
+ doi = {10.1148/radiol.210832},
+ year = {2022},
+ abstract = {Abstract unavailable},
+ url = {http://dx.doi.org/10.1148/radiol.210832},
+ file = {Wand22.pdf:pdf\\Wand22.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Radiology},
+ automatic = {yes},
+ citation-count = {24},
+ pages = {269-275},
+ volume = {303},
+ pmid = {35133194},
+}

@conference{Wang12,
@@ -26471,13 +32841,30 @@ @conference{Wang15b
  optnote = {DIAG, RADIOLOGY},
}

+@article{Wieg19,
+ author = {Wiegertjes, Kim and ter Telgte, Annemieke and Oliveira, Pedro B. and van Leijsen, Esther M.C. and Bergkamp, Mayra I. and van Uden, Ingeborg W.M. and Ghafoorian, Mohsen and van der Holst, Helena M. and Norris, David G. and Platel, Bram and Klijn, Catharina J.M. and Tuladhar, Anil M. and de Leeuw, Frank-Erik},
+ title = {~~The role of small diffusion-weighted imaging lesions in cerebral small vessel disease},
+ doi = {10.1212/wnl.0000000000008364},
+ year = {2019},
+ abstract = {Objective: To investigate the prevalence of asymptomatic diffusion-weighted imaging-positive (DWI+) lesions in individuals with cerebral small vessel disease (SVD) and identify their role in the origin of SVD markers on MRI. Methods: We included 503 individuals with SVD from the Radboud University Nijmegen Diffusion Tensor and Magnetic Resonance Imaging Cohort (RUN DMC) study (mean age 65.6 years [SD 8.8], 56.5% male) with 1.5T MRI in 2006 and, if available, follow-up MRI in 2011 and 2015. We screened DWI scans (n = 1,152) for DWI+ lesions, assessed lesion evolution on follow-up fluid-attenuated inversion recovery, T1 and T2* images, and examined the association between DWI+ lesions and annual SVD progression (white matter hyperintensities [WMH], lacunes, microbleeds). Results: We found 50 DWI+ lesions in 39 individuals on 1,152 DWI (3.4%). Individuals with DWI+ lesions were older (p = 0.025), more frequently had a history of hypertension (p = 0.021), and had a larger burden of preexisting SVD MRI markers (WMH, lacunes, microbleeds: all p < 0.001) compared to individuals without DWI+ lesions. Of the 23 DWI+ lesions with available follow-up MRI, 14 (61%) evolved into a WMH, 8 (35%) resulted in a cavity, and 1 (4%) was no longer visible. Presence of DWI+ lesions was significantly associated with annual WMH volume increase and yearly incidence of lacunes and microbleeds (all p < 0.001). Conclusion: Over 3% of individuals with SVD have DWI+ lesions. Although DWI+ lesions play a role in the progression of SVD, they may not fully explain progression of SVD markers on MRI, suggesting that other factors than acute ischemia are at play.},
+ url = {http://dx.doi.org/10.1212/WNL.0000000000008364},
+ file = {Wieg19.pdf:pdf\\Wieg19.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Neurology},
+ automatic = {yes},
+ citation-count = {11},
+ pages = {e1627-e1634},
+ volume = {93},
+ pmid = {31530710},
+}
+
@conference{Wiel10,
  author = {P.A. Wielopolski and P. Ciet and G.P. Krestin and M.H. Lequin and H. Tiddens and R.
Manniesing},
  title = {Automated airway lumen segmentation and characterization for use in patients with tracheomalacia: a feasibility study},
  booktitle = ISMRM,
  year = {2010},
  abstract = {Introduction: Tracheomalacia (TM) refers to a weakness of the trachea, frequently due to reduction and/or atrophy of the longitudinal elastic fibers of the pars membranacea, or impaired cartilage integrity, such that the airway is softer and more susceptible to collapse. Various degrees of tracheal collapse, and therefore airway obstruction, can result from this narrowing. Diagnosis of TM includes history and physical examination, e.g. expiratory manoeuvre and cough. Pulmonary function tests include the determination of flow limitations during expiration. However, endoscopy is the essential and invaluable tool and remains the gold standard method for evaluating the airways. From the imaging perspective, conventional radiographs have had a lower sensitivity (62%), and are used in conjunction with endoscopy. A CT-scan is the initial radiologic test in cases of suspected TM. MRI is another imaging possibility for evaluating central airway abnormalities; however, it is not often used because of severe drawbacks in an area with large magnetic susceptibility gradients, poor signal homogeneity, and proneness to low spatial resolution and motion artifacts. The majority of papers on the diagnosis of TM considers imaging during end-inspiration and end-expiration. Nonetheless, more recently, some authors have demonstrated the importance of dynamic CINE acquisitions, indicating that dynamic-MRI studies during coughing may facilitate the evaluation of the collapsibility of the trachea in patients with TM. Purpose: The purpose of this work was to provide: first, a suitable acquisition scenario including static and dynamic 3D MRI sequences with sufficient temporal and spatial resolution to provide good morphological information and visualization of dynamic events in the central airways and, secondly, to provide the means for an automatic analysis program suitable to segment the airway lumen and a dynamic evaluation of cross-sectional areas of the central airways down to the 2nd generation branching. Materials and Methods: 10 healthy adult volunteers between 18 and 50 years of age were recruited as a pilot group to optimize image acquisition for the static and dynamic portions of the MRI examination at 1.5T. Volunteers were trained to perform spirometry-controlled breathing manoeuvres using an MRI-compatible spirometer. Each subject was instructed additionally to perform forced expiration and cough maneuvers. "Static" 13-second breath-hold scans covering the entire thoracic region were acquired at end-inspiration and end-expiration using a 3D rf-spoiled gradient echo sequence with TR/TE=1.2/0.5 ms, flip angle 2°, sagittal volume acquisition with isotropic (2.8)^3 mm^3 voxels. "Dynamic" scans were performed with the same scan parameters but covering only the central thorax (1/3 volume) with a temporal resolution of 500 ms per volume using the TRICKS (time resolved imaging of contrast kinetics) platform and accelerated imaging options. In-house developed software for segmentation and analysis was used. To initiate the time-domain analysis, 3 seeds were placed corresponding to the beginning of the trachea and the ends of the left and right primary bronchi to produce a centerline. The lumen is then segmented and a surface created to produce a unique reference frame to ease the time analysis (Figure 1).
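Editor's note: as the abstract continues below, the segmented lumen supports a cross-sectional analysis for stenosis and distensibility parameters. A minimal sketch of one such derived measure; the area values and the (1 - A_min/A_ref) definition are illustrative assumptions, not the in-house software's implementation:

```python
import numpy as np

def stenosis_percent(areas_mm2: np.ndarray, reference_mm2: float) -> float:
    """Stenosis = (1 - A_min / A_ref) * 100, using the narrowest cross-section."""
    return (1.0 - areas_mm2.min() / reference_mm2) * 100.0

areas = np.array([180.0, 172.0, 95.0, 88.0, 150.0, 176.0])  # mm^2 along trachea
reference = np.median(areas[:2])                             # proximal reference
print(f"Stenosis: {stenosis_percent(areas, reference):.1f}%")  # 50.0%
```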
A cross-sectional analysis can then be performed to determine stenosis and distensibility parameters. Likewise, longitudinal and geometrical analyses (e.g., bifurcation angles and planarity) are generated. Results and Discussion: The software tracks the level of the branching automatically and provides a uniquely defined origin per data set, thus enabling time comparisons in the same individual and across healthy subjects and patients with TM. The analysis is completely automated (except for three seed points for the lumen), providing as output any lumen-based parameters that are desired and/or clinically relevant. With optimized parameter settings the method successfully tracked the central airway paths in all volunteers. Conclusions: The results show that
- robust and accurate segmentation of the airways is feasible with the acquired MRI datasets. This work is highly relevant for clinical research and practice: automated lumen segmentation in patients with TM (or other related diseases of the airways) is the first step for automatic grading of airway malignancy.},
  file = {Wiel10.pdf:pdf\\Wiel10.pdf:PDF},
  optnote = {DIAG, RADIOLOGY},
}

@@ -26501,6 +32888,51 @@ @inproceedings{Wild21
  file = {:pdf/Wild21.pdf:PDF},
  optnote = {DIAG, INPRESS, RADIOLOGY},
  year = {2021},
+ ss_id = {5b84c0f6d94934c501de0301707c50be97897122},
+ all_ss_ids = {['5b84c0f6d94934c501de0301707c50be97897122']},
+ gscites = {1},
+}
+
+@article{Wild23a,
+ author = {de Wilde, Bram and Joosten, Frank and Venderink, Wulphert and Davidse, Mirjam E. J. and Geurts, Juliette and Kruijt, Hanneke and Vermeulen, Afke and Martens, Bibi and Schyns, Maxime V. P. and Huige, Josephine C. B. M. and de Boer, Myrte C. and Tonino, Bart A. R. and Zandvoort, Herman J. A. and Lammert, Kirsti and Parviainen, Helka and Vuorinen, Aino-Maija and Syvaranta, Suvi and Vogels, Ruben R. M. and Prins, Wiesje and Coppola, Andrea and Bossa, Nancy and ten Broek, Richard P. G. and Huisman, Henkjan},
+ title = {Inter- and Intra-Observer Variability and the Effect of Experience in Cine-MRI for Adhesion Detection},
+ doi = {https://doi.org/10.3390/jimaging9030055},
+ number = {3},
+ pages = {55},
+ volume = {9},
+ journal = {Journal of Imaging},
+ optnote = {DIAG, RADIOLOGY},
+ publisher = {MDPI},
+ year = {2023},
+ ss_id = {74612633c1b07655ed9a081d1540deb22a257430},
+ all_ss_ids = {['74612633c1b07655ed9a081d1540deb22a257430']},
+ gscites = {1},
+}
+
+@article{Wild23b,
+ author = {de Wilde, Bram and Saha, Anindo and ten Broek, Richard PG and Huisman, Henkjan},
+ title = {Medical diffusion on a budget: textual inversion for medical image generation},
+ journal = {arXiv:2303.13430},
+ optnote = {DIAG, RADIOLOGY},
+ year = {2023},
+ ss_id = {41579777836a07a1bd8b4d8593fcda7983b68e67},
+ all_ss_ids = {['41579777836a07a1bd8b4d8593fcda7983b68e67']},
+ gscites = {2},
+}
+
+@article{Wild23c,
+ author = {van den Beukel, Bastiaan A. W. and de Wilde, Bram and Joosten, Frank and van Goor, Harry and Venderink, Wulphert and Huisman, Henkjan J. and ten Broek, Richard P.
G.},
+ title = {Quantifiable Measures of Abdominal Wall Motion for Quality Assessment of Cine-MRI Slices in Detection of Abdominal Adhesions},
+ doi = {10.3390/jimaging9050092},
+ number = {5},
+ volume = {9},
+ journal = {Journal of Imaging},
+ optnote = {DIAG, RADIOLOGY},
+ publisher = {MDPI},
+ year = {2023},
+ ss_id = {31c44bb386353af1a2dc76bd533eff7433424c45},
+ all_ss_ids = {['31c44bb386353af1a2dc76bd533eff7433424c45']},
+ gscites = {0},
+}

@article{Wink15a,
@@ -26517,7 +32949,9 @@ @article{Wink15a
  pmid = {25764091},
  month = {3},
  gsid = {5188005511580764305},
- gscites = {51},
+ gscites = {65},
+ ss_id = {8b3995db48fa7ab9539e3aaa72eb71cba8e68802},
+ all_ss_ids = {['8b3995db48fa7ab9539e3aaa72eb71cba8e68802']},
}

@mastersthesis{Wink19,
@@ -26541,6 +32975,43 @@ @article{Wink21
  doi = {10.1097/rli.0000000000000780},
  url = {https://doi.org/10.1097%2Frli.0000000000000780},
  publisher = {Ovid Technologies (Wolters Kluwer Health)},
+ ss_id = {24ecb4a2f6f46d782edc6a04517480b78d896ac1},
+ all_ss_ids = {['24ecb4a2f6f46d782edc6a04517480b78d896ac1']},
+ gscites = {18},
+}
+
+@article{Wink21a,
+ author = {van Winkel, Suzanne L. and Rodr\'{i}guez-Ruiz, Alejandro and Appelman, Linda and Gubern-M\'{e}rida, Albert and Karssemeijer, Nico and Teuwen, Jonas and Wanders, Alexander J. T. and Sechopoulos, Ioannis and Mann, Ritse M.},
+ title = {~~Impact of artificial intelligence support on accuracy and reading time in breast tomosynthesis image interpretation: a multi-reader multi-case study},
+ doi = {10.1007/s00330-021-07992-w},
+ year = {2021},
+ abstract = {Objectives
+ Digital breast tomosynthesis (DBT) increases sensitivity of mammography and is increasingly implemented in breast cancer screening. However, the large volume of images increases the risk of reading errors and reading time. This study aims to investigate whether the accuracy of breast radiologists reading wide-angle DBT increases with the aid of an artificial intelligence (AI) support system. Also, the impact on reading time was assessed and the stand-alone performance of the AI system in the detection of malignancies was compared to the average radiologist.
+
+ Methods
+ A multi-reader multi-case study was performed with 240 bilateral DBT exams (71 breasts with cancer lesions, 70 breasts with benign findings, 339 normal breasts). Exams were interpreted by 18 radiologists, with and without AI support, providing cancer suspicion scores per breast. Using AI support, radiologists were shown examination-based and region-based cancer likelihood scores. Area under the receiver operating characteristic curve (AUC) and reading time per exam were compared between reading conditions using mixed-models analysis of variance.
+
+ Results
+ On average, the AUC was higher using AI support (0.863 vs 0.833; p = 0.0025). Using AI support, reading time per DBT exam was reduced (p < 0.001) from 41 s (95% CI = 39-42 s) to 36 s (95% CI = 35-37 s). The AUC of the stand-alone AI system was non-inferior to the AUC of the average radiologist (+0.007, p = 0.8115).
+
+ Conclusions
+ Radiologists improved their cancer detection and reduced reading time when evaluating DBT examinations using an AI reading support system.
+
+ Key Points
+ * Radiologists improved their cancer detection accuracy in digital breast tomosynthesis (DBT) when using an AI system for support, while simultaneously reducing reading time.
+ * The stand-alone breast cancer detection performance of an AI system is non-inferior to the average performance of radiologists for reading digital breast tomosynthesis exams. + * The use of an AI support system could make advanced and more reliable imaging techniques more accessible and could allow for more cost-effective breast screening programs with DBT. + }, + url = {http://dx.doi.org/10.1007/s00330-021-07992-w}, + file = {Wink21a.pdf:pdf\\Wink21a.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Radiology}, + automatic = {yes}, + citation-count = {36}, + pages = {8682-8691}, + volume = {31}, + pmid = {33948701}, } @article{Witt10, @@ -26576,7 +33047,9 @@ @article{Witt11 pmid = {21178052}, month = {1}, gsid = {9265698792924388926}, - gscites = {21}, + gscites = {24}, + ss_id = {afda5ff4f4b03cd95f1f52ac8886397e46b5300d}, + all_ss_ids = {['afda5ff4f4b03cd95f1f52ac8886397e46b5300d']}, } @article{Witt12, @@ -26594,7 +33067,9 @@ @article{Witt12 pmid = {22190659}, month = {1}, gsid = {10444596397988543040}, - gscites = {31}, + gscites = {41}, + ss_id = {2f3bebbb947b31dc3f81d4ac5f7e1414e3172f25}, + all_ss_ids = {['2f3bebbb947b31dc3f81d4ac5f7e1414e3172f25']}, } @article{Witt12a, @@ -26611,6 +33086,9 @@ @article{Witt12a number = {2}, pmid = {21315530}, month = {2}, + ss_id = {2f10f440accedc9e5cac27b58f606939d41b9c0b}, + all_ss_ids = {['2f10f440accedc9e5cac27b58f606939d41b9c0b']}, + gscites = {17}, } @article{Witt12b, @@ -26628,7 +33106,9 @@ @article{Witt12b pmid = {22167514}, month = {6}, gsid = {10609484598656160943}, - gscites = {11}, + gscites = {14}, + ss_id = {55531229b8103969912f6d12933d5c2b30c616e2}, + all_ss_ids = {['55531229b8103969912f6d12933d5c2b30c616e2']}, } @phdthesis{Witt12c, @@ -26645,6 +33125,21 @@ @phdthesis{Witt12c journal = {PhD thesis}, } +@book{Witt15, + author = {Wittenberg, Rianne and Rossi, Santiago and Schaefer-Prokop, Cornelia}, + title = {~~Drug-Induced Interstitial Lung Disease in Oncology Patients}, + doi = {10.1007/174_2015_1080}, + year = {2015}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1007/174_2015_1080}, + file = {Witt15.pdf:pdf\\Witt15.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Imaging of Complications and Toxicity following Tumor Therapy}, + automatic = {yes}, + citation-count = {0}, + pages = {129-145}, +} + @article{Xie20, author = {Xie, Weiyi and Jacobs, Colin and Charbonnier, Jean-Paul and van Ginneken, Bram}, title = {Relational Modeling for Robust and Efficient Pulmonary Lobe Segmentation in {CT} Scans}, @@ -26660,8 +33155,9 @@ @article{Xie20 pmid = {32730216}, year = {2020}, gsid = {13494064903459246916}, - gscites = {5}, + gscites = {95}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/221569}, + all_ss_ids = {['a114f0f6dbeb11d47d0736a1ece21cbf4be27fb5', '32a8259a781ea7255621b0cbdeecfa8d03d7e0bd', '2e571724830cb8ca6e8dbe9cc1f92fdcfc517ec4']}, } @inproceedings{Xie21, @@ -26672,6 +33168,9 @@ @inproceedings{Xie21 url = {https://openreview.net/pdf?id=pOFGaVQeXAk}, file = {Xie21.pdf:pdf\\Xie21.pdf:PDF}, optnote = {DIAG, INPRESS, RADIOLOGY}, + ss_id = {29595323a6789fec907a39c4de1ad56b75c74630}, + all_ss_ids = {['29595323a6789fec907a39c4de1ad56b75c74630']}, + gscites = {0}, } @article{Xie21a, @@ -26683,6 +33182,19 @@ @article{Xie21a optnote = {DIAG}, } +@article{Xie22, + author = {Xie, Weiyi and Jacobs, Colin and Charbonnier, Jean-Paul and van Ginneken, Bram}, + title = {~~Structure and position-aware graph neural network for airway labeling}, + doi = {10.48550/ARXIV.2201.04532}, + year = {2022}, 
+ abstract = {We present a novel graph-based approach for labeling the anatomical branches of a given airway tree segmentation. The proposed method formulates airway labeling as a branch classification problem in the airway tree graph, where branch features are extracted using convolutional neural networks (CNN) and enriched using graph neural networks. Our graph neural network is structure-aware by having each node aggregate information from its local neighbors and position-aware by encoding node positions in the graph. We evaluated the proposed method on 220 airway trees from subjects with various severity stages of Chronic Obstructive Pulmonary Disease (COPD). The results demonstrate that our approach is computationally efficient and significantly improves branch classification performance compared to the baseline method. The overall average accuracy of our method reaches 91.18\% for labeling all 18 segmental airway branches, compared to 83.83\% obtained by the standard CNN method. We published our source code at https://github.com/DIAGNijmegen/spgnn. The proposed algorithm is also publicly available at https://grand-challenge.org/algorithms/airway-anatomical-labeling/.},
+ url = {https://arxiv.org/abs/2201.04532},
+ file = {Xie22.pdf:pdf\\Xie22.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {arXiv:2201.04532},
+ automatic = {yes},
+}

@article{Xie23,
  author = {Xie, Weiyi and Jacobs, Colin and Charbonnier, Jean-Paul and van Ginneken, Bram},
  title = {Dense regression activation maps for lesion segmentation in CT scans of COVID-19 patients},
  code = {https://github.com/DIAGNijmegen/bodyct-dram},
  year = {2023},
  volume = {86},
-}
-
-@article{Xie23b,
- author={Weiyi Xie and Colin Jacobs and Jean-Paul Charbonnier and Dirk Jan Slebos and Bram van Ginneken},
- title={Emphysema subtyping on thoracic computed tomography scans using deep neural networks},
- journal={Scientific Reports},
- volume={13},
- number={1},
- year={2023},
- doi = {10.1038/s41598-023-40116-6},
- pages = {14147},
- pmid = {37644032},
+ ss_id = {b0255ad2c81689cfd14ec8573780400a5fbf7b99},
+ all_ss_ids = {['b0255ad2c81689cfd14ec8573780400a5fbf7b99', '9a749224752f101c29f177f941bb5c967855db27']},
+ gscites = {2},
}

@phdthesis{Xie23a,
  title = {Deep Learning for Treatment Planning in Chronic Obstructive Pulmonary Diseases},
  url = {https://repository.ubn.ru.nl/bitstream/handle/2066/294845/294845.pdf},
  abstract = {In Chapter 1, we introduced chronic obstructive pulmonary disease (COPD) and gave background information on COPD diagnosis and treatment planning. We described the role of quantitative CT analysis in COPD treatment planning. Furthermore, we provided a short history of image analysis, from applying low-level image processing to deep learning-based CT analysis, explaining the reason behind deep learning's prosperity on the road to being data-driven.
  In Chapter 2, we presented a novel method using relational two-stage convolution neural networks for segmenting pulmonary lobes in CT images. The proposed method uses a non-local neural network to capture visual and geometric correspondence between high-level convolution features, which represents the relationships between objects and object parts. Our results demonstrate that learning feature correspondence improves the lobe segmentation performance substantially compared to the baseline on the COPD and the COVID-19 data set.
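Editor's note: the airway-labeling entry above (Xie22) and thesis Chapter 3 summarized below enrich CNN branch features in a graph neural network and encode branch positions as topological distances to anchor branches. A minimal NumPy sketch of that structure- and position-aware idea; the toy tree, feature sizes, and mean-aggregation rule are illustrative assumptions, not the published model (see the linked spgnn repository for the real implementation):

```python
import numpy as np
from collections import deque

def bfs_distances(adj, source, n):
    """Topological (hop) distance from a source branch to all branches."""
    dist = np.full(n, np.inf)
    dist[source] = 0
    queue = deque([source])
    while queue:
        u = queue.popleft()
        for v in adj[u]:
            if np.isinf(dist[v]):
                dist[v] = dist[u] + 1
                queue.append(v)
    return dist

# Toy airway tree: 0 trachea, 1-2 main bronchi, 3-6 lobar branches.
adj = {0: [1, 2], 1: [0, 3, 4], 2: [0, 5, 6], 3: [1], 4: [1], 5: [2], 6: [2]}
n = len(adj)
feats = np.random.default_rng(0).normal(size=(n, 8))    # stand-in CNN features

anchors = [0, 3, 6]                                      # anchor branches
pos = np.stack([bfs_distances(adj, a, n) for a in anchors], axis=1)
x = np.concatenate([feats, pos], axis=1)                 # position-aware input

for _ in range(2):    # two rounds of neighbor aggregation (structure-aware)
    x = np.stack([(x[v] + x[adj[v]].mean(axis=0)) / 2 for v in range(n)])
print(x.shape)        # (7, 11): enriched branch features for classification
```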
-In Chapter 3, we presented a method for labeling segmental airways given a segmented airway tree. First, we train a convolution neural network to extract features for representing airway branches. Then, these features are iteratively enriched in agraph neural network by collecting information from neighbors, where the graph is based on the airway tree connectivity. Furthermore, we leverage positional information in our graph neural network, where the position of each branch is encoded by its topological distance to a set of anchor branches. As a result, the learned features are structure- and position-aware, contributing to substantially improved branch classification results compared with methods that use only convolution features or standard graph neural networks. -In Chapter 4, we proposed a novel weakly-supervised segmentation framework trained end-to-end, using only image-level supervision. We show that this approach can produce high-resolution segmentation maps without voxel-level annotations.The proposed method substantially outperforms other weakly-supervised methods,although a gap with the fully-supervised performance remains. Our method trained a segmentation network to predict per-image lesion percentage. We made this training possible by proposing an interval regression loss, given only the upper and lower bound of the target percentage, not the exact percentage as supervision. Furthermore, we stabilized the regression training using equivariant regularization. In the refinement process, we proposed an attention neural network module that updated activation maps in one location using nearby activations, acting similar to random walkers, and seeded regional growth in standard post-processing pipelines, yet ours is trained end-to-end. -In Chapter 5, we expanded on the method outlined in Chapter 4 to predict emphysema subtypes. Our proposed algorithm generates high-resolution emphysema segmentation maps to aid in interpreting the prediction process, offering an improved model interpretability compared to the baseline. To predict both subtypes together, we employ the overlapping loss to ensure that each voxel is only assigned to onesubtype (centrilobular or paraseptal). We also use low-attenuation areas in the lung(LAA-950) as visual cues in regression training, providing the network with localized information. Our approach generates categorical visual scores, estimated emphysema percentages, and high-resolution segmentation maps for both centrilobularand paraseptal subtypes, making it more versatile than the baseline approach. -Finally, in Chapter 6, we reflected on this thesis’s main findings and contributions.We also analyzed the advances and impact in the field and the existing limitations of the proposed methods. Additionally, we provided a future outlook for research opportunities in the field of deep learning for medical image analysis.}, + In Chapter 2, we presented a novel method using relational two-stage convolu-tion neural networks for segmenting pulmonary lobes in CT images. The proposed method uses a non-local neural network to capture visual and geometric correspondence between high-level convolution features, which represents the relationships between objects and object parts. Our results demonstrate that learning feature correspondence improves the lobe segmentation performance substantially than the baseline on the COPD and the COVID-19 data set. + In Chapter 3, we presented a method for labeling segmental airways given a segmented airway tree. 
First, we train a convolution neural network to extract features for representing airway branches. Then, these features are iteratively enriched in a graph neural network by collecting information from neighbors, where the graph is based on the airway tree connectivity. Furthermore, we leverage positional information in our graph neural network, where the position of each branch is encoded by its topological distance to a set of anchor branches. As a result, the learned features are structure- and position-aware, contributing to substantially improved branch classification results compared with methods that use only convolution features or standard graph neural networks.
  In Chapter 4, we proposed a novel weakly-supervised segmentation framework trained end-to-end, using only image-level supervision. We show that this approach can produce high-resolution segmentation maps without voxel-level annotations. The proposed method substantially outperforms other weakly-supervised methods, although a gap with the fully-supervised performance remains. Our method trained a segmentation network to predict per-image lesion percentage. We made this training possible by proposing an interval regression loss, given only the upper and lower bound of the target percentage, not the exact percentage as supervision. Furthermore, we stabilized the regression training using equivariant regularization. In the refinement process, we proposed an attention neural network module that updated activation maps in one location using nearby activations, acting similar to random walkers, and seeded regional growth in standard post-processing pipelines, yet ours is trained end-to-end.
  In Chapter 5, we expanded on the method outlined in Chapter 4 to predict emphysema subtypes. Our proposed algorithm generates high-resolution emphysema segmentation maps to aid in interpreting the prediction process, offering an improved model interpretability compared to the baseline. To predict both subtypes together, we employ the overlapping loss to ensure that each voxel is only assigned to one subtype (centrilobular or paraseptal). We also use low-attenuation areas in the lung (LAA-950) as visual cues in regression training, providing the network with localized information. Our approach generates categorical visual scores, estimated emphysema percentages, and high-resolution segmentation maps for both centrilobular and paraseptal subtypes, making it more versatile than the baseline approach.
  Finally, in Chapter 6, we reflected on this thesis's main findings and contributions. We also analyzed the advances and impact in the field and the existing limitations of the proposed methods. Additionally, we provided a future outlook for research opportunities in the field of deep learning for medical image analysis.},
  copromotor = {C.
Jacobs}, file = {Xie23a.pdf:pdf\\Xie23a.pdf:PDF}, optnote = {DIAG}, @@ -26729,6 +33232,17 @@ @phdthesis{Xie23a journal = {PhD thesis}, } +@article{Xie23b, + author = {Weiyi Xie and Colin Jacobs and Jean-Paul Charbonnier and Dirk Jan Slebos and Bram van Ginneken}, + title = {Emphysema subtyping on thoracic computed tomography scans using deep neural networks}, + journal = {Scientific Reports}, + volume = {13}, + year = {2023}, + doi = {10.1038/s41598-023-40116-6}, + pages = {14147}, + ss_id = {3c7c167e0619911a29ce6082372100fbbc5ca7af}, + all_ss_ids = {['3c7c167e0619911a29ce6082372100fbbc5ca7af']}, + gscites = {0}, } @article{Yaka10, @@ -26748,6 +33262,8 @@ @article{Yaka10 gsid = {511205715423392373}, gscites = {66}, taverne_url = {https://repository.ubn.ru.nl/handle/2066/88584}, + ss_id = {45bced603af6c4b11db85018291705262a98768f}, + all_ss_ids = {['45bced603af6c4b11db85018291705262a98768f']}, } @article{Yaka11c, @@ -26766,6 +33282,8 @@ @article{Yaka11c month = {5}, gsid = {11125890377300683611}, gscites = {30}, + ss_id = {51c17b6d2b1fcd46fef6ceaa5569e39a951cb8a6}, + all_ss_ids = {['51c17b6d2b1fcd46fef6ceaa5569e39a951cb8a6']}, } @article{Yaka12, @@ -26784,6 +33302,23 @@ @article{Yaka12 month = {12}, } +@article{Youn21, + author = {Youn, Seo Yeon and Choi, Moon Hyung and Kim, Dong Hwan and Lee, Young Joon and Huisman, Henkjan and Johnson, Evan and Penzkofer, Tobias and Shabunin, Ivan and Winkel, David Jean and Xing, Pengyi and Szolar, Dieter and Grimm, Robert and von Busch, Heinrich and Son, Yohan and Lou, Bin and Kamen, Ali}, + title = {~~Detection and PI-RADS classification of focal lesions in prostate MRI: Performance comparison between a deep learning-based algorithm (DLA) and radiologists with various levels of experience}, + doi = {10.1016/j.ejrad.2021.109894}, + year = {2021}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejrad.2021.109894}, + file = {Youn21.pdf:pdf\\Youn21.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Radiology}, + automatic = {yes}, + citation-count = {18}, + pages = {109894}, + volume = {142}, + pmid = {34388625}, +} + @article{Yous17, author = {Yousaf-Khan, Uraujh and van der Aalst, Carlijn and de Jong, Pim A and Heuvelmans, Marjolein and Scholten, Ernst and Walter, Joan and Nackaerts, Kristiaan and Groen, Harry and Vliegenthart, Rozemarijn and Ten Haaf, Kevin and Oudkerk, Matthijs and de Koning, Harry}, title = {Risk stratification based on screening history: the NELSON lung cancer screening study}, @@ -26800,6 +33335,23 @@ @article{Yous17 month = {3}, } +@article{Yous21, + author = {Yousif, Mustafa and van Diest, Paul J. and Laurinavicius, Arvydas and Rimm, David and van der Laak, Jeroen and Madabhushi, Anant and Schnitt, Stuart and Pantanowitz, Liron}, + title = {~~Artificial intelligence applied to breast pathology}, + doi = {10.1007/s00428-021-03213-3}, + year = {2021}, + abstract = {The convergence of digital pathology and computer vision is increasingly enabling computers to perform tasks performed by humans. As a result, artificial intelligence (AI) is having an astoundingly positive effect on the field of pathology, including breast pathology. Research using machine learning and the development of algorithms that learn patterns from labeled digital data based on "deep learning" neural networks and feature-engineered approaches to analyze histology images have recently provided promising results. 
Thus far, image analysis and more complex AI-based tools have demonstrated excellent success performing tasks such as the quantification of breast biomarkers and Ki67, mitosis detection, lymph node metastasis recognition, tissue segmentation for diagnosing breast carcinoma, prognostication, computational assessment of tumor-infiltrating lymphocytes, and prediction of molecular expression as well as treatment response and benefit of therapy from routine H&E images. This review critically examines the literature regarding these applications of AI in the area of breast pathology.},
+ url = {http://dx.doi.org/10.1007/s00428-021-03213-3},
+ file = {Yous21.pdf:pdf\\Yous21.pdf:PDF},
+ optnote = {DIAG, RADIOLOGY},
+ journal = {Virchows Archiv},
+ automatic = {yes},
+ citation-count = {13},
+ pages = {191-209},
+ volume = {480},
+ pmid = {34791536},
+}

@inproceedings{Yu20,
  author = {Xin Yu and Bin Lou and Bibo Shi and David Winkel and Nacim Arrahmane and Mamadou Diallo and Tongbai Meng and Heinrich von Busch and Robert Grimm and Berthold Kiefer and Dorin Comaniciu and Ali Kamen and Henkjan Huisman and Andrew Rosenkrantz and Tobias Penzkofer and Ivan Shabunin and Moon Hyung Choi and Qingsong Yang and Dieter Szolar},
  title = {False Positive Reduction Using Multiscale Contextual Features for Prostate Cancer Detection in Multi-Parametric {MRI} Scans},
  month = {apr},
  doi = {10.1109/isbi45749.2020.9098338},
  abstract = {Prostate cancer (PCa) is the most prevalent and one of the leading causes of cancer death among men. Multi-parametric MRI (mp-MRI) is a prominent diagnostic scan, which could help in avoiding unnecessary biopsies for men screened for PCa. Artificial intelligence (AI) systems could help radiologists to be more accurate and consistent in diagnosing clinically significant cancer from mp-MRI scans. Lack of specificity has been identified recently as one of the weak points of such assistance systems. In this paper, we propose a novel false positive reduction network to be added to the overall detection system to further analyze lesion candidates. The new network utilizes multiscale 2D image stacks of these candidates to discriminate between true and false positive detections. We trained and validated our network on a dataset with 2170 cases from seven different institutions and tested it on a separate independent dataset with 243 cases. With the proposed model, we achieved area under curve (AUC) of 0.876 on discriminating between true and false positive detected lesions and improved the AUC from 0.825 to 0.867 on overall identification of clinically significant cases.},
+ ss_id = {af4eb854f4bb71a4007bdea42ccd6c79b2c70984},
+ all_ss_ids = {['af4eb854f4bb71a4007bdea42ccd6c79b2c70984']},
+ gscites = {24},
}

@conference{Zaid14,
@@ -26835,7 +33390,22 @@ @article{Zaid18
  optnote = {DIAG, RADIOLOGY},
  pmid = {30120345},
  gsid = {14712131535144362806},
- gscites = {15},
+ gscites = {45},
+ all_ss_ids = {['ebb0d21c567909e31be5d43d72c8b3020f2708c9', 'fd2239f855b044b6ebe9fac5e0e01951bc9eddb4']},
+}
+
+@inproceedings{Zanj18,
+ author = {Zanjani, Farhad Ghazvinian and Zinger, Svitlana and Bejnordi, Babak Ehteshami and van der Laak, Jeroen A W M and de With, Peter H.
N.}, + title = {~~Stain normalization of histopathology images using generative adversarial networks}, + doi = {10.1109/isbi.2018.8363641}, + year = {2018}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1109/ISBI.2018.8363641}, + file = {Zanj18.pdf:pdf\\Zanj18.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {2018 IEEE 15th International Symposium on Biomedical Imaging (ISBI 2018)}, + automatic = {yes}, + citation-count = {70}, } @conference{Zeel19, @@ -26843,18 +33413,18 @@ @conference{Zeel19 booktitle = ARVO, title = {{EyeNED} workstation: Development of a multi-modal vendor-independent application for annotation, spatial alignment and analysis of retinal images}, abstract = {Purpose: - Researchers and specialists in the field of ophthalmology currently rely on suboptimal vendor-specific software solutions for viewing and annotating retinal images. Our goal was to develop a fully-featured vendor-independent application that allows researchers and specialists to visualize multi-modal retinal images, perform spatial alignment and annotations, and review outputs of artificial intelligence (AI) algorithms. - - Methods: - The application consists of a web-based front-end that allows users to analyze baseline and follow-up images in a multi-modal viewer. It communicates with a back-end interface for grader authentication, loading and storing of images and annotation data. Several types of annotation techniques are available, ranging from image-level classification to point-based and region-based lesion-level annotations. - - The user can select color fundus (CF) images, optical coherence tomography (OCT) volumes, infrared (IR) and autofluorescence (AF) images to be shown simultaneously in the viewer. Spatial alignment of the different modalities can be performed using an integrated affine registration method by clicking on corresponding landmarks, after which a synchronized cursor will appear. After several graders have annotated lesions, the application can be used to compare these and create a consensus grading. - - Results : - The application was used by graders and researchers in the EyeNED research group. Region based annotations of geographic atrophy were made for 313 studies containing 488 CF images and 68 OCT images; and of drusen in 100 OCT b-scans. Semi-automatic annotation of the area of central retinal atrophy in Stargardt disease was performed for 67 AF images. Point-based annotation was carried out on lesions in 50 CF images of diabetic retinopathy patients. The multimodal viewing and localisation of lesions was perceived as particularly helpful in the grading of lesions and consensus discussions. - - Conclusions : - A software solution has been developed to assist researchers and specialists to view and annotate retinal images. The application was successfully used for annotating lesions in various imaging modalities, facilitating the grading of images in large studies and the collection of annotations for AI solutions.}, + Researchers and specialists in the field of ophthalmology currently rely on suboptimal vendor-specific software solutions for viewing and annotating retinal images. Our goal was to develop a fully-featured vendor-independent application that allows researchers and specialists to visualize multi-modal retinal images, perform spatial alignment and annotations, and review outputs of artificial intelligence (AI) algorithms. 
+ + Methods: + The application consists of a web-based front-end that allows users to analyze baseline and follow-up images in a multi-modal viewer. It communicates with a back-end interface for grader authentication, loading and storing of images and annotation data. Several types of annotation techniques are available, ranging from image-level classification to point-based and region-based lesion-level annotations. + + The user can select color fundus (CF) images, optical coherence tomography (OCT) volumes, infrared (IR) and autofluorescence (AF) images to be shown simultaneously in the viewer. Spatial alignment of the different modalities can be performed using an integrated affine registration method by clicking on corresponding landmarks, after which a synchronized cursor will appear. After several graders have annotated lesions, the application can be used to compare these and create a consensus grading. + + Results : + The application was used by graders and researchers in the EyeNED research group. Region based annotations of geographic atrophy were made for 313 studies containing 488 CF images and 68 OCT images; and of drusen in 100 OCT b-scans. Semi-automatic annotation of the area of central retinal atrophy in Stargardt disease was performed for 67 AF images. Point-based annotation was carried out on lesions in 50 CF images of diabetic retinopathy patients. The multimodal viewing and localisation of lesions was perceived as particularly helpful in the grading of lesions and consensus discussions. + + Conclusions : + A software solution has been developed to assist researchers and specialists to view and annotate retinal images. The application was successfully used for annotating lesions in various imaging modalities, facilitating the grading of images in large studies and the collection of annotations for AI solutions.}, optnote = {DIAG, RADIOLOGY}, year = {2019}, gsid = {5177328248453722349}, @@ -26891,22 +33461,24 @@ @article{Zels15 pages = {1489--1496}, doi = {10.1016/j.acra.2015.08.006}, abstract = {RATIONALE AND OBJECTIVES: - To investigate the value of multiplanar reconstructions (MPRs) of automated three-dimensional (3D) breast ultrasound (ABUS) compared to transverse evaluation only, in differentiation of benign and malignant breast lesions. - - MATERIALS AND METHODS: - Five breast radiologists evaluated ABUS scans of 96 female patients with biopsy-proven abnormalities (36 malignant and 60 benign). They classified the most suspicious lesion based on the breast imaging reporting and data system (BI-RADS) lexicon using the transverse scans only. A likelihood-of-malignancy (LOM) score (0-100) and a BI-RADS final assessment were assigned. Thereafter, the MPR was provided and readers scored the cases again. In addition, they rated the presence of spiculation and retraction in the coronal plane on a five-point scale called Spiculation and Retraction Severity Index (SRSI). Reader performance was analyzed with receiver-operating characteristics analysis. - - RESULTS: - The area under the curve increased from 0.82 to 0.87 (P = .01) after readers were shown the reconstructed planes. The SRSI scores are highly correlated (Spearman's r) with the final LOM scores (range, r = 0.808-0.872) and DLOM scores (range, r = 0.525-0.836). Readers downgraded 3%-18% of the biopsied benign lesions to BI-RADS 2 after MPR evaluation. Inter-reader agreement for SRSI was substantial (intraclass correlation coefficient, 0.617). 
Inter-reader agreement of the BI-RADS final assessment improved from 0.367 to 0.536 after MPRs were read. - - CONCLUSIONS: - Full 3D evaluation of ABUS using MPR improves differentiation of breast lesions in comparison to evaluating only transverse planes. Results suggest that the added value of MPR might be related to visualization of spiculation and retraction patterns in the coronal reconstructions.}, + To investigate the value of multiplanar reconstructions (MPRs) of automated three-dimensional (3D) breast ultrasound (ABUS) compared to transverse evaluation only, in differentiation of benign and malignant breast lesions. + + MATERIALS AND METHODS: + Five breast radiologists evaluated ABUS scans of 96 female patients with biopsy-proven abnormalities (36 malignant and 60 benign). They classified the most suspicious lesion based on the breast imaging reporting and data system (BI-RADS) lexicon using the transverse scans only. A likelihood-of-malignancy (LOM) score (0-100) and a BI-RADS final assessment were assigned. Thereafter, the MPR was provided and readers scored the cases again. In addition, they rated the presence of spiculation and retraction in the coronal plane on a five-point scale called Spiculation and Retraction Severity Index (SRSI). Reader performance was analyzed with receiver-operating characteristics analysis. + + RESULTS: + The area under the curve increased from 0.82 to 0.87 (P = .01) after readers were shown the reconstructed planes. The SRSI scores are highly correlated (Spearman's r) with the final LOM scores (range, r = 0.808-0.872) and DLOM scores (range, r = 0.525-0.836). Readers downgraded 3%-18% of the biopsied benign lesions to BI-RADS 2 after MPR evaluation. Inter-reader agreement for SRSI was substantial (intraclass correlation coefficient, 0.617). Inter-reader agreement of the BI-RADS final assessment improved from 0.367 to 0.536 after MPRs were read. + + CONCLUSIONS: + Full 3D evaluation of ABUS using MPR improves differentiation of breast lesions in comparison to evaluating only transverse planes. Results suggest that the added value of MPR might be related to visualization of spiculation and retraction patterns in the coronal reconstructions.}, file = {Zels15.pdf:pdf\\Zels15.pdf:PDF}, optnote = {DIAG}, publisher = {Elsevier}, month = {12}, gsid = {5747981011507824307}, - gscites = {19}, + gscites = {34}, + ss_id = {ff84e1c5bae04497d875a331da0120f8a337d226}, + all_ss_ids = {['ff84e1c5bae04497d875a331da0120f8a337d226']}, } @article{Zels17a, @@ -26925,6 +33497,8 @@ @article{Zels17a pmid = {28576620}, gsid = {9326086829770411776}, gscites = {9}, + ss_id = {deda0fc0508e72e9cece3d27b74b0776e593d055}, + all_ss_ids = {['deda0fc0508e72e9cece3d27b74b0776e593d055']}, } @article{Zels17b, @@ -26942,7 +33516,26 @@ @article{Zels17b optnote = {DIAG, RADIOLOGY}, pmid = {28609204}, gsid = {3304914079633955853}, - gscites = {32}, + gscites = {52}, + ss_id = {c2da52941ea09be5f3a56087d52454da941e4fb5}, + all_ss_ids = {['c2da52941ea09be5f3a56087d52454da941e4fb5']}, +} + +@article{Zels17c, + author = {van Zelst, J.C.M. and Tan, T. and Platel, B. and de Jong, M. and Steenbakkers, A. and Mourits, M. and Grivegnee, A. and Borelli, C. and Karssemeijer, N. 
and Mann, R.M.}, + title = {~~Improved cancer detection in automated breast ultrasound by radiologists using Computer Aided Detection}, + doi = {10.1016/j.ejrad.2017.01.021}, + year = {2017}, + abstract = {Abstract unavailable}, + url = {http://dx.doi.org/10.1016/j.ejrad.2017.01.021}, + file = {Zels17c.pdf:pdf\\Zels17c.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {European Journal of Radiology}, + automatic = {yes}, + citation-count = {42}, + pages = {54-59}, + volume = {89}, + pmid = {28267549}, } @article{Zels18, @@ -26960,7 +33553,9 @@ @article{Zels18 optnote = {DIAG}, pmid = {29417251}, gsid = {14669183418477071058}, - gscites = {23}, + gscites = {51}, + ss_id = {c6b1c8590269a5789095eca5f28d49413045fd82}, + all_ss_ids = {['c6b1c8590269a5789095eca5f28d49413045fd82']}, } @article{Zels18a, @@ -26978,7 +33573,9 @@ @article{Zels18a optnote = {DIAG}, pmid = {29944483}, gsid = {14278207744774406949}, - gscites = {22}, + gscites = {45}, + ss_id = {87b0a4836bd2d2caeaea950037ba2bdc9a2443b1}, + all_ss_ids = {['87b0a4836bd2d2caeaea950037ba2bdc9a2443b1']}, } @article{Zels18b, @@ -27022,10 +33619,29 @@ @article{Zels19a pages = {312-320}, doi = {10.1177/0284185119858051}, abstract = {Background: Computer-aided detection software for automated breast ultrasound has been shown to have potential in improving the accuracy of radiologists. Alternative ways of implementing computer-aided detection, such as independent validation or preselecting suspicious cases, might also improve radiologists' accuracy. - Purpose: To investigate the effect of using computer-aided detection software to improve the performance of radiologists by validating findings reported by radiologists during screening with automated breast ultrasound. Material and Methods: Unilateral automated breast ultrasound exams were performed in 120 women with dense breasts that included 60 randomly selected normal exams, 30 exams with benign lesions, and 30 malignant cases (20 mammography-negative). Eight radiologists were instructed to detect breast cancer and rate lesions using BI-RADS and level-of-suspiciousness scores. Computer-aided detection software was used to check the validity of radiologists' findings. Findings found negative by computer-aided detection were not included in the readers' performance analysis; however, the nature of these findings were further analyzed. The area under the curve and the partial area under the curve for an interval in the range of 80%-100% specificity before and after validation of computer-aided detection were compared. Sensitivity was computed for all readers at a simulation of 90% specificity. Results: Partial AUC improved significantly from 0.126 (95% confidence interval [CI] = 0.098-0.153) to 0.142 (95% CI = 0.115-0.169) (P = 0.037) after computer-aided detection rejected mostly benign lesions and normal tissue scored BI-RADS 3 or 4. The full areas under the curve (0.823 vs. 0.833, respectively) were not significantly different (P = 0.743). Four cancers detected by readers were completely missed by computer-aided detection and four other cancers were detected by both readers and computer-aided detection but falsely rejected due to technical limitations of our implementation of computer-aided detection validation. 
In this study, validation of computer-aided detection discarded 42.6% of findings that were scored BI-RADS >=3 by the radiologists, of which 85.5% were non-malignant findings. Conclusion: Validation of radiologists' findings using computer-aided detection software for automated breast ultrasound has the potential to improve the performance of radiologists. Validation of computer-aided detection might be an efficient tool for double-reading strategies by limiting the amount of discordant cases needed to be double-read.}, file = {Zels19a.pdf:pdf\\Zels19a.pdf:PDF}, optnote = {DIAG, RADIOLOGY}, pmid = {31324132}, + ss_id = {f0d2a9258dcc592bee57f97fe523c1a0c1af5856}, + all_ss_ids = {['f0d2a9258dcc592bee57f97fe523c1a0c1af5856']}, + gscites = {13}, +} + +@article{Zhai22, + author = {Zhai, Zhiwei and van Velzen, Sanne G. M. and Lessmann, Nikolas and Planken, Nils and Leiner, Tim and Isgum, Ivana}, + title = {~~Learning coronary artery calcium scoring in coronary CTA from non-contrast CT using unsupervised domain adaptation}, + doi = {10.3389/fcvm.2022.981901}, + year = {2022}, + abstract = {Deep learning methods have demonstrated the ability to perform accurate coronary artery calcium (CAC) scoring. 
However, these methods require large and representative training data, hampering applicability to diverse CT scans showing the heart and the coronary arteries. Training methods that accurately score CAC in cross-domain settings remain challenging. To address this, we present an unsupervised domain adaptation method that learns to perform CAC scoring in coronary CT angiography (CCTA) from non-contrast CT (NCCT). To address the domain shift between the NCCT (source) domain and the CCTA (target) domain, feature distributions are aligned between the two domains using adversarial learning. A CAC scoring convolutional neural network is divided into a feature generator that maps input images to features in the latent space and a classifier that estimates predictions from the extracted features. For adversarial learning, a discriminator is used to distinguish the features between source and target domains. Hence, the feature generator aims to extract features with aligned distributions to fool the discriminator. The network is trained with adversarial loss as the objective function and a classification loss on the source domain as a constraint for adversarial learning. In the experiments, three data sets were used. The network is trained with 1,687 labeled chest NCCT scans from the National Lung Screening Trial. Furthermore, 200 labeled cardiac NCCT scans and 200 unlabeled CCTA scans were used to train the generator and the discriminator for unsupervised domain adaptation. Finally, a data set containing 313 manually labeled CCTA scans was used for testing. Directly applying the CAC scoring network trained on NCCT to CCTA led to a sensitivity of 0.41 and an average false positive volume of 140 mm3/scan. The proposed method improved the sensitivity to 0.80 and reduced the average false positive volume to 20 mm3/scan. The results indicate that the unsupervised domain adaptation approach enables automatic CAC scoring in contrast-enhanced CT while learning from a large and diverse set of CT scans without contrast. This may allow for better utilization of existing annotated data sets and extend the applicability of automatic CAC scoring to contrast-enhanced CT scans without the need for additional manual annotations. The code is publicly available at https://github.com/qurAI-amsterdam/CACscoringUsingDomainAdaptation.}, + url = {http://dx.doi.org/10.3389/fcvm.2022.981901}, + file = {Zhai22.pdf:pdf\\Zhai22.pdf:PDF}, + optnote = {DIAG, RADIOLOGY}, + journal = {Frontiers in Cardiovascular Medicine}, + automatic = {yes}, + citation-count = {0}, + volume = {9}, + pmid = {36172575}, +} + @article{Zhou20, @@ -27036,6 +33652,9 @@ @article{Zhou20 abstract = {Since its renaissance, deep learning has been widely used in various medical imaging tasks and has achieved remarkable success in many medical imaging applications, thereby propelling us into the so-called artificial intelligence (AI) era. It is known that the success of AI is mostly attributed to the availability of big data with annotations for a single task and the advances in high performance computing. However, medical imaging presents unique challenges that confront deep learning approaches. In this survey paper, we first highlight both clinical needs and technical challenges in medical imaging and describe how emerging trends in deep learning are addressing these issues. We cover the topics of network architecture, sparse and noisy labels, federated learning, interpretability, uncertainty quantification, etc. 
Then, we present several case studies that are commonly found in clinical practice, including digital pathology and chest, brain, cardiovascular, and abdominal imaging. Rather than presenting an exhaustive literature survey, we instead describe some prominent research highlights related to these case study applications. We conclude with a discussion and presentation of promising future directions.}, file = {:http\://arxiv.org/pdf/2008.09104v1:PDF}, optnote = {DIAG}, + ss_id = {4043785dacd1c04ed93ec1c08ecf779f4e1717fc}, + all_ss_ids = {['4043785dacd1c04ed93ec1c08ecf779f4e1717fc']}, + gscites = {337}, } @conference{Zrei17, @@ -27066,1485 +33685,3 @@ @conference{deVos2017 year = {2017}, optnote = {DIAG}, } - -@article{Laak00, - author = {van der Laak, J. A. W. M. and Pahlplatz, M. M. and Hanselaar, A. G. and de Wilde, P. C.}, - title = {Hue-saturation-density (HSD) model for stain recognition in digital images from transmitted light microscopy}, - issue = {4}, - pages = {275--284}, - volume = {39}, - abstract = {Transmitted light microscopy is used in pathology to examine stained tissues. Digital image analysis is gaining importance as a means to quantify alterations in tissues. A prerequisite for accurate and reproducible quantification is the possibility to recognise stains in a standardised manner, independently of variations in the staining density. The usefulness of three colour models was studied using data from computer simulations and experimental data from an immuno-doublestained tissue section. Direct use of the three intensities obtained by a colour camera results in the red-green-blue (RGB) model. By decoupling the intensity from the RGB data, the hue-saturation-intensity (HSI) model is obtained. However, the major part of the variation in perceived intensities in transmitted light microscopy is caused by variations in staining density. Therefore, the hue-saturation-density (HSD) transform was defined as the RGB to HSI transform, applied to optical density values rather than intensities for the individual RGB channels. In the RGB model, the mixture of chromatic and intensity information hampers standardisation of stain recognition. In the HSI model, mixtures of stains that could be distinguished from other stains in the RGB model could not be separated. The HSD model enabled all possible distinctions in a two-dimensional, standardised data space. In the RGB model, standardised recognition is only possible by using complex and time-consuming algorithms. The HSI model is not suitable for stain recognition in transmitted light microscopy. The newly derived HSD model was found superior to the existing models for this purpose.}, - file = {Laak00.pdf:pdf\\Laak00.pdf:PDF}, - journal = Cytometry, - month = apr, - optnote = {DIAG}, - pmid = {10738280}, - year = {2000}, - taverne_url = {https://repository.ubn.ru.nl/handle/2066/144809}, -} - -@article{Penz21, - author = {Penzkofer, Tobias and Padhani, Anwar R and Turkbey, Baris and Haider, Masoom A and Huisman, Henkjan and Walz, Jochen and Salomon, Georg and Schoots, Ivo G and Richenberg, Jonathan and Villeirs, Geert and Panebianco, Valeria and Rouviere, Olivier and Logager, Vibeke Berg and Barentsz, Jelle}, - title = {ESUR/ESUI position paper: developing artificial intelligence for precision diagnosis of prostate cancer using magnetic resonance imaging.}, - doi = {10.1007/s00330-021-08021-6}, - abstract = {Artificial intelligence developments are essential to the successful deployment of community-wide, MRI-driven prostate cancer diagnosis. 
AI systems should ensure that the main benefits of biopsy avoidance are delivered while maintaining consistent high specificities, at a range of disease prevalences. Since all current artificial intelligence / computer-aided detection systems for prostate cancer detection are experimental, multiple developmental efforts are still needed to bring the vision to fruition. Initial work needs to focus on developing systems as diagnostic supporting aids so their results can be integrated into the radiologists' workflow including gland and target outlining tasks for fusion biopsies. Developing AI systems as clinical decision-making tools will require greater efforts. The latter encompass larger multicentric, multivendor datasets where the different needs of patients stratified by diagnostic settings, disease prevalence, patient preference, and clinical setting are considered. AI-based, robust, standard operating procedures will increase the confidence of patients and payers, thus enabling the wider adoption of the MRI-directed approach for prostate cancer diagnosis. KEY POINTS: * AI systems need to ensure that the benefits of biopsy avoidance are delivered with consistent high specificities, at a range of disease prevalence. * Initial work has focused on developing systems as diagnostic supporting aids for outlining tasks, so they can be integrated into the radiologists' workflow to support MRI-directed biopsies. * Decision support tools require a larger body of work including multicentric, multivendor studies where the clinical needs, disease prevalence, patient preferences, and clinical setting are additionally defined.}, - journal = ER, - month = may, - pmid = {33991226}, - year = {2021}, - taverne_url = {https://repository.ubn.ru.nl/handle/2066/245173}, -} - -@article{Bili22, -title = {The Liver Tumor Segmentation Benchmark (LiTS)}, -journal = MIA, -pages = {102680}, -year = {2022}, -issn = {1361-8415}, -doi = {https://doi.org/10.1016/j.media.2022.102680}, -url = {https://www.sciencedirect.com/science/article/pii/S1361841522003085}, -author = {Patrick Bilic and Patrick Christ and Hongwei Bran Li and Eugene Vorontsov and Avi Ben-Cohen and Georgios Kaissis and Adi Szeskin and Colin Jacobs and Gabriel Efrain Humpire Mamani and Gabriel Chartrand and Fabian Lohöfer and Julian Walter Holch and Wieland Sommer and Felix Hofmann and Alexandre Hostettler and Naama Lev-Cohain and Michal Drozdzal and Michal Marianne Amitai and Refael Vivanti and Jacob Sosna and Ivan Ezhov and Anjany Sekuboyina and Fernando Navarro and Florian Kofler and Johannes C. Paetzold and Suprosanna Shit and Xiaobin Hu and Jana Lipková and Markus Rempfler and Marie Piraud and Jan Kirschke and Benedikt Wiestler and Zhiheng Zhang and Christian Hülsemeyer and Marcel Beetz and Florian Ettlinger and Michela Antonelli and Woong Bae and Míriam Bellver and Lei Bi and Hao Chen and Grzegorz Chlebus and Erik B. 
Dam and Qi Dou and Chi-Wing Fu and Bogdan Georgescu and Xavier Giró-i-Nieto and Felix Gruen and Xu Han and Pheng-Ann Heng and Jürgen Hesser and Jan Hendrik Moltz and Christian Igel and Fabian Isensee and Paul Jäger and Fucang Jia and Krishna Chaitanya Kaluva and Mahendra Khened and Ildoo Kim and Jae-Hun Kim and Sungwoong Kim and Simon Kohl and Tomasz Konopczynski and Avinash Kori and Ganapathy Krishnamurthi and Fan Li and Hongchao Li and Junbo Li and Xiaomeng Li and John Lowengrub and Jun Ma and Klaus Maier-Hein and Kevis-Kokitsi Maninis and Hans Meine and Dorit Merhof and Akshay Pai and Mathias Perslev and Jens Petersen and Jordi Pont-Tuset and Jin Qi and Xiaojuan Qi and Oliver Rippel and Karsten Roth and Ignacio Sarasua and Andrea Schenk and Zengming Shen and Jordi Torres and Christian Wachinger and Chunliang Wang and Leon Weninger and Jianrong Wu and Daguang Xu and Xiaoping Yang and Simon Chun-Ho Yu and Yading Yuan and Miao Yue and Liping Zhang and Jorge Cardoso and Spyridon Bakas and Rickmer Braren and Volker Heinemann and Christopher Pal and An Tang and Samuel Kadoury and Luc Soler and Bram van Ginneken and Hayit Greenspan and Leo Joskowicz and Bjoern Menze}, -keywords = {Segmentation, Liver, Liver tumor, Deep learning, Benchmark, CT}, -abstract = {In this work, we report the set-up and results of the Liver Tumor Segmentation Benchmark (LiTS), which was organized in conjunction with the IEEE International Symposium on Biomedical Imaging (ISBI) 2017 and the International Conferences on Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2017 and 2018. The image dataset is diverse and contains primary and secondary tumors with varied sizes and appearances with various lesion-to-background levels (hyper-/hypo-dense), created in collaboration with seven hospitals and research institutions. Seventy-five submitted liver and liver tumor segmentation algorithms were trained on a set of 131 computed tomography (CT) volumes and were tested on 70 unseen test images acquired from different patients. We found that not a single algorithm performed best for both liver and liver tumors in the three events. The best liver segmentation algorithm achieved a Dice score of 0.963, whereas, for tumor segmentation, the best algorithms achieved Dice scores of 0.674 (ISBI 2017), 0.702 (MICCAI 2017), and 0.739 (MICCAI 2018). Retrospectively, we performed additional analysis on liver tumor detection and revealed that not all top-performing segmentation algorithms worked well for tumor detection. The best liver tumor detection method achieved a lesion-wise recall of 0.458 (ISBI 2017), 0.515 (MICCAI 2017), and 0.554 (MICCAI 2018), indicating the need for further research. LiTS remains an active benchmark and resource for research, e.g., contributing the liver-related segmentation tasks in http://medicaldecathlon.com/. In addition, both data and online evaluation are accessible via www.lits-challenge.com.}, -pmid = {36481607}, -volume = {84}, -} - -@InProceedings{Chel22, - author = {Eduard Chelebian and Francesco Ciompi}, - booktitle = {Learning Meaningful Representations of Life, NeurIPS 2022}, - title = {Seeded iterative clustering for histology region identification}, - abstract = {Annotations are necessary to develop computer vision algorithms for histopathology, but dense annotations at a high resolution are often time-consuming to make. Deep learning models for segmentation are a way to alleviate the process, but require large amounts of training data, training times and computing power. 
To address these issues, we present seeded iterative clustering to produce a coarse segmentation densely and at the whole slide level. The algorithm uses precomputed representations as the clustering space and a limited amount of sparse interactive annotations as seeds to iteratively classify image patches. We obtain a fast and effective way of generating dense annotations for whole slide images and a framework that allows the comparison of neural network latent representations in the context of transfer learning.}, - file = {Chel22.pdf:pdf\\Chel22.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - year = {2022}, -} - -@Article{Aubr22, - author = {Aubreville, Marc and Stathonikos, Nikolas and Bertram, Christof A. and Klopfleisch, Robert and Ter Hoeve, Natalie and Ciompi, Francesco and Wilm, Frauke and Marzahl, Christian and Donovan, Taryn A. and Maier, Andreas and Breen, Jack and Ravikumar, Nishant and Chung, Youjin and Park, Jinah and Nateghi, Ramin and Pourakpour, Fattaneh and Fick, Rutger H. J. and Ben Hadj, Saima and Jahanifar, Mostafa and Shephard, Adam and Dexl, Jakob and Wittenberg, Thomas and Kondo, Satoshi and Lafarge, Maxime W. and Koelzer, Viktor H. and Liang, Jingtang and Wang, Yubo and Long, Xi and Liu, Jingxin and Razavi, Salar and Khademi, April and Yang, Sen and Wang, Xiyue and Erber, Ramona and Klang, Andrea and Lipnik, Karoline and Bolfa, Pompei and Dark, Michael J. and Wasinger, Gabriel and Veta, Mitko and Breininger, Katharina}, - title = {Mitosis domain generalization in histopathology images - The MIDOG challenge.}, - doi = {10.1016/j.media.2022.102699}, - issn = {1361-8423}, - pages = {102699}, - pubstate = {aheadofprint}, - volume = {84}, - abstract = {The density of mitotic figures (MF) within tumor tissue is known to be highly correlated with tumor proliferation and thus is an important marker in tumor grading. Recognition of MF by pathologists is subject to a strong inter-rater bias, limiting its prognostic value. State-of-the-art deep learning methods can support experts but have been observed to strongly deteriorate when applied in a different clinical environment. The variability caused by using different whole slide scanners has been identified as one decisive component in the underlying domain shift. The goal of the MICCAI MIDOG 2021 challenge was the creation of scanner-agnostic MF detection algorithms. The challenge used a training set of 200 cases, split across four scanning systems. As test set, an additional 100 cases split across four scanning systems, including two previously unseen scanners, were provided. In this paper, we evaluate and compare the approaches that were submitted to the challenge and identify methodological factors contributing to better performance. The winning algorithm yielded an F score of 0.748 (CI95: 0.704-0.781), exceeding the performance of six experts on the same task.}, - citation-subset = {IM}, - country = {Netherlands}, - file = {Aubr22.pdf:pdf\\Aubr22.pdf:PDF}, - issn-linking = {1361-8415}, - journal = {Medical Image Analysis}, - keywords = {Challenge; Deep Learning; Domain generalization; Histopathology; Mitosis}, - nlm-id = {9713490}, - optnote = {RADIOLOGY, DIAG}, - owner = {NLM}, - pii = {S1361-8415(22)00327-9}, - pmid = {36463832}, - pubmodel = {Print-Electronic}, - revised = {2022-12-04}, - year = {2022}, -} - -@Article{Merc22, - author = {Mercan, Caner and Balkenhol, Maschenka and Salgado, Roberto and Sherman, Mark and Vielh, Philippe and Vreuls, Willem and Polónia, António and Horlings, Hugo M. 
and Weichert, Wilko and Carter, Jodi M. and Bult, Peter and Christgen, Matthias and Denkert, Carsten and van de Vijver, Koen and Bokhorst, John-Melle and van der Laak, Jeroen and Ciompi, Francesco}, - title = {Deep learning for fully-automated nuclear pleomorphism scoring in breast cancer.}, - doi = {10.1038/s41523-022-00488-w}, - issn = {2374-4677}, - issue = {1}, - pages = {120}, - pubstate = {epublish}, - volume = {8}, - abstract = {To guide the choice of treatment, every new breast cancer is assessed for aggressiveness (i.e., graded) by an experienced histopathologist. Typically, this tumor grade consists of three components, one of which is the nuclear pleomorphism score (the extent of abnormalities in the overall appearance of tumor nuclei). The degree of nuclear pleomorphism is subjectively classified from 1 to 3, where a score of 1 most closely resembles epithelial cells of normal breast epithelium and 3 shows the greatest abnormalities. Establishing numerical criteria for grading nuclear pleomorphism is challenging, and inter-observer agreement is poor. Therefore, we studied the use of deep learning to develop fully automated nuclear pleomorphism scoring in breast cancer. The reference standard used for training the algorithm consisted of the collective knowledge of an international panel of 10 pathologists on a curated set of regions of interest covering the entire spectrum of tumor morphology in breast cancer. To fully exploit the information provided by the pathologists, a first-of-its-kind deep regression model was trained to yield a continuous scoring rather than limiting the pleomorphism scoring to the standard three-tiered system. Our approach preserves the continuum of nuclear pleomorphism without necessitating a large data set with explicit annotations of tumor nuclei. Once translated to the traditional system, our approach achieves top pathologist-level performance in multiple experiments on regions of interest and whole-slide images, compared to a panel of 10 and 4 pathologists, respectively.}, - country = {United States}, - file = {Merc22.pdf:pdf\\Merc22.pdf:PDF}, - issn-linking = {2374-4677}, - journal = {NPJ breast cancer}, - nlm-id = {101674891}, - optnote = {DIAG, RADIOLOGY}, - owner = {NLM}, - pii = {120}, - pmc = {PMC9643392}, - pmid = {36347887}, - pubmodel = {Electronic}, - revised = {2022-11-16}, - year = {2022}, -} - -@Article{Mari22, - author = {Marini, Niccolò and Marchesin, Stefano and Otálora, Sebastian and Wodzinski, Marek and Caputo, Alessandro and van Rijthoven, Mart and Aswolinskiy, Witali and Bokhorst, John-Melle and Podareanu, Damian and Petters, Edyta and Boytcheva, Svetla and Buttafuoco, Genziana and Vatrano, Simona and Fraggetta, Filippo and van der Laak, Jeroen and Agosti, Maristella and Ciompi, Francesco and Silvello, Gianmaria and Muller, Henning and Atzori, Manfredo}, - title = {Unleashing the potential of digital pathology data by training computer-aided diagnosis models without human annotations.}, - doi = {10.1038/s41746-022-00635-4}, - issn = {2398-6352}, - issue = {1}, - pages = {102}, - pubstate = {epublish}, - volume = {5}, - abstract = {The digitalization of clinical workflows and the increasing performance of deep learning algorithms are paving the way towards new methods for tackling cancer diagnosis. 
However, the availability of medical specialists to annotate digitized images and free-text diagnostic reports does not scale with the need for large datasets required to train robust computer-aided diagnosis methods that can target the high variability of clinical cases and data produced. This work proposes and evaluates an approach to eliminate the need for manual annotations to train computer-aided diagnosis tools in digital pathology. The approach includes two components, to automatically extract semantically meaningful concepts from diagnostic reports and use them as weak labels to train convolutional neural networks (CNNs) for histopathology diagnosis. The approach is trained (through 10-fold cross-validation) on 3'769 clinical images and reports, provided by two hospitals and tested on over 11'000 images from private and publicly available datasets. The CNN, trained with automatically generated labels, is compared with the same architecture trained with manual labels. Results show that combining text analysis and end-to-end deep neural networks allows building computer-aided diagnosis tools that reach solid performance (micro-accuracy = 0.908 at image-level) based only on existing clinical data without the need for manual annotations.}, - country = {England}, - file = {Mari22.pdf:pdf\\Mari22.pdf:PDF}, - issn-linking = {2398-6352}, - journal = {NPJ digital medicine}, - nlm-id = {101731738}, - optnote = {DIAG, RADIOLOGY}, - owner = {NLM}, - pii = {102}, - pmc = {PMC9307641}, - pmid = {35869179}, - pubmodel = {Electronic}, - revised = {2022-07-29}, - year = {2022}, -} - -@Article{Marc22, - author = {Marchesin, Stefano and Giachelle, Fabio and Marini, Niccolò and Atzori, Manfredo and Boytcheva, Svetla and Buttafuoco, Genziana and Ciompi, Francesco and Di Nunzio, Giorgio Maria and Fraggetta, Filippo and Irrera, Ornella and Müller, Henning and Primov, Todor and Vatrano, Simona and Silvello, Gianmaria}, - title = {Empowering digital pathology applications through explainable knowledge extraction tools.}, - doi = {10.1016/j.jpi.2022.100139}, - issn = {2229-5089}, - pages = {100139}, - pubstate = {epublish}, - volume = {13}, - abstract = {Exa-scale volumes of medical data have been produced for decades. In most cases, the diagnosis is reported in free text, encoding medical knowledge that is still largely unexploited. In order to allow decoding medical knowledge included in reports, we propose an unsupervised knowledge extraction system combining a rule-based expert system with pre-trained Machine Learning (ML) models, namely the Semantic Knowledge Extractor Tool (SKET). Combining rule-based techniques and pre-trained ML models provides high accuracy results for knowledge extraction. This work demonstrates the viability of unsupervised Natural Language Processing (NLP) techniques to extract critical information from cancer reports, opening opportunities such as data mining for knowledge extraction purposes, precision medicine applications, structured report creation, and multimodal learning. SKET is a practical and unsupervised approach to extracting knowledge from pathology reports, which opens up unprecedented opportunities to exploit textual and multimodal medical information in clinical practice. We also propose SKET eXplained (SKET X), a web-based system providing visual explanations about the algorithmic decisions taken by SKET. 
SKET X is designed/developed to support pathologists and domain experts in understanding SKET predictions, possibly driving further improvements to the system.}, - country = {United States}, - file = {Marc22.pdf:pdf\\Marc22.pdf:PDF}, - journal = {Journal of pathology informatics}, - keywords = {Clinical practice; Digital pathology; Expert systems; Explainable AI; Knowledge extraction; Machine learning}, - nlm-id = {101528849}, - optnote = {DIAG, RADIOLOGY}, - owner = {NLM}, - pii = {100139}, - pmc = {PMC9577130}, - pmid = {36268087}, - pubmodel = {Electronic-eCollection}, - revised = {2022-10-22}, - year = {2022}, -} - -@Article{Muna22, - author = {Munari, Enrico and Querzoli, Giulia and Brunelli, Matteo and Marconi, Marcella and Sommaggio, Marco and Cocchi, Marco A. and Martignoni, Guido and Netto, George J. and Caliò, Anna and Quatrini, Linda and Mariotti, Francesca R. and Luchini, Claudio and Girolami, Ilaria and Eccher, Albino and Segala, Diego and Ciompi, Francesco and Zamboni, Giuseppe and Moretta, Lorenzo and Bogina, Giuseppe}, - title = {Comparison of three validated PD-L1 immunohistochemical assays in urothelial carcinoma of the bladder: interchangeability and issues related to patient selection.}, - doi = {10.3389/fimmu.2022.954910}, - issn = {1664-3224}, - pages = {954910}, - pubstate = {epublish}, - volume = {13}, - abstract = {Different programmed cell death-ligand 1 (PD-L1) assays and scoring algorithms are being used in the evaluation of PD-L1 expression for the selection of patients for immunotherapy in specific settings of advanced urothelial carcinoma (UC). In this paper, we sought to investigate three approved assays (Ventana SP142 and SP263, and Dako 22C3) in UC with emphasis on implications for patient selection for atezolizumab/pembrolizumab as the first line of treatment. Tumors from 124 patients with invasive UC of the bladder were analyzed using tissue microarrays (TMA). Serial sections were stained with SP263 and SP142 on Ventana Benchmark Ultra and with 22C3 on Dako Autostainer Link 48. Stains were evaluated independently by two observers and scored using the combined positive score (CPS) and tumor infiltrating immune cells (IC) algorithms. Differences in proportions (DP), overall percent agreement (OPA), positive percent agreement (PPA), negative percent agreement (NPA), and Cohen κ were calculated for all comparable cases. Good overall concordance in analytic performance was observed for 22C3 and SP263 with both scoring algorithms; specifically, the highest OPA was observed between 22C3 and SP263 (89.6%) when using CPS. On the other hand, SP142 consistently showed lower positivity rates with high differences in proportions (DP) compared with 22C3 and SP263 with both CPS and IC, and with a low PPA, especially when using the CPS algorithm. 
In conclusion, 22C3 and SP263 assays show comparable analytical performance while SP142 shows divergent staining results, with important implications for the selection of patients for both pembrolizumab and atezolizumab.}, - chemicals = {B7-H1 Antigen}, - citation-subset = {IM}, - completed = {2022-08-16}, - country = {Switzerland}, - file = {Muna22.pdf:pdf\\Muna22.pdf:PDF}, - issn-linking = {1664-3224}, - journal = {Frontiers in immunology}, - keywords = {B7-H1 Antigen, metabolism; Carcinoma, Transitional Cell, drug therapy, pathology; Humans; Immunohistochemistry; Patient Selection; Urinary Bladder; Urinary Bladder Neoplasms, pathology; PD-L1; assays; bladder; cancer; comparison; immunohistochemistry; prediction; urothelial}, - nlm-id = {101560960}, - optnote = {DIAG, RADIOLOGY}, - owner = {NLM}, - pii = {954910}, - pmc = {PMC9363581}, - pmid = {35967344}, - pubmodel = {Electronic-eCollection}, - revised = {2022-08-25}, - year = {2022}, -} - -@Article{Muna21a, - author = {Munari, Enrico and Mariotti, Francesca R. and Quatrini, Linda and Bertoglio, Pietro and Tumino, Nicola and Vacca, Paola and Eccher, Albino and Ciompi, Francesco and Brunelli, Matteo and Martignoni, Guido and Bogina, Giuseppe and Moretta, Lorenzo}, - title = {PD-1/PD-L1 in Cancer: Pathophysiological, Diagnostic and Therapeutic Aspects.}, - doi = {10.3390/ijms22105123}, - issn = {1422-0067}, - issue = {10}, - pubstate = {epublish}, - volume = {22}, - abstract = {Immune evasion is a key strategy adopted by tumor cells to escape the immune system while promoting their survival and metastatic spreading. Indeed, several mechanisms have been developed by tumors to inhibit immune responses. PD-1 is a cell surface inhibitory receptor, which plays a major physiological role in the maintenance of peripheral tolerance. In pathological conditions, activation of the PD-1/PD-Ls signaling pathway may block immune cell activation, a mechanism exploited by tumor cells to evade the antitumor immune control. Targeting the PD-1/PD-L1 axis has represented a major breakthrough in cancer treatment. Indeed, the success of PD-1 blockade immunotherapies represents an unprecedented success in the treatment of different cancer types. To improve the therapeutic efficacy, a deeper understanding of the mechanisms regulating PD-1 expression and signaling in the tumor context is required. We provide an overview of the current knowledge of PD-1 expression on both tumor-infiltrating T and NK cells, summarizing the recent evidence on the stimuli regulating its expression. 
We also highlight perspectives and limitations of the role of PD-L1 expression as a predictive marker, discuss well-established and novel potential approaches to improve patient selection and clinical outcome and summarize current indications for anti-PD1/PD-L1 immunotherapy.}, - chemicals = {B7-H1 Antigen, CD274 protein, human, Immune Checkpoint Inhibitors, PDCD1 protein, human, Programmed Cell Death 1 Receptor}, - citation-subset = {IM}, - completed = {2021-06-10}, - country = {Switzerland}, - file = {Muna21a.pdf:pdf\\Muna21a.pdf:PDF}, - issn-linking = {1422-0067}, - journal = {International journal of molecular sciences}, - keywords = {B7-H1 Antigen, antagonists & inhibitors, immunology, metabolism; Humans; Immune Checkpoint Inhibitors, therapeutic use; Immunotherapy, methods; Molecular Targeted Therapy; Neoplasms, diagnosis, drug therapy, physiopathology; Programmed Cell Death 1 Receptor, metabolism; Tumor Escape; NK cells; PD-1; PD-L1; cancer; glucocorticoids; immunotherapy}, - nlm-id = {101092791}, - optnote = {DIAG, RADIOLOGY}, - owner = {NLM}, - pii = {5123}, - pmc = {PMC8151504}, - pmid = {34066087}, - pubmodel = {Electronic}, - revised = {2021-06-10}, - year = {2021}, -} - -@Article{Hude20, - author = {Hudeček, Jan and Voorwerk, Leonie and van Seijen, Maartje and Nederlof, Iris and de Maaker, Michiel and van den Berg, Jose and van de Vijver, Koen K. and Sikorska, Karolina and Adams, Sylvia and Demaria, Sandra and Viale, Giuseppe and Nielsen, Torsten O. and Badve, Sunil S. and Michiels, Stefan and Symmans, William Fraser and Sotiriou, Christos and Rimm, David L. and Hewitt, Stephen M. and Denkert, Carsten and Loibl, Sibylle and Loi, Sherene and Bartlett, John M. S. and Pruneri, Giancarlo and Dillon, Deborah A. and Cheang, Maggie C. U. and Tutt, Andrew and Hall, Jacqueline A. and Kos, Zuzana and Salgado, Roberto and Kok, Marleen and Horlings, Hugo M. and Group, International Immuno-Oncology Biomarker Working}, - title = {Application of a risk-management framework for integration of stromal tumor-infiltrating lymphocytes in clinical trials.}, - doi = {10.1038/s41523-020-0155-1}, - issn = {2374-4677}, - pages = {15}, - pubstate = {epublish}, - volume = {6}, - abstract = {Stromal tumor-infiltrating lymphocytes (sTILs) are a potential predictive biomarker for immunotherapy response in metastatic triple-negative breast cancer (TNBC). To incorporate sTILs into clinical trials and diagnostics, reliable assessment is essential. In this review, we propose a new concept, namely the implementation of a risk-management framework that enables the use of sTILs as a stratification factor in clinical trials. We present the design of a biomarker risk-mitigation workflow that can be applied to any biomarker incorporation in clinical trials. We demonstrate the implementation of this concept using sTILs as an integral biomarker in a single-center phase II immunotherapy trial for metastatic TNBC (TONIC trial, NCT02499367), using this workflow to mitigate risks of suboptimal inclusion of sTILs in this specific trial. 
In this review, we demonstrate that a web-based scoring platform can mitigate potential risk factors when including sTILs in clinical trials, and we argue that this framework can be applied for any future biomarker-driven clinical trial setting.}, - country = {United States}, - file = {Hude20.pdf:pdf\\Hude20.pdf:PDF}, - investigator = {Hyytiäinen, Aini and Hida, Akira I and Thompson, Alastair and Lefevre, Alex and Lazar, Alexander J and Gown, Allen and Lo, Amy and Sapino, Anna and Madabhushi, Anant and Moreira, Andre and Richardson, Andrea and Vingiani, Andrea and Beck, Andrew H and Bellizzi, Andrew M and Guerrero, Angel and Grigoriadis, Anita and Ehinger, Anna and Garrido-Castro, Ana and Vincent-Salomon, Anne and Laenkholm, Anne-Vibeke and Sharma, Ashish and Cimino-Mathews, Ashley and Srinivasan, Ashok and Acs, Balazs and Singh, Baljit and Calhoun, Benjamin and Haibe-Kans, Benjamin and Solomon, Benjamin and Thapa, Bibhusal and Nelson, Brad H and Gallas, Brandon D and Castaneda, Carlos and Ballesteros-Merino, Carmen and Criscitiello, Carmen and Boeckx, Carolien and Colpaert, Cecile and Quinn, Cecily and Chennubhotla, Chakra S and Swanton, Charles and Solinas, Cinzia and Hiley, Crispin and Drubay, Damien and Bethmann, Daniel and Moore, David A and Larsimont, Denis and Sabanathan, Dhanusha and Peeters, Dieter and Zardavas, Dimitrios and Höflmayer, Doris and Johnson, Douglas B and Thompson, E Aubrey and Brogi, Edi and Perez, Edith and ElGabry, Ehab A and Stovgaard, Elisabeth Specht and Blackley, Elizabeth F and Roblin, Elvire and Reisenbichler, Emily and Bellolio, Enrique and Balslev, Eva and Chmielik, Ewa and Gaire, Fabien and Andre, Fabrice and Lu, Fang-I and Azmoudeh-Ardalan, Farid and Rojo, Federico and Gruosso, Tina and Ciompi, Francesco and Peale, Franklin and Hirsch, Fred R and Klauschen, Frederick and Penault-Llorca, Frédérique and Acosta Haab, Gabriela and Farshid, Gelareh and van den Eynden, Gert and Curigliano, Giuseppe and Floris, Giuseppe and Broeckx, Glenn and Gonzalez-Ericsson and Koeppen, Harmut and Haynes, Harry R and McArthur, Heather and Joensuu, Heikki and Olofsson, Helena and Lien, Huang-Chun and Chen, I-Chun and Cree, Ian and Frahm, Isabel and Brcic, Iva and Chan, Jack and Ziai, James and Brock, Jane and Wesseling, Jelle and Giltnane, Jennifer and Kerner, Jennifer K and Thagaard, Jeppe and Braybrooke, Jeremy P and van der Laak, Jeroen A W M and Lemonnier, Jerome and Zha, Jiping and Ribeiro, Joana and Lennerz, Jochen K and Carter, Jodi M and Saltz, Joel and Hartman, Johan and Hainfellner, Johannes and Quesne, John Le and Juco, Jonathon W and Reis-Filho, Jorge and Sanchez, Joselyn and Sparano, Joseph and Cucherousset, Joël and Araya, Juan Carlos and Adam, Julien and Balko, Justin M and Saeger, Kai and Siziopikou, Kalliopi and Willard-Gallo, Karen and Weber, Karsten and Pogue-Geile, Katherine L and Steele, Keith E and Emancipator, Kenneth and AbdulJabbar, Khalid and El Bairi, Khalid and Blenman, Kim R M and Allison, Kimberly H and Korski, Konstanty and Pusztai, Lajos and Comerma, Laura and Buisseret, Laurence and Cooper, Lee A D and Shi, Leming and Kooreman, Loes F S and Molinero, Luciana and Estrada, M Valeria and Lacroix-Triki, Magali and Al Bakir, Maise and Sebastian, Manu M and van de Vijver, Marc and Balancin, Marcelo Luiz and Dieci, Maria Vittoria and Mathieu, Marie-Christine and Rebelatto, Marlon C and Piccart, Martine and Hanna, Matthew G and Goetz, Matthew P and Preusser, Matthias and Khojasteh, Mehrnoush and Sanders, Melinda E and Regan, Meredith M and Barnes, 
Michael and Christie, Michael and Misialek, Michael and Ignatiadis, Michail and van Bockstal, Mieke and Castillo, Miluska and Amgad, Mohamed and Harbeck, Nadia and Tung, Nadine and Laudus, Nele and Sirtaine, Nicolas and Burchardi, Nicole and Ternes, Nils and Radosevic-Robin, Nina and Gluz, Oleg and Grimm, Oliver and Nuciforo, Paolo and Jank, Paul and Gonzalez-Ericsson, Paula and Kirtani, Pawan and Jelinic, Petar and Watson, Peter H and Savas, Peter and Francis, Prudence A and Russell, Prudence A and Singh, Rajendra and Kim, Rim S and Pierce, Robert H and Hills, Robert and Leon-Ferre, Roberto and de Wind, Roland and Shui, Ruohong and De Clercq, Sabine and Leung, Sam and Tabbarah, Sami and Souza, Sandra C and O'Toole, Sandra and Swain, Sandra and Dudgeon, Sarah and Willis, Scooter and Ely, Scott and Kim, Seong-Rim and Bedri, Shahinaz and Irshad, Sheeba and Liu, Shi-Wei and Goel, Shom and Hendry, Shona and Bianchi, Simonetta and Bragança, Sofia and Paik, Soonmyung and Wienert, Stephan and Fox, Stephen B and Luen, Stephen J and Naber, Stephen and Schnitt, Stuart J and Sua, Luz F and Lakhani, Sunil R and Fineberg, Susan and Soler, Teresa and Gevaert, Thomas and D'Alfonso, Timothy and John, Tom and Sugie, Tomohagu and Kurkure, Uday and Bossuyt, Veerle and Manem, Venkata and Cámara, Vincente Peg and Tong, Weida and Chen, Weijie and Yang, Wentao and Tran, William T and Wang, Yihong and Yuan, Yinyin and Allory, Yves and Husain, Zaheed and Bago-Horvath, Zsuzsanna}, - issn-linking = {2374-4677}, - journal = {NPJ breast cancer}, - keywords = {Biomarkers; Breast cancer; Tumour biomarkers; Tumour immunology}, - nlm-id = {101674891}, - optnote = {DIAG, RADIOLOGY}, - owner = {NLM}, - pii = {15}, - pmc = {PMC7217941}, - pmid = {32436923}, - pubmodel = {Electronic-eCollection}, - revised = {2022-03-23}, - year = {2020}, -} - - -@Article{Hend23, - author = {Hendrix, Nils and Hendrix, Ward and van Dijke, Kees and Maresch, Bas and Maas, Mario and Bollen, Stijn and Scholtens, Alexander and de Jonge, Milko and Ong, Lee-Ling Sharon and van Ginneken, Bram and Rutten, Matthieu}, - title = {Musculoskeletal radiologist-level performance by using deep learning for detection of scaphoid fractures on conventional multi-view radiographs of hand and wrist}, - doi = {https://doi.org/10.1007/s00330-022-09205-4}, - pages = {1575--1588}, - url = {https://link.springer.com/article/10.1007/s00330-022-09205-4}, - volume = {33}, - abstract = {Objectives: To assess how an artificial intelligence (AI) algorithm performs against five experienced musculoskeletal radiologists in diagnosing scaphoid fractures and whether it aids their diagnosis on conventional multi-view radiographs. - - Methods: Four datasets of conventional hand, wrist, and scaphoid radiographs were retrospectively acquired at two hospitals (hospitals A and B). Dataset 1 (12,990 radiographs from 3353 patients, hospital A) and dataset 2 (1117 radiographs from 394 patients, hospital B) were used for training and testing a scaphoid localization and laterality classification component. Dataset 3 (4316 radiographs from 840 patients, hospital A) and dataset 4 (688 radiographs from 209 patients, hospital B) were used for training and testing the fracture detector. The algorithm was compared with the radiologists in an observer study. Evaluation metrics included sensitivity, specificity, positive predictive value (PPV), area under the characteristic operating curve (AUC), Cohen's kappa coefficient (κ), fracture localization precision, and reading time. 
- - Results: The algorithm detected scaphoid fractures with a sensitivity of 72%, specificity of 93%, PPV of 81%, and AUC of 0.88. The AUC of the algorithm did not differ from each radiologist (0.87 [radiologists' mean], p ≥.05). AI assistance improved five out of ten pairs of inter-observer Cohen's κ agreements (p <.05) and reduced reading time in four radiologists (p <.001), but did not improve other metrics in the majority of radiologists (p ≥.05). - - Conclusions: The AI algorithm detects scaphoid fractures on conventional multi-view radiographs at the level of five experienced musculoskeletal radiologists and could significantly shorten their reading time.}, - file = {Hend23.pdf:pdf\\Hend23.pdf:PDF}, - journal = ER, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Article{Vina22, - author = {Vinayahalingam, Shankeeth and van Nistelrooij, Niels and van Ginneken, Bram and Bressem, Keno and Tröltzsch, Daniel and Heiland, Max and Flügge, Tabea and Gaudin, Robert}, - title = {Detection of mandibular fractures on panoramic radiographs using deep learning.}, - doi = {10.1038/s41598-022-23445-w}, - issn = {2045-2322}, - issue = {1}, - number = {1}, - pages = {19596}, - pubstate = {epublish}, - volume = {12}, - abstract = {Mandibular fractures are among the most frequent facial traumas in oral and maxillofacial surgery, accounting for 57% of cases. An accurate diagnosis and appropriate treatment plan are vital in achieving optimal re-establishment of occlusion, function and facial aesthetics. This study aims to detect mandibular fractures on panoramic radiographs (PR) automatically. 1624 PR with fractures were manually annotated and labelled as a reference. A deep learning approach based on Faster R-CNN and Swin-Transformer was trained and validated on 1640 PR with and without fractures. Subsequently, the trained algorithm was applied to a test set consisting of 149 PR with and 171 PR without fractures. The detection accuracy and the area-under-the-curve (AUC) were calculated. The proposed method achieved an F1 score of 0.947 and an AUC of 0.977. Deep learning-based assistance of clinicians may reduce the misdiagnosis and hence the severe complications.}, - citation-subset = {IM}, - completed = {2022-11-18}, - country = {England}, - file = {Vina22.pdf:pdf\\Vina22.pdf:PDF}, - issn-linking = {2045-2322}, - journal = {Scientific reports}, - keywords = {Humans; Radiography, Panoramic, methods; Deep Learning; Mandibular Fractures, diagnostic imaging; Algorithms; Area Under Curve}, - nlm-id = {101563288}, - optnote = {DIAG, RADIOLOGY}, - owner = {NLM}, - pii = {19596}, - pmc = {PMC9666517}, - pmid = {36379971}, - publisher = {Springer Science and Business Media {LLC}}, - pubmodel = {Electronic}, - revised = {2023-01-09}, - year = {2022}, -} - -@Article{Bohn22, - author = {Bohner, Lauren and Vinayahalingam, Shankeeth and Kleinheinz, Johannes and Hanisch, Marcel}, - title = {Digital Implant Planning in Patients with Ectodermal Dysplasia: Clinical Report.}, - doi = {10.3390/ijerph19031489}, - issn = {1660-4601}, - issue = {3}, - pubstate = {epublish}, - volume = {19}, - abstract = {Ectodermal dysplasia may severely affect the development of jaw growth and facial appearance. This case report describes the treatment of two patients suffering from ectodermal dysplasia, both treated with dental implant-fixed restorations by means of computer-guided surgery. 
Two patients presented to our clinic with congenital malformation of the jaw as a manifestation of ectodermal dysplasia, showing oligodontia and alveolar ridge deficit. Clinical examination revealed multiple unattached teeth and a need for prosthetic therapy. For both cases, dental implants were placed based on a computer-guided planning. A surgical guide was used to determine the positioning of the dental implants according to the prosthetic planning, which allowed for a satisfactory aesthetic and functional outcome. Computer-guided implant placement allowed predictable treatment of complex cases with satisfactory aesthetic and functional results. Adequate surgical and prosthetic planning is considered critical for treatment success.}, - citation-subset = {IM}, - completed = {2022-02-28}, - country = {Switzerland}, - file = {Bohn22.pdf:pdf\\Bohn22.pdf:PDF}, - issn-linking = {1660-4601}, - journal = {International journal of environmental research and public health}, - keywords = {Alveolar Process; Anodontia; Ectodermal Dysplasia, surgery; Humans; Treatment Outcome; computer-assisted; dental implants; rare diseases; surgery}, - nlm-id = {101238455}, - optnote = {RADIOLOGY, DIAG}, - owner = {NLM}, - pii = {1489}, - pmc = {PMC8835085}, - pmid = {35162510}, - pubmodel = {Electronic}, - revised = {2022-02-28}, - year = {2022}, -} - -@Article{Naik22, - author = {Naik, Nithesh and Hameed, B. M. Zeeshan and Sooriyaperakasam, Nilakshman and Vinayahalingam, Shankeeth and Patil, Vathsala and Smriti, Komal and Saxena, Janhavi and Shah, Milap and Ibrahim, Sufyan and Singh, Anshuman and Karimi, Hadis and Naganathan, Karthickeyan and Shetty, Dasharathraj K. and Rai, Bhavan Prasad and Chlosta, Piotr and Somani, Bhaskar K.}, - title = {Transforming healthcare through a digital revolution: A review of digital healthcare technologies and solutions.}, - doi = {10.3389/fdgth.2022.919985}, - issn = {2673-253X}, - pages = {919985}, - pubstate = {epublish}, - volume = {4}, - abstract = {The COVID-19 pandemic has put a strain on the entire global healthcare infrastructure. The pandemic has necessitated the re-invention, re-organization, and transformation of the healthcare system. The resurgence of new COVID-19 virus variants in several countries and the infection of a larger group of communities necessitate a rapid strategic shift. Governments, non-profit, and other healthcare organizations have all proposed various digital solutions. It's not clear whether these digital solutions are adaptable, functional, effective, or reliable. With the disease becoming more and more prevalent, many countries are looking for assistance and implementation of digital technologies to combat COVID-19. Digital health technologies for COVID-19 pandemic management, surveillance, contact tracing, diagnosis, treatment, and prevention will be discussed in this paper to ensure that healthcare is delivered effectively. 
Artificial Intelligence (AI), big data, telemedicine, robotic solutions, Internet of Things (IoT), digital platforms for communication (DC), computer vision, computer audition (CA), digital data management solutions (blockchain), digital imaging are premiering to assist healthcare workers (HCW's) with solutions that include case base surveillance, information dissemination, disinfection, and remote consultations, along with many other such interventions.}, - country = {Switzerland}, - file = {Naik22.pdf:pdf\\Naik22.pdf:PDF}, - issn-linking = {2673-253X}, - journal = {Frontiers in digital health}, - keywords = {artificial intelligence; blockchain; digital healthcare; healthcare; telemedicine}, - nlm-id = {101771889}, - optnote = {DIAG}, - owner = {NLM}, - pii = {919985}, - pmc = {PMC9385947}, - pmid = {35990014}, - pubmodel = {Electronic-eCollection}, - revised = {2022-08-23}, - year = {2022}, -} - -@Conference{Peet23, - author = {Peeters, Dr\'{e} and Alves, Nat\'{a}lia and Venkadesh, K and Dinnessen, R and Saghir, Z and Scholten, E and Huisman, H and Schaefer-Prokop, C and Vliegenthart, R and Prokop, M and Jacobs, C}, - booktitle = ECR, - title = {The effect of applying an uncertainty estimation method on the performance of a deep learning model for nodule malignancy risk estimation}, - abstract = {Purpose: Artificial Intelligence (AI) algorithms often lack uncertainty estimation for classification tasks. Uncertainty estimation may however be an important requirement for clinical adoption of AI algorithms. In this study, we integrate a method for uncertainty estimation into a previously developed AI algorithm and investigate the performance when applying different uncertainty thresholds. -Methods and materials: We used a retrospective external validation dataset from the Danish Lung Cancer Screening Trial, containing 818 benign and 65 malignant nodules. Our previously developed AI algorithm for nodule malignancy risk estimation was extended with a method for measuring the prediction uncertainty. The uncertainty score (UnS) was calculated by measuring the standard deviation over 20 different predictions of an ensemble of AI models. Two UnS thresholds at the 90th and 95th percentile were applied to retain 90% and 95% of all cases as certain, respectively. For these scenarios, we calculated the area under the ROC curve (AUC) for certain and uncertain cases, and for the full set of nodules. -Results: On the full set of 883 nodules, the AUC of the AI risk score was 0.932. For the 90th and 95th percentile, the AUC of the AI risk score for certain cases was 0.934 and 0.935, respectively, and for the uncertain cases was 0.710 and 0.688, respectively. -Conclusion: In this retrospective data set, we demonstrate that integrating an uncertainty estimation method into a deep learning-based nodule malignancy risk estimation algorithm slightly increased the performance on certain cases. The AI performance is substantially worse on uncertain cases and therefore in need of human visual review. -Limitations: This study is a retrospective analysis on data from one single lung cancer screening trial. More external validation is needed.}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Article{Adam22a, - author = {Adams, Lisa C. and Makowski, Marcus R. and Engel, Günther and Rattunde, Maximilian and Busch, Felix and Asbach, Patrick and Niehues, Stefan M. 
and Vinayahalingam, Shankeeth and van Ginneken, Bram and Litjens, Geert and Bressem, Keno K.}, - title = {Dataset of prostate MRI annotated for anatomical zones and cancer.}, - doi = {10.1016/j.dib.2022.108739}, - issn = {2352-3409}, - pages = {108739}, - pubstate = {epublish}, - volume = {45}, - abstract = {In the present work, we present a publicly available, expert-segmented representative dataset of 158 3.0 Tesla biparametric MRIs [1]. There is an increasing number of studies investigating prostate and prostate carcinoma segmentation using deep learning (DL) with 3D architectures [2], [3], [4], [5], [6], [7]. The development of robust and data-driven DL models for prostate segmentation and assessment is currently limited by the availability of openly available expert-annotated datasets [8], [9], [10]. The dataset contains 3.0 Tesla MRI images of the prostate of patients with suspected prostate cancer. Patients over 50 years of age who had a 3.0 Tesla MRI scan of the prostate that met PI-RADS version 2.1 technical standards were included. All patients received a subsequent biopsy or surgery so that the MRI diagnosis could be verified/matched with the histopathologic diagnosis. For patients who had undergone multiple MRIs, the last MRI, which was less than six months before biopsy/surgery, was included. All patients were examined at a German university hospital (Charité Universitätsmedizin Berlin) between 02/2016 and 01/2020. All MRI were acquired with two 3.0 Tesla MRI scanners (Siemens VIDA and Skyra, Siemens Healthineers, Erlangen, Germany). Axial T2W sequences and axial diffusion-weighted sequences (DWI) with apparent diffusion coefficient maps (ADC) were included in the data set. T2W sequences and ADC maps were annotated by two board-certified radiologists with 6 and 8 years of experience, respectively. For T2W sequences, the central gland (central zone and transitional zone) and peripheral zone were segmented. If areas of suspected prostate cancer (PIRADS score of ≥ 4) were identified on examination, they were segmented in both the T2W sequences and ADC maps. Because restricted diffusion is best seen in DWI images with high b-values, only these images were selected and all images with low b-values were discarded. Data were then anonymized and converted to NIfTI (Neuroimaging Informatics Technology Initiative) format.}, - country = {Netherlands}, - file = {Adam22a.pdf:pdf\\Adam22a.pdf:PDF}, - issn-linking = {2352-3409}, - journal = {Data in brief}, - keywords = {3.0 Tesla MRI; Apparent diffusion coefficient (ADC); Diffusion-weighted imaging; Pixel-wise segmentation; Prostate cancer; T2-weighted imaging}, - nlm-id = {101654995}, - optnote = {RADIOLOGY, DIAG}, - owner = {NLM}, - pii = {108739}, - pmc = {PMC9679750}, - pmid = {36426089}, - pubmodel = {Electronic-eCollection}, - revised = {2022-11-26}, - year = {2022}, -} - -@Article{Bemp22, - author = {Van den Bempt, Maxim and Vinayahalingam, Shankeeth and Han, Michael D. and Bergé, Stefaan J. and Xi, Tong}, - title = {The role of muscular traction in the occurrence of skeletal relapse after advancement bilateral sagittal split osteotomy (BSSO): A systematic review.}, - doi = {10.1111/ocr.12488}, - issn = {1601-6343}, - issue = {1}, - pages = {1--13}, - pubstate = {ppublish}, - volume = {25}, - abstract = {The aim of this systematic review was (i) to determine the role of muscular traction in the occurrence of skeletal relapse after advancement BSSO and (ii) to investigate the effect of advancement BSSO on the perimandibular muscles. 
This systematic review reports in accordance with the recommendations proposed by the Preferred Reporting Items for Systematic Reviews and Meta-Analyses (PRISMA) statement. Electronic database searches were performed in the databases MEDLINE, Embase and Cochrane Library. Inclusion criteria were as follows: assessment of relapse after advancement BSSO; assessment of morphological and functional change of the muscles after advancement BSSO; and clinical studies on human subjects. Exclusion criteria were as follows: surgery other than advancement BSSO; studies in which muscle activity/traction was not investigated; and case reports with a sample of five cases or fewer, review articles, meta-analyses, letters, congress abstracts or commentaries. Of the initial 1006 unique articles, 11 studies were finally included. In four studies, an intervention involving the musculature was performed with subsequent assessment of skeletal relapse. The changes in the morphological and functional properties of the muscles after BSSO were studied in seven studies. The findings of this review demonstrate that the perimandibular musculature plays a role in skeletal relapse after advancement BSSO and may serve as a target for preventive strategies to reduce this complication. However, further research is necessary to (i) develop a better understanding of the role of each muscle group, (ii) to develop new therapeutic strategies and (iii) to define criteria that allow identification of patients at risk.}, - chemicals = {Sitosterols, beta-sitosterol oleate}, - citation-subset = {IM}, - completed = {2022-01-28}, - country = {England}, - file = {Bemp22.pdf:pdf\\Bemp22.pdf:PDF}, - issn-linking = {1601-6335}, - journal = {Orthodontics & craniofacial research}, - keywords = {Humans; Mandible; Mandibular Advancement; Osteotomy; Recurrence; Sitosterols; Traction; BSSO; muscle; orthognathic surgery; relapse; stability}, - nlm-id = {101144387}, - optnote = {RADIOLOGY, DIAG}, - owner = {NLM}, - pmc = {PMC9292715}, - pmid = {33938136}, - pubmodel = {Print-Electronic}, - revised = {2022-07-31}, - year = {2022}, -} - -@Article{Fehe22, - author = {Feher, Balazs and Kuchler, Ulrike and Schwendicke, Falk and Schneider, Lisa and Cejudo Grano de Oro, Jose Eduardo and Xi, Tong and Vinayahalingam, Shankeeth and Hsu, Tzu-Ming Harry and Brinz, Janet and Chaurasia, Akhilanand and Dhingra, Kunaal and Gaudin, Robert Andre and Mohammad-Rahimi, Hossein and Pereira, Nielsen and Perez-Pastor, Francesc and Tryfonos, Olga and Uribe, Sergio E. and Hanisch, Marcel and Krois, Joachim}, - title = {Emulating Clinical Diagnostic Reasoning for Jaw Cysts with Machine Learning.}, - doi = {10.3390/diagnostics12081968}, - issn = {2075-4418}, - issue = {8}, - pubstate = {epublish}, - volume = {12}, - abstract = {The detection and classification of cystic lesions of the jaw is of high clinical relevance and represents a topic of interest in medical artificial intelligence research. The human clinical diagnostic reasoning process uses contextual information, including the spatial relation of the detected lesion to other anatomical structures, to establish a preliminary classification. Here, we aimed to emulate clinical diagnostic reasoning step by step by using a combined object detection and image segmentation approach on panoramic radiographs (OPGs). We used a multicenter training dataset of 855 OPGs (all positives) and an evaluation set of 384 OPGs (240 negatives). 
We further compared our models to an international human control group of ten dental professionals from seven countries. The object detection model achieved an average precision of 0.42 (intersection over union (IoU): 0.50, maximal detections: 100) and an average recall of 0.394 (IoU: 0.50-0.95, maximal detections: 100). The classification model achieved a sensitivity of 0.84 for odontogenic cysts and 0.56 for non-odontogenic cysts as well as a specificity of 0.59 for odontogenic cysts and 0.84 for non-odontogenic cysts (IoU: 0.30). The human control group achieved a sensitivity of 0.70 for odontogenic cysts, 0.44 for non-odontogenic cysts, and 0.56 for OPGs without cysts as well as a specificity of 0.62 for odontogenic cysts, 0.95 for non-odontogenic cysts, and 0.76 for OPGs without cysts. Taken together, our results show that a combined object detection and image segmentation approach is feasible in emulating the human clinical diagnostic reasoning process in classifying cystic lesions of the jaw.}, - country = {Switzerland}, - file = {Fehe22.pdf:pdf\\Fehe22.pdf:PDF}, - issn-linking = {2075-4418}, - journal = {Diagnostics (Basel, Switzerland)}, - keywords = {artificial intelligence; cysts; diagnosis; machine learning; oral; radiography; surgery}, - nlm-id = {101658402}, - optnote = {RADIOLOGY,DIAG}, - owner = {NLM}, - pii = {1968}, - pmc = {PMC9406703}, - pmid = {36010318}, - pubmodel = {Electronic}, - revised = {2022-08-30}, - year = {2022}, -} - -@Article{Esch22, - author = {Eschert, Tim and Schwendicke, Falk and Krois, Joachim and Bohner, Lauren and Vinayahalingam, Shankeeth and Hanisch, Marcel}, - title = {A Survey on the Use of Artificial Intelligence by Clinicians in Dentistry and Oral and Maxillofacial Surgery.}, - doi = {10.3390/medicina58081059}, - issn = {1648-9144}, - issue = {8}, - pubstate = {epublish}, - volume = {58}, - abstract = {Applications of artificial intelligence (AI) in medicine and dentistry have been on the rise in recent years. In dental radiology, deep learning approaches have improved diagnostics, outperforming clinicians in accuracy and efficiency. This study aimed to provide information on clinicians' knowledge and perceptions regarding AI. A 21-item questionnaire was used to study the views of dentistry professionals on AI use in clinical practice. In total, 302 questionnaires were answered and assessed. Most of the respondents rated their knowledge of AI as average (37.1%), below average (22.2%) or very poor (23.2%). The participants were largely convinced that AI would improve and bring about uniformity in diagnostics (mean Likert ± standard deviation 3.7 ± 1.27). Among the most serious concerns were the responsibility for machine errors (3.7 ± 1.3), data security or privacy issues (3.5 ± 1.24) and the divestment of healthcare to large technology companies (3.5 ± 1.28). 
Within the limitations of this study, insights into the acceptance and use of AI in dentistry are revealed for the first time.}, - citation-subset = {IM}, - completed = {2022-08-29}, - country = {Switzerland}, - file = {Esch22.pdf:pdf\\Esch22.pdf:PDF}, - issn-linking = {1010-660X}, - journal = {Medicina (Kaunas, Lithuania)}, - keywords = {Artificial Intelligence; Humans; Surgery, Oral; Surveys and Questionnaires; artificial intelligence; clinicians survey; machine learning; perception; qualitative research}, - nlm-id = {9425208}, - optnote = {RADIOLOGY,DIAG}, - owner = {NLM}, - pii = {1059}, - pmc = {PMC9412897}, - pmid = {36013526}, - pubmodel = {Electronic}, - revised = {2022-08-30}, - year = {2022}, -} - -@Article{Chen22, - author = {Chen, Yun-Fang and Vinayahalingam, Shankeeth and Bergé, Stefaan and Liao, Yu-Fang and Maal, Thomas and Xi, Tong}, - title = {Is the pattern of mandibular asymmetry in mild craniofacial microsomia comparable to non-syndromic class II asymmetry?}, - doi = {10.1007/s00784-022-04429-6}, - issn = {1436-3771}, - issue = {6}, - pages = {4603--4613}, - pubstate = {ppublish}, - volume = {26}, - abstract = {To compare the characteristics of mandibular asymmetry in patients with unilateral craniofacial microsomia (CFM) and class II asymmetry. Pretreatment cone-beam computed tomography of consecutive adults with Pruzansky-Kaban type I and IIA CFM (CFM group) was analyzed by 3D cephalometry. Fourteen mandibular landmarks and two dental landmarks were identified. The mandibular size and positional asymmetry were calculated by using landmark-based linear and volumetric measurements, in terms of asymmetry ratios (affected/non-affected side) and absolute differences (affected - non-affected side). Results were compared with non-syndromic class II with matched severity of chin deviation (Class II group). Statistical analyses included independent t test, paired t test, chi-square test, and ANOVA. CFM group (n, 21; mean age, 20.4 ± 2.5 years) showed significantly larger size asymmetry in regions of mandibular body, ramus, and condyle compared to Class II group (n, 21; mean age, 27.8 ± 5.9 years) (p < 0.05). The curvature of mandibular body was asymmetric in CFM. Regarding the positional asymmetry of mandibular body, while a comparable transverse shift and a negligible yaw rotation were found among the two groups, the roll rotation in CFM was significantly greater as well as the occlusal (6.06° vs. 4.17°) and mandibular (7.84° vs. 2.80°) plane cants (p < 0.05). Mild CFM showed significantly more severe size asymmetry and roll rotation in mandible than non-CFM class II asymmetry.
To improve the mandibular size and positional asymmetry in CFM, adjunct hard tissue augmentation or reduction in addition to OGS orthodontics with a meticulous roll and yaw planning is compulsory, which is expected to be distinct from treating non-CFM class II asymmetry.}, - completed = {2022-06-20}, - country = {Germany}, - file = {Chen22.pdf:pdf\\Chen22.pdf:PDF}, - issn-linking = {1432-6981}, - journal = {Clinical oral investigations}, - keywords = {Adolescent; Adult; Cephalometry, methods; Cone-Beam Computed Tomography, methods; Facial Asymmetry, diagnostic imaging; Goldenhar Syndrome, diagnostic imaging; Humans; Imaging, Three-Dimensional, methods; Mandible, diagnostic imaging; Young Adult; Class II asymmetry; Craniofacial microsomia; Mandibular asymmetry; Orthognathic surgery planning}, - nlm-id = {9707115}, - optnote = {RADIOLOGY,DIAG}, - owner = {NLM}, - pii = {10.1007/s00784-022-04429-6}, - pmc = {PMC9203369}, - pmid = {35218426}, - pubmodel = {Print-Electronic}, - revised = {2022-07-16}, - year = {2022}, -} - -@Article{Adam22b, - author = {Adams, Lisa C. and Makowski, Marcus R. and Engel, Günther and Rattunde, Maximilian and Busch, Felix and Asbach, Patrick and Niehues, Stefan M. and Vinayahalingam, Shankeeth and van Ginneken, Bram and Litjens, Geert and Bressem, Keno K.}, - title = {Prostate158 - An expert-annotated 3T MRI dataset and algorithm for prostate cancer detection.}, - doi = {10.1016/j.compbiomed.2022.105817}, - issn = {1879-0534}, - pages = {105817}, - pubstate = {ppublish}, - volume = {148}, - abstract = {The development of deep learning (DL) models for prostate segmentation on magnetic resonance imaging (MRI) depends on expert-annotated data and reliable baselines, which are often not publicly available. This limits both reproducibility and comparability. Prostate158 consists of 158 expert annotated biparametric 3T prostate MRIs comprising T2w sequences and diffusion-weighted sequences with apparent diffusion coefficient maps. Two U-ResNets trained for segmentation of anatomy (central gland, peripheral zone) and suspicious lesions for prostate cancer (PCa) with a PI-RADS score of ≥4 served as baseline algorithms. Segmentation performance was evaluated using the Dice similarity coefficient (DSC), the Hausdorff distance (HD), and the average surface distance (ASD). The Wilcoxon test with Bonferroni correction was used to evaluate differences in performance. The generalizability of the baseline model was assessed using the open datasets Medical Segmentation Decathlon and PROSTATEx. Compared to Reader 1, the models achieved a DSC/HD/ASD of 0.88/18.3/2.2 for the central gland, 0.75/22.8/1.9 for the peripheral zone, and 0.45/36.7/17.4 for PCa. Compared with Reader 2, the DSC/HD/ASD were 0.88/17.5/2.6 for the central gland, 0.73/33.2/1.9 for the peripheral zone, and 0.4/39.5/19.1 for PCa. Interrater agreement measured in DSC/HD/ASD was 0.87/11.1/1.0 for the central gland, 0.75/15.8/0.74 for the peripheral zone, and 0.6/18.8/5.5 for PCa. Segmentation performances on the Medical Segmentation Decathlon and PROSTATEx were 0.82/22.5/3.4; 0.86/18.6/2.5 for the central gland, and 0.64/29.2/4.7; 0.71/26.3/2.2 for the peripheral zone. 
We provide an openly accessible, expert-annotated 3T dataset of prostate MRI and a reproducible benchmark to foster the development of prostate segmentation algorithms.}, - citation-subset = {IM}, - completed = {2022-08-30}, - country = {United States}, - file = {Adam22b.pdf:pdf\\Adam22b.pdf:PDF}, - issn-linking = {0010-4825}, - journal = {Computers in biology and medicine}, - keywords = {Algorithms; Humans; Magnetic Resonance Imaging; Male; Prostate; Prostatic Neoplasms; Reproducibility of Results; Retrospective Studies; Artificial intelligence; Biparametric prostate MRI; Deep learning; Machine learning; Magnetic resonance imaging; Prostate cancer}, - nlm-id = {1250250}, - optnote = {RADIOLOGY,DIAG}, - owner = {NLM}, - pii = {S0010-4825(22)00578-9}, - pmid = {35841780}, - pubmodel = {Print-Electronic}, - revised = {2022-09-29}, - year = {2022}, -} - -@Article{Sadr23, - author = {Sadr, Soroush and Mohammad-Rahimi, Hossein and Motamedian, Saeed Reza and Zahedrozegar, Samira and Motie, Parisa and Vinayahalingam, Shankeeth and Dianat, Omid and Nosrat, Ali}, - title = {Deep Learning for Detection of Periapical Radiolucent Lesions: A Systematic Review and Meta-analysis of Diagnostic Test Accuracy.}, - doi = {10.1016/j.joen.2022.12.007}, - issn = {1878-3554}, - issue = {3}, - pages = {248--261.e3}, - pubstate = {ppublish}, - volume = {49}, - abstract = {The aim of this systematic review and meta-analysis was to investigate the overall accuracy of deep learning models in detecting periapical (PA) radiolucent lesions in dental radiographs, when compared to expert clinicians. Electronic databases of Medline (via PubMed), Embase (via Ovid), Scopus, Google Scholar, and arXiv were searched. Quality of eligible studies was assessed by using Quality Assessment and Diagnostic Accuracy Tool-2. Quantitative analyses were conducted using hierarchical logistic regression for meta-analyses on diagnostic accuracy. Subgroup analyses on different image modalities (PA radiographs, panoramic radiographs, and cone beam computed tomographic images) and on different deep learning tasks (classification, segmentation, object detection) were conducted. Certainty of evidence was assessed by using the Grading of Recommendations Assessment, Development, and Evaluation system. A total of 932 studies were screened. Eighteen studies were included in the systematic review, out of which 6 studies were selected for quantitative analyses. Six studies had low risk of bias. Twelve studies had risk of bias. Pooled sensitivity, specificity, positive likelihood ratio, negative likelihood ratio, and diagnostic odds ratio of included studies (all image modalities; all tasks) were 0.925 (95% confidence interval [CI], 0.862-0.960), 0.852 (95% CI, 0.810-0.885), 6.261 (95% CI, 4.717-8.311), 0.087 (95% CI, 0.045-0.168), and 71.692 (95% CI, 29.957-171.565), respectively. No publication bias was detected (Egger's test, P = .82). Grading of Recommendations Assessment, Development, and Evaluation showed a "high" certainty of evidence for the studies included in the meta-analyses. Compared to expert clinicians, deep learning showed highly accurate results in detecting PA radiolucent lesions in dental radiographs. Most studies had risk of bias.
There was a lack of prospective studies.}, - completed = {2023-02-28}, - country = {United States}, - file = {Sadr23.pdf:pdf\\Sadr23.pdf:PDF}, - issn-linking = {0099-2399}, - journal = {Journal of endodontics}, - keywords = {Deep Learning; Cone-Beam Computed Tomography, methods; Radiography, Panoramic; Diagnostic Tests, Routine; Sensitivity and Specificity; Artificial intelligence; deep learning; dental radiograph; periapical lesion; sensitivity; specificity}, - nlm-id = {7511484}, - optnote = {RADIOLOGY,DIAG}, - owner = {NLM}, - pii = {S0099-2399(22)00845-7}, - pmid = {36563779}, - pubmodel = {Print-Electronic}, - revised = {2023-02-28}, - year = {2023}, -} - -@Conference{Anto23, - author = {Antonissen, N and Venkadesh, K and Gietema, H and Vliegenthart, R and Saghir, Z and Scholten, E. Th and Prokop, M and Schaefer-Prokop, C and Jacobs, C }, - booktitle = ECR, - title = {Retrospective validation of nodule management based on deep learning-based malignancy thresholds in lung cancer screening}, - abstract = {Purpose: We previously developed and validated a deep learning (DL) algorithm for malignancy risk estimation of screen-detected nodules. The nodule risk cut-off for a positive screen, triggering more intensive follow-up (either short-term follow-up, PET-CT or biopsy), varies in existing nodule management protocols; 1-2% for Lung-RADS (cat 3), 6% for PanCan2b (CAT3). In this study, we investigated two DL-based malignancy thresholds to define a positive screen, compared to existing nodule management protocols. -Methods and materials: All baseline CT-scans from the Danish Lung Cancer Screening Trial were linked to lung cancer diagnosis within 2 years, resulting in 2,019 non-cancer and 18 cancer cases. The DL-based malignancy risk was computed for all screen-detected nodules using two malignancy risk cut-off points (6% and 10%), as threshold for a positive screen. For both Lung-RADS and PanCan2b, we used the published nodule-risk cut-offs for a positive screen. Sensitivity and False Positive Rate (FPR) were calculated for all baseline scans (n=2,037) using the risk dominant nodule per scan. -Results: At a threshold of 6%, DL achieved the highest sensitivity with 88.9% compared to 83.3% of Lung-RADS and 77.8% with PanCan2b. DL and PanCan2b yielded comparable FPR of 3.6% and 4.1%, respectively, while Lung-RADS had a higher FPR of 8.7%. Increasing the DL threshold to ≥10% resulted in a sensitivity of 88.9%, and a FPR of 2.5%. -Conclusion: DL-based nodule risk cut-offs achieved the highest sensitivity and lowest FPR for defining a positive screen, triggering more intense diagnostic work-up. Increasing the risk cut-off from 6% to 10% further decreased the FPR without alteration of sensitivity. -Limitations: This study is a retrospective analysis on data from one screening trial and one screening round. More external validation is needed, including validation for incidence screenings.}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Article{Sido23, - author = {Sidorenkov, Grigory and Stadhouders, Ralph and Jacobs, Colin and Mohamed Hoesein, Firdaus A. A. and Gietema, Hester A. and Nackaerts, Kristiaan and Saghir, Zaigham and Heuvelmans, Marjolein A. and Donker, Hylke C. and Aerts, Joachim G. and Vermeulen, Roel and Uitterlinden, Andre and Lenters, Virissa and van Rooij, Jeroen and Schaefer-Prokop, Cornelia and Groen, Harry J. M. and de Jong, Pim A. and Cornelissen, Robin and Prokop, Mathias and de Bock, Geertruida H. 
and Vliegenthart, Rozemarijn}, - title = {Multi-source data approach for personalized outcome prediction in lung cancer screening: update from the NELSON trial.}, - doi = {10.1007/s10654-023-00975-9}, - issn = {1573-7284}, - abstract = {Trials show that low-dose computed tomography (CT) lung cancer screening in long-term (ex-)smokers reduces lung cancer mortality. However, many individuals were exposed to unnecessary diagnostic procedures. This project aims to improve the efficiency of lung cancer screening by identifying high-risk participants, and improving risk discrimination for nodules. This study is an extension of the Dutch-Belgian Randomized Lung Cancer Screening Trial, with a focus on personalized outcome prediction (NELSON-POP). New data will be added on genetics, air pollution, malignancy risk for lung nodules, and CT biomarkers beyond lung nodules (emphysema, coronary calcification, bone density, vertebral height and body composition). The roles of polygenic risk scores and air pollution in screen-detected lung cancer diagnosis and survival will be established. The association between the AI-based nodule malignancy score and lung cancer will be evaluated at baseline and incident screening rounds. The association of chest CT imaging biomarkers with outcomes will be established. Based on these results, multisource prediction models for pre-screening and post-baseline-screening participant selection and nodule management will be developed. The new models will be externally validated. We hypothesize that we can identify 15-20% of participants with a low risk of lung cancer or short life expectancy and thus prevent ~140,000 Dutch individuals from being screened unnecessarily. We hypothesize that our models will improve the specificity of nodule management by 10% without loss of sensitivity as compared to assessment of nodule size/growth alone, and reduce unnecessary work-up by 40-50%.}, - citation-subset = {IM}, - file = {Sido23.pdf:pdf\\Sido23.pdf:PDF}, - issn-linking = {0393-2990}, - journal = {European journal of epidemiology}, - keywords = {CT screening; Imaging biomarkers; Lung cancer; Lung nodules; Prediction model}, - nlm-id = {8508062}, - optnote = {DIAG, RADIOLOGY}, - owner = {NLM}, - pii = {10.1007/s10654-023-00975-9}, - pmid = {36943671}, - year = {2023}, - volume = {38}, - number = {4}, - pages = {445--454}, -} - -@Article{Graa23, - title = {MRI image features with an evident relation to low back pain: a narrative review}, - author = {van der Graaf, Jasper W and Kroeze, Robert Jan and Buckens, Constantinus FM and Lessmann, Nikolas and van Hooff, Miranda L}, - journal = {European Spine Journal}, - pages = {1--12}, - year = {2023}, - publisher = {Springer}, - abstract = {Purpose: Low back pain (LBP) is one of the most prevalent health conditions worldwide and responsible for the most years lived with disability, yet the etiology is often unknown. Magnetic resonance imaging (MRI) is frequently used for treatment decisions even though it is often inconclusive. There are many different image features that could relate to low back pain. Conversely, multiple etiologies do relate to spinal degeneration but do not actually cause the perceived pain. This narrative review provides an overview of all possible relevant features visible on MRI images and determines their relation to LBP. Methods: We conducted a separate literature search per image feature. All included studies were scored using the GRADE guidelines.
Based on the reported results per feature, an evidence agreement (EA) score was provided, enabling us to compare the collected evidence of separate image features. The various relations between MRI features and their associated pain mechanisms were evaluated to provide a list of features that are related to LBP. Results: All searches combined generated a total of 4472 hits, of which 31 articles were included. Features were divided into five different categories: 'discogenic', 'neuropathic', 'osseous', 'facetogenic', and 'paraspinal', and discussed separately. Conclusion: Our research suggests that type I Modic changes, disc degeneration, endplate defects, disc herniation, spinal canal stenosis, nerve compression, and muscle fat infiltration have the highest probability to be related to LBP. These can be used to improve clinical decision-making for patients with LBP based on MRI.}, - doi = {10.1007/s00586-023-07602-x}, -} - -@Conference{Graa22, - title = {Segmentation of vertebrae and intervertebral discs in lumbar spine MR images with iterative instance segmentation}, - author = {van der Graaf, Jasper W and van Hooff, Miranda L and Buckens, Constantinus FM and Lessmann, Nikolas}, - booktitle = {Medical Imaging 2022: Image Processing}, - volume = {12032}, - pages = {909--913}, - year = {2022}, - organization = {SPIE}, - abstract = {Segmentation of vertebrae and intervertebral discs (IVD) in MR images are important steps for automatic image analysis. This paper proposes an extension of an iterative vertebra segmentation method that relies on a 3D fully-convolutional neural network to segment the vertebrae one-by-one. We augment this approach with an additional segmentation step following each vertebra detection to also segment the IVD below each vertebra. To train and test the algorithm, we collected and annotated T2-weighted sagittal lumbar spine MR scans of 53 patients. The presented approach achieved a mean Dice score of 93 % ± 2 % for vertebra segmentation and 86 % ± 7 % for IVD segmentation. The method was able to cope with pathological abnormalities such as compression fractures, Schmorl's nodes and collapsed IVDs. In comparison, a similar network trained for IVD segmentation without knowledge of the adjacent vertebra segmentation result did not detect all IVDs (89 %) and also achieved a lower Dice score of 83 % ± 9 %. These results indicate that combining IVD segmentation with vertebra segmentation in lumbar spine MR images can help to improve the detection and segmentation performance compared with separately segmenting these structures.}, - doi = {10.1117/12.2611423}, -} - -@InProceedings{Alves22c, - author = {Alves, Nat{\'a}lia and de Wilde, Bram}, - booktitle = {Fast and Low-Resource Semi-supervised Abdominal Organ Segmentation}, - title = {Uncertainty-Guided Self-learning Framework for Semi-supervised Multi-organ Segmentation}, - doi = {10.1007/978-3-031-23911-3_11}, - editor = {Ma, Jun and Wang, Bo}, - isbn = {978-3-031-23911-3}, - pages = {116--127}, - publisher = {Springer Nature Switzerland}, - address = {Cham}, - optnote = {RADIOLOGY, DIAG}, - year = {2022}, -} - -@Article{Wild23a, - author = {de Wilde, Bram and Joosten, Frank and Venderink, Wulphert and Davidse, Mirjam E. J. and Geurts, Juliëtte and Kruijt, Hanneke and Vermeulen, Afke and Martens, Bibi and Schyns, Maxime V. P. and Huige, Josephine C. B. M. and de Boer, Myrte C. and Tonino, Bart A. R. and Zandvoort, Herman J. A.
and Lammert, Kirsti and Parviainen, Helka and Vuorinen, Aino-Maija and Syväranta, Suvi and Vogels, Ruben R. M. and Prins, Wiesje and Coppola, Andrea and Bossa, Nancy and ten Broek, Richard P. G. and Huisman, Henkjan}, - title = {Inter- and Intra-Observer Variability and the Effect of Experience in Cine-MRI for Adhesion Detection}, - doi = {10.3390/jimaging9030055}, - number = {3}, - pages = {55}, - volume = {9}, - journal = {Journal of Imaging}, - optnote = {RADIOLOGY,DIAG}, - publisher = {MDPI}, - year = {2023}, -} - -@Article{Wild23b, - author = {de Wilde, Bram and Saha, Anindo and ten Broek, Richard PG and Huisman, Henkjan}, - title = {Medical diffusion on a budget: textual inversion for medical image generation}, - journal = {arXiv:2303.13430}, - optnote = {RADIOLOGY,DIAG}, - year = {2023}, -} - -@InProceedings{Bosma23a, - author = {Joeran S. Bosma and Dr{\'e} Peeters and Nat{\'a}lia Alves and Anindo Saha and Zaigham Saghir and Colin Jacobs and Henkjan Huisman}, - booktitle = {Medical Imaging with Deep Learning}, - title = {Reproducibility of Training Deep Learning Models for Medical Image Analysis}, - url = {https://openreview.net/forum?id=MR01DcGST9}, - abstract = {Performance of deep learning algorithms varies due to their development data and training method, but also due to several stochastic processes during training. Due to these random factors, a single training run may not accurately reflect the performance of a given training method. Statistical comparisons in literature between different deep learning training methods typically ignore this performance variation between training runs and incorrectly claim significance of changes in training method. We hypothesize that the impact of such performance variation is substantial, such that it may invalidate biomedical competition leaderboards and some scientific papers. To test this, we investigate the reproducibility of training deep learning algorithms for medical image analysis. We repeated training runs from prior scientific studies: three diagnostic tasks (pancreatic cancer detection in CT, clinically significant prostate cancer detection in MRI, and lung nodule malignancy risk estimation in low-dose CT) and two organ segmentation tasks (pancreas segmentation in CT and prostate segmentation in MRI). A previously published top-performing algorithm for each task was trained multiple times to determine the variance in model performance. For all three diagnostic algorithms, performance variation from retraining was significant compared to data variance. Statistically comparing independently trained algorithms from the same training method using the same dataset should follow the null hypothesis, but we observed claimed significance with a p-value below 0.05 in - of comparisons with conventional testing (paired bootstrapping). We conclude that variance in model performance due to retraining is substantial and should be accounted for.}, - file = {Bosma23a.pdf:pdf\\Bosma23a.pdf:PDF}, - optnote = {RADIOLOGY, DIAG}, - year = {2023}, -} - -@Article{Schu23, - author = {M. Schuurmans and N. Alves and Pierpaolo Vendittelli and H. Huisman and J.
Hermans}, - title = {Artificial Intelligence in Pancreatic Ductal Adenocarcinoma Imaging: A Commentary on Potential Future Applications.}, - doi = {10.1053/j.gastro.2023.04.003}, - file = {Schu23.pdf:pdf\\Schu23.pdf:PDF}, - journal = {Gastroenterology}, - optnote = {DIAG, RADIOLOGY}, - pmid = {37054755}, - year = {2023}, -} - -@Article{Wild23c, - author = {van den Beukel, Bastiaan A. W. and de Wilde, Bram and Joosten, Frank and van Goor, Harry and Venderink, Wulphert and Huisman, Henkjan J. and ten Broek, Richard P. G.}, - title = {Quantifiable Measures of Abdominal Wall Motion for Quality Assessment of Cine-MRI Slices in Detection of Abdominal Adhesions}, - doi = {10.3390/jimaging9050092}, - number = {5}, - volume = {9}, - journal = {Journal of Imaging}, - optnote = {RADIOLOGY,DIAG}, - publisher = {MDPI}, - year = {2023}, -} - -@Article{Bokh23, - author = {J. Bokhorst and I. Nagtegaal and F. Fraggetta and S. Vatrano and W. Mesker and Michael Vieth and J. A. van der Laak and F. Ciompi}, - title = {Deep learning for multi-class semantic segmentation enables colorectal cancer detection and classification in digital pathology images}, - abstract = {In colorectal cancer (CRC), artificial intelligence (AI) can alleviate the laborious task of characterization and reporting on resected biopsies, including polyps, the numbers of which are increasing as a result of CRC population screening programs ongoing in many countries all around the globe. Here, we present an approach to address two major challenges in the automated assessment of CRC histopathology whole-slide images. We present an AI-based method to segment multiple (n=14) tissue compartments in the H&E-stained whole-slide image, which provides a different, more perceptible picture of tissue morphology and composition. We test and compare a panel of state-of-the-art loss functions available for segmentation models, and provide indications about their use in histopathology image segmentation, based on the analysis of (a) a multi-centric cohort of CRC cases from five medical centers in the Netherlands and Germany, and (b) two publicly available datasets on segmentation in CRC. We used the best performing AI model as the basis for a computer-aided diagnosis system that classifies colon biopsies into four main categories that are relevant pathologically. We report the performance of this system on an independent cohort of more than 1000 patients. The results show that with a good segmentation network as a base, a tool can be developed which can support pathologists in the risk stratification of colorectal cancer patients, among other possible uses. We have made the segmentation model available for research use on https://grand-challenge.org/algorithms/colon-tissue-segmentation/.}, - file = {Bokh23.pdf:pdf\\Bokh23.pdf:PDF}, - optnote = {DIAG, PATHOLOGY}, - pmid = {37225743}, - year = {2023}, - journal = NATSCIREP, - volume = {13}, - pages = {8398}, - doi = {10.1038/s41598-023-35491-z}, -} - -@Conference{Dama23, - author = {Marina D'Amato and Maschenka Balkenhol and Mart van Rijthoven and Jeroen van der Laak and Francesco Ciompi}, - booktitle = {MIDL}, - title = {On the robustness of regressing tumor percentage as an explainable detector in histopathology whole-slide images}, - abstract = {In recent years, Multiple Instance Learning (MIL) approaches have gained popularity to address the task of weakly-supervised tumor detection in whole-slide images (WSIs).
-However, standard MIL relies on classification methods for tumor detection that require negative controls, i.e., tumor-free cases, which are challenging to obtain in real-world clinical scenarios, especially when considering surgical resection specimens. -Inspired by recent work, in this paper we tackle tumor detection via a MIL-like weakly-supervised regression approach to predict the percentage of tumor present in WSIs, a clinically available target that overcomes the need for manual annotations or for tumor-free slides. -We characterize the quality of such a target by investigating its robustness in the presence of noise on regression percentages and provide explainability through attention maps. We test our approach on breast cancer data from primary tumor and lymph node metastases.}, - optnote = {DIAG, PATHOLOGY}, - year = {2023}, -} - -@Conference{Loma23, - author = {Robin Lomans and Rachel van der Post and Francesco Ciompi}, - booktitle = {MIDL}, - title = {Interactive Cell Detection in H&E-stained slides of Diffuse Gastric Cancer}, - abstract = {We present an interactive detection model to improve the cell annotation workflow of diffuse gastric cancer. -The model relates image and user inputs and is trained to detect three types of cells in diffuse gastric cancer histology. -We measure model multi-class cell detection performance as per-class F1 score and we show that it increases with the number of user input clicks. -Moreover, we show that the proposed interactive annotation approach substantially reduces the number of user actions needed for complete image annotation, achieving a 17\% reduction for the multi-class case. -Future work will implement an iterative approach to filter out recurring false positives for further performance improvement.}, - optnote = {DIAG, PATHOLOGY}, - year = {2023}, -} - -@Conference{Guev23, - author = {Bryan Cardenas Guevara and Niccolò Marini and Stefano Marchesin and Witali Aswolinskiy and Robert-Jan Schlimbach and Damian Podareanu and Francesco Ciompi}, - booktitle = {MIDL}, - title = {Caption generation from histopathology whole-slide images using pre-trained transformers}, - abstract = {The recent advent of foundation models and large language models has enabled scientists to leverage large-scale knowledge of pretrained (vision) transformers and efficiently tailor it to downstream tasks. This technology can potentially automate multiple aspects of cancer diagnosis in digital pathology, from whole-slide image classification to generating pathology reports while training with pairs of images and text from the diagnostic conclusion. In this work, we orchestrate a set of weakly-supervised transformer-based models with a first aim to address both whole-slide image classification and captioning, addressing the automatic generation of the conclusion of pathology reports in the form of image captions. We report our first results on a multicentric multilingual dataset of colon polyps and biopsies.
We achieve high diagnostic accuracy with no supervision and cheap computational adaptation.}, - optnote = {DIAG, PATHOLOGY}, - year = {2023}, -} - -@InProceedings{Spro23, - author = {Joey Spronck and Thijs Gelton and Leander van Eekelen and Joep Bogaerts and Leslie Tessier and Mart van Rijthoven and Lieke van der Woude and Michel van den Heuvel and Willemijn Theelen and Jeroen van der Laak and Francesco Ciompi}, - booktitle = MIDL, - title = {nnUNet meets pathology: bridging the gap for application to whole-slide images and computational biomarkers}, - url = {https://openreview.net/forum?id=aHuwlUu_QR}, - abstract = {Image segmentation is at the core of many tasks in medical imaging, including the engineering of computational biomarkers. While the self-configuring nnUNet framework for image segmentation tasks completely shifted the state-of-the-art in the radiology field, it has never been applied to the field of histopathology, likely due to inherent limitations that nnUNet has when applied to the pathology domain "off-the-shelf". We introduce nnUNet for pathology, built upon the original nnUNet, and engineered domain-specific solutions to bridge the gap between radiology and pathology. Our framework includes critical hyperparameter adjustments and pathology-specific color augmentations, as well as an essential whole-slide image inference pipeline. We developed and validated our approach on non-small cell lung cancer data, showing the effectiveness of nnUNet for pathology over default nnUNet settings, and achieved the first position on the experimental segmentation task of the TIGER challenge on breast cancer data when using our pipeline "out-of-the-box". We also introduce a novel inference uncertainty approach, which proved helpful for the quantification of the tumor-infiltrating lymphocytes biomarker in non-small cell lung cancer biopsies of patients treated with immunotherapy. We coded our framework as a workflow-friendly segmentation tool and made it publicly available.}, - optnote = {DIAG, PATHOLOGY}, - year = {2023}, -} - -@Article{Bokh23a, - author = {J. Bokhorst and I. Nagtegaal and I. Zlobec and H. Dawson and K. Sheahan and F. Simmer and R. Kirsch and Michael Vieth and A. Lugli and J. A. van der Laak and F. Ciompi}, - title = {Semi-Supervised Learning to Automate Tumor Bud Detection in Cytokeratin-Stained Whole-Slide Images of Colorectal Cancer}, - url = {https://www.semanticscholar.org/paper/be741d6f455a941f206de7fecb44f81678f385bf}, - abstract = {Simple Summary Tumor budding is a promising and cost-effective histological biomarker with strong prognostic value in colorectal cancer. It is defined by the presence of single tumor cells or small clusters of cells within the tumor or at the tumor-invasion front. Deep learning based tumor bud assessment can potentially improve diagnostic reproducibility and efficiency. This study aimed to develop a deep learning algorithm to detect tumor buds in cytokeratin-stained images automatically. We used a semi-supervised learning technique to overcome the limitations of a small dataset. Validation of our model showed a sensitivity of 91% and a fairly strong correlation between a human annotator and our deep learning method. We demonstrate that the automated tumor bud count achieves a prognostic value similar to visual estimation. We also investigate new metrics for quantifying buds, such as density and dispersion, and report on their predictive value. 
Abstract Tumor budding is a histopathological biomarker associated with metastases and adverse survival outcomes in colorectal carcinoma (CRC) patients. It is characterized by the presence of single tumor cells or small clusters of cells within the tumor or at the tumor-invasion front. In order to obtain a tumor budding score for a patient, the region with the highest tumor bud density must first be visually identified by a pathologist, after which buds will be counted in the chosen hotspot field. The automation of this process will expectedly increase efficiency and reproducibility. Here, we present a deep learning convolutional neural network model that automates the above procedure. For model training, we used a semi-supervised learning method, to maximize the detection performance despite the limited amount of labeled training data. The model was tested on an independent dataset in which human- and machine-selected hotspots were mapped in relation to each other and manual and machine-detected tumor bud numbers in the manually selected fields were compared. We report the results of the proposed method in comparison with visual assessment by pathologists. We show that the automated tumor bud count achieves a prognostic value comparable with visual estimation, while based on an objective and reproducible quantification. We also explore novel metrics to quantify buds such as density and dispersion and report their prognostic value. We have made the model available for research use on the grand-challenge platform.}, - file = {Bokh23a.pdf:pdf\\Bokh23a.pdf:PDF}, - optnote = {DIAG, PATHOLOGY}, - journal = CANCERS, - volume = {15}, - issue = {7}, - pages = {2079}, - pmid = {37046742}, - year = {2023}, - doi = {10.3390/cancers15072079}, -} - -@Article{Laar23, - author = {Laarhuis, Babette I. and Janssen, Marcel J. R. and Simons, Michiel and van Kalmthout, Ludwike W. M. and van der Doelen, Maarten J. and Peters, Steffie M. B. and Westdorp, Harm and van Oort, Inge M. and Litjens, Geert and Gotthardt, Martin and Nagarajah, James and Mehra, Niven and Privé, Bastiaan M.}, - year = {2023}, - month = {4}, - journal = CGC, - title = {Tumoral Ki67 and PSMA Expression in Fresh Pre-PSMA-RLT Biopsies and Its Relation With PSMA-PET Imaging and Outcomes of PSMA-RLT in Patients With mCRPC.}, - doi = {10.1016/j.clgc.2023.04.003}, - issn = {1938-0682}, - pubstate = {aheadofprint}, - abstract = {Prostate specific membrane antigen (PSMA) directed radioligand therapy (RLT) is a novel therapy for metastatic castration-resistant prostate cancer (mCRPC) patients. However, it is still poorly understood why approximately 40% of the patients do not respond to PSMA-RLT. The aims of this study were to evaluate the pretreatment PSMA expression on immunohistochemistry (IHC) and PSMA uptake on PET/CT imaging in mCRPC patients who underwent PSMA-RLT. We correlated these parameters and a cell proliferation marker (Ki67) to the therapeutic efficacy of PSMA-RLT. In this retrospective study, mCRPC patients who underwent PSMA-RLT were analyzed. Patients' biopsies were scored for immunohistochemical Ki67 expression, PSMA staining intensity and percentage of cells with PSMA expression. Moreover, the PSMA tracer uptake of the tumor lesion(s) and healthy organs on PET/CT imaging was assessed. The primary outcome was to evaluate the association between histological PSMA protein expression of tumor in pre-PSMA-RLT biopsies and the PSMA uptake on PSMA PET/CT imaging of the biopsied lesion.
Secondary outcomes were to assess the relationship between PSMA expression and Ki67 on IHC and the progression-free survival (PFS) and overall survival (OS) following PSMA-RLT. In total, 22 mCRPC patients were included in this study. Nineteen (86%) patients showed a high and homogeneous PSMA expression of >80% on IHC. Three (14%) patients had low PSMA expression on IHC. Although there was limited PSMA uptake on PET/CT imaging, these 3 patients had lower PSMA uptake on PET/CT imaging compared to the patients with high PSMA expression on IHC. Yet, no correlation was found between PSMA uptake on PET/CT imaging and PSMA expression on IHC (SUVmax: R = 0.046 and SUVavg: R = 0.036). The 3 patients had a shorter PFS compared to the patients with high PSMA expression on IHC (HR: 4.76, 95% CI: 1.14-19.99; P = .033). Patients with low Ki67 expression had a longer PFS and OS compared to patients with a high Ki67 expression (HR: 0.40, 95% CI: 0.15-1.06; P = .013). CONCLUSION: The PSMA uptake on PSMA-PET/CT generally followed the PSMA expression on IHC. However, heterogeneity may be missed on PSMA-PET/CT. Immunohistochemical PSMA and Ki67 expression in fresh tumor biopsies may contribute to predicting treatment efficacy of PSMA-RLT in mCRPC patients. This needs to be further explored in prospective cohorts.}, - citation-subset = {IM}, - country = {United States}, - file = {:pdf/Laar23.pdf:PDF}, - issn-linking = {1558-7673}, - keywords = {Immunohistochemistry; Prostate cancer; Prostate-specific membrane antigen; Radioligand therapy}, - nlm-id = {101260955}, - optnote = {RADIOLOGY,DIAG,PATHOLOGY}, - owner = {NLM}, - pii = {S1558-7673(23)00090-3}, - pmid = {37164814}, - pubmodel = {Print-Electronic}, - revised = {2023-05-10}, -} - -@InProceedings{Scho23, - author = {Schouten, Daan and Litjens, Geert}, - booktitle = MI, - title = {PythoStitcher: an iterative approach for stitching digitized tissue fragments into full resolution whole-mount reconstructions}, - doi = {10.1117/12.2652512}, - pages = {1247118}, - series = {#SPIE#}, - volume = {12471}, - abstract = {In histopathology, whole-mount sections (WMS) can be utilized to capture entire cross-sections of excised tissue, thereby reducing the number of slides per case, preserving tissue context, and minimizing cutting artefacts. Additionally, the use of WMS allows for easier correlation of histology and pre-operative imaging. However, in digital pathology, WMS are not frequently acquired due to the limited availability of whole-slide scanners that can scan double-width glass slides. Moreover, whole-mounts may still be too large to fit on a single double-width glass slide, and archival material is typically stored as tissue fragments. In this work, we present PythoStitcher, an improved version of the AutoStitcher algorithm for automatically stitching multiple tissue fragments and constructing a full-resolution WMS.
PythoStitcher is based on a genetic algorithm that iteratively optimizes the affine transformation matrix for each tissue …}, - file = {Scho23.pdf:pdf\\Scho23.pdf:PDF}, - optjournal = {Medical Imaging 2023: Digital and Computational Pathology}, - optlocation = {San Diego, CA, USA}, - optnote = {RADIOLOGY,DIAG,PATHOLOGY}, - optnumber = {1}, - optpublisher = {SPIE}, - year = {2023}, -} - -@Article{Eeke23, - author = {van Eekelen, Leander and Litjens, Geert and Hebeda, Konnie}, - year = {2023}, - month = {2}, - journal = PATHOB, - title = {Artificial intelligence in bone marrow histological diagnostics: potential applications and challenges.}, - doi = {10.1159/000529701}, - issn = {1423-0291}, - pubstate = {aheadofprint}, - abstract = {The expanding digitalization of routine diagnostic histological slides holds a potential to apply artificial intelligence (AI) to pathology, including bone marrow (BM) histology. In this perspective we describe potential tasks in diagnostics that can be supported, investigations that can be guided and questions that can be answered by the future application of AI on whole slide images of BM biopsies. These range from characterization of cell lineages and quantification of cells and stromal structures to disease prediction. First glimpses show an exciting potential to detect subtle phenotypic changes with AI that are due to specific genotypes. The discussion is illustrated by examples of current AI research using BM biopsy slides. In addition, we briefly discuss current challenges for implementation of AI-supported diagnostics.}, - citation-subset = {IM}, - country = {Switzerland}, - file = {:pdf/Eeke23.pdf:PDF}, - issn-linking = {1015-2008}, - nlm-id = {9007504}, - optnote = {RADIOLOGY,DIAG,PATHOLOGY}, - owner = {NLM}, - pii = {000529701}, - pmid = {36791682}, - pubmodel = {Print-Electronic}, - revised = {2023-02-15}, -} - -@Article{Linm23, - author = {Linmans, Jasper and Elfwing, Stefan and van der Laak, Jeroen and Litjens, Geert}, - year = {2023}, - month = {1}, - journal = MIA, - title = {Predictive uncertainty estimation for out-of-distribution detection in digital pathology.}, - doi = {10.1016/j.media.2022.102655}, - issn = {1361-8423}, - pages = {102655}, - pubstate = {ppublish}, - volume = {83}, - abstract = {Machine learning model deployment in clinical practice demands real-time risk assessment to identify situations in which the model is uncertain. Once deployed, models should be accurate for classes seen during training while providing informative estimates of uncertainty to flag abnormalities and unseen classes for further analysis. Although recent developments in uncertainty estimation have resulted in an increasing number of methods, a rigorous empirical evaluation of their performance on large-scale digital pathology datasets is lacking. This work provides a benchmark for evaluating prevalent methods on multiple datasets by comparing the uncertainty estimates on both in-distribution and realistic near and far out-of-distribution (OOD) data on a whole-slide level. To this end, we aggregate uncertainty values from patch-based classifiers to whole-slide level uncertainty scores. We show that results found in classical computer vision benchmarks do not always translate to the medical imaging setting. Specifically, we demonstrate that deep ensembles perform best at detecting far-OOD data but can be outperformed on a more challenging near-OOD detection task by multi-head ensembles trained for optimal ensemble diversity. 
Furthermore, we demonstrate the harmful impact OOD data can have on the performance of deployed machine learning models. Overall, we show that uncertainty estimates can be used to discriminate in-distribution from OOD data with high AUC scores. Still, model deployment might require careful tuning based on prior knowledge of prospective OOD data.}, - citation-subset = {IM}, - completed = {2022-12-21}, - country = {Netherlands}, - file = {:pdf/Linm23.pdf:PDF}, - issn-linking = {1361-8415}, - keywords = {Humans; Prospective Studies; Machine Learning; Pathology; Deep learning; Ensemble diversity; Histopathology; Multi-heads; Out-of-distribution detection; Uncertainty estimation}, - nlm-id = {9713490}, - optnote = {RADIOLOGY,DIAG,PATHOLOGY}, - owner = {NLM}, - pii = {S1361-8415(22)00283-3}, - pmid = {36306568}, - pubmodel = {Print-Electronic}, - revised = {2023-02-02}, -} - -@Article{Rein23, - author = {Reinke, Annika and Tizabi, Minu D. and Baumgartner, Michael and Eisenmann, Matthias and Heckmann-Nötzel, Doreen and Kavur, A. Emre and Rädsch, Tim and Sudre, Carole H. and Acion, Laura and Antonelli, Michela and Arbel, Tal and Bakas, Spyridon and Benis, Arriel and Blaschko, Matthew and Büttner, Florian and Cardoso, M. Jorge and Cheplygina, Veronika and Chen, Jianxu and Christodoulou, Evangelia and Cimini, Beth A. and Collins, Gary S. and Farahani, Keyvan and Ferrer, Luciana and Galdran, Adrian and van Ginneken, Bram and Glocker, Ben and Godau, Patrick and Haase, Robert and Hashimoto, Daniel A. and Hoffman, Michael M. and Huisman, Merel and Isensee, Fabian and Jannin, Pierre and Kahn, Charles E. and Kainmueller, Dagmar and Kainz, Bernhard and Karargyris, Alexandros and Karthikesalingam, Alan and Kenngott, Hannes and Kleesiek, Jens and Kofler, Florian and Kooi, Thijs and Kopp-Schneider, Annette and Kozubek, Michal and Kreshuk, Anna and Kurc, Tahsin and Landman, Bennett A. and Litjens, Geert and Madani, Amin and Maier-Hein, Klaus and Martel, Anne L. and Mattson, Peter and Meijering, Erik and Menze, Bjoern and Moons, Karel G. M. and Müller, Henning and Nichyporuk, Brennan and Nickel, Felix and Petersen, Jens and Rafelski, Susanne M. and Rajpoot, Nasir and Reyes, Mauricio and Riegler, Michael A. and Rieke, Nicola and Saez-Rodriguez, Julio and Sánchez, Clara I. and Shetty, Shravya and van Smeden, Maarten and Summers, Ronald M. and Taha, Abdel A. and Tiulpin, Aleksei and Tsaftaris, Sotirios A. and Van Calster, Ben and Varoquaux, Gaël and Wiesenfarth, Manuel and Yaniv, Ziv R. and Jäger, Paul F. and Maier-Hein, Lena}, - year = {2023}, - month = {2}, - title = {Understanding metric-related pitfalls in image analysis validation}, - abstract = {Validation metrics are key for the reliable tracking of scientific progress and for bridging the current chasm between artificial intelligence (AI) research and its translation into practice. However, increasing evidence shows that particularly in image analysis, metrics are often chosen inadequately in relation to the underlying research problem. This could be attributed to a lack of accessibility of metric-related knowledge: While taking into account the individual strengths, weaknesses, and limitations of validation metrics is a critical prerequisite to making educated choices, the relevant knowledge is currently scattered and poorly accessible to individual researchers. 
Based on a multi-stage Delphi process conducted by a multidisciplinary expert consortium as well as extensive community feedback, the present work provides the first reliable and comprehensive common point of access to information on pitfalls related to validation metrics in image analysis. Focusing on biomedical image analysis but with the potential of transfer to other fields, the addressed pitfalls generalize across application domains and are categorized according to a newly created, domain-agnostic taxonomy. To facilitate comprehension, illustrations and specific examples accompany each pitfall. As a structured body of information accessible to researchers of all levels of expertise, this work enhances global comprehension of a key topic in image analysis validation.}, - file = {:pdf/Rein23.pdf:PDF}, - journal = {arXiv:2302.01790}, - optnote = {RADIOLOGY,DIAG,PATHOLOGY}, -} - -@Article{Jark22, - author = {Jarkman, Sofia and Karlberg, Micael and Pocevičiūtė, Milda and Bodén, Anna and Bándi, Péter and Litjens, Geert and Lundström, Claes and Treanor, Darren and van der Laak, Jeroen}, - year = {2022}, - month = {11}, - journal = CANCERS, - title = {Generalization of Deep Learning in Digital Pathology: Experience in Breast Cancer Metastasis Detection.}, - doi = {10.3390/cancers14215424}, - issn = {2072-6694}, - issue = {21}, - pubstate = {epublish}, - volume = {14}, - abstract = {Poor generalizability is a major barrier to clinical implementation of artificial intelligence in digital pathology. The aim of this study was to test the generalizability of a pretrained deep learning model to a new diagnostic setting and to a small change in surgical indication. A deep learning model for breast cancer metastases detection in sentinel lymph nodes, trained on CAMELYON multicenter data, was used as a base model, and achieved an AUC of 0.969 (95% CI 0.926-0.998) and FROC of 0.838 (95% CI 0.757-0.913) on CAMELYON16 test data. On local sentinel node data, the base model performance dropped to AUC 0.929 (95% CI 0.800-0.998) and FROC 0.744 (95% CI 0.566-0.912). On data with a change in surgical indication (axillary dissections) the base model performance indicated an even larger drop with a FROC of 0.503 (95% CI 0.201-0.911). The model was retrained with the addition of local data, resulting in about a 4% increase for both AUC and FROC for sentinel nodes, and an increase of 11% in AUC and 49% in FROC for axillary nodes. Pathologist qualitative evaluation of the retrained model's output showed no missed positive slides. False positives, false negatives and one previously undetected micro-metastasis were observed.
The study highlights the generalization challenge even when using a multicenter trained model, and that a small change in indication can considerably impact the model's performance.}, - country = {Switzerland}, - file = {:pdf/Jark22.pdf:PDF}, - issn-linking = {2072-6694}, - keywords = {artificial intelligence; breast cancer; computational pathology; deep learning; digital pathology; generalization; lymph node metastases}, - nlm-id = {101526829}, - optnote = {DIAG, RADIOLOGY, PATHOLOGY}, - owner = {NLM}, - pii = {5424}, - pmc = {PMC9659028}, - pmid = {36358842}, - pubmodel = {Electronic}, - revised = {2022-11-17}, -} - -@InProceedings{Vend23, - author = {Vendittelli, Pierpaolo and Bokhorst, John-Melle and Smeets, Esther Markus and Kryklyva, Valentyna and Brosens, Lodewijk and Verbeke, Caroline and Litjens, Geert}, - booktitle = MIDL, - title = {Automatic quantification of {TSR} as a prognostic marker for pancreatic cancer.}, - url = {https://openreview.net/forum?id=Dtz_iaUpGc}, - abstract = {The current diagnostic and outcome prediction methods for pancreatic cancer lack prognostic power. As such, identifying novel biomarkers using machine learning has become of increasing interest. In this study, we introduce a novel method for estimating the tumor-stroma ratio (TSR) in whole slide images (WSIs) of pancreatic tissue and assess its potential as a prognostic biomarker. A multi-step strategy for estimating TSR is proposed, including epithelium segmentation based on an immunohistochemical reference standard, a coarse pancreatic cancer segmentation, and a post-processing pipeline for TSR quantification. The resultant segmentation models are validated on external test sets using the Dice coefficient, and additionally, the TSR's potential as a prognostic factor is assessed using survival analysis, resulting in a C-index of 0.61.}, - file = {:pdf/Vend23.pdf:PDF}, - optnote = {RADIOLOGY,DIAG,PATHOLOGY}, - year = {2023}, -} - -@PhdThesis{Pate23, - author = {Ajay Patel}, - date = {2023}, - institution = {Radboud University, Nijmegen}, - title = {Automated Image Analysis of Cranial Non-Contrast CT}, - url = {https://repository.ubn.ru.nl/handle/2066/292990}, - abstract = {Stroke and intracranial hemorrhage (a brain bleed) are serious medical conditions that require fast and accurate diagnosis to aid clinicians in making treatment decisions. Computed tomography (CT) is a widely available and fast imaging technique used for diagnosis, but it relies on interpretation by a clinician. To aid in the diagnosis of stroke and intracranial hemorrhage, artificial intelligence algorithms for computer-aided diagnosis (CAD) have been developed to automatically analyze CT images. This research presents different methods that demonstrate accurate segmentations of large cerebral anatomy and the ability to quantify and segment intracerebral hemorrhages for further analysis. Whole 3D non-contrast CT images were also analyzed automatically to identify the presence of intracranial hemorrhage with high sensitivity. With further development, CAD systems will be able to assist physicians in diagnosing and predicting outcomes of stroke and intracranial hemorrhage in clinical settings.}, - copromotor = {R. Manniesing}, - file = {:pdf/Pate23.pdf:PDF}, - journal = {PhD thesis}, - optnote = {DIAG, RADIOLOGY}, - promotor = {B.
Prokop}, - school = {Radboud University, Nijmegen}, - year = {2023}, -} - -@PhdThesis{Call23, - author = {Erdi \c{C}all\i}, - date = {2023}, - institution = {Radboud University, Nijmegen}, - title = {Deep learning methods towards clinically applicable Chest X-ray interpretation systems}, - url = {https://repository.ubn.ru.nl/handle/2066/292983}, - abstract = {Chest X-Ray is a very commonly acquired, low-cost imaging examination which is sensitive for multiple pathologies while exposing the patient to very little ionizing radiation. The number of medical imaging examinations being acquired increases year-on-year while there is a global shortage of radiologists qualified to interpret the images. This research presents findings on the use of deep learning for interpretation of chest X-Ray images. An in-depth review of the current state-of-the-art is provided as well as investigation of handling of label noise, outlier detection, explainable identification of emphysema and detection of COVID-19 with robustness to missing data.}, - copromotor = {K. Murphy}, - file = {:pdf/Calli23.pdf:PDF}, - journal = {PhD thesis}, - optnote = {DIAG, RADIOLOGY}, - promotor = {B. van Ginneken}, - school = {Radboud University, Nijmegen}, - year = {2023}, -} - -@PhdThesis{Leeu23c, - author = {Kicky G. van Leeuwen}, - date = {2023}, - institution = {Radboud University, Nijmegen}, - title = {Validation and implementation of commercial artificial intelligence software for radiology}, - url = {https://repository.ubn.ru.nl/handle/2066/295128}, - abstract = {The aim of this thesis is to increase transparency of the AI software applications for the radiology market: the medical specialty which currently covers 75% of all approved medical AI software. The focus is on products available for clinical use in Europe, in other words, products that are CE marked. We discuss the potential use cases of AI in radiology, map commercially available AI products, independently assess the performance of products, and measure and model the (potential) added value. With the insights we have gained and publicly shared, we enable more informed decision-making by AI purchasers, users, investors, and creators. Furthermore, it encourages use and development of AI that is safe and of value to society. -The key contributions of this research are: -- Three years of publicly sharing of information to a global audience on commercially available AI products, verified regulatory clearance information, product specifications, and scientific evidence, through www.AIforRadiology.com and associated monthly newsletter. -- Initiating the Dutch Radiology AI-network connecting "AI-champions" among hospitals to share experiences and to enable the yearly inquiry on the clinical use of commercial AI. -- Development of a framework for the independent and objective validation of commercially available AI products and applying this to ten products, for two different use cases, on data from seven medical centers. With this framework, we make validation more efficient and impartial, enabling informed purchasing or reimbursement decisions. -- One of the first demonstrations of how an early health technology assessment can be performed to demonstrate the value of an AI product before implementation.}, - copromotor = {M.J.C.M. Rutten, Dr. M. de Rooij, S. Schalekamp}, - file = {:pdf/Leeu23c.pdf:PDF}, - journal = {PhD thesis}, - optnote = {DIAG, RADIOLOGY}, - promotor = {B. 
van Ginneken}, - school = {Radboud University, Nijmegen}, - year = {2023}, -} - - -@Conference{Loma23a, - author = {Robin Lomans and Jeroen van der Laak and Iris Nagtegaal and Francesco Ciompi and Rachel van der Post}, - booktitle = {European Congress of Pathology}, - title = {Deep learning for multi-class cell detection in H&E-stained slides of diffuse gastric cancer}, - abstract = {Background & objective -Diffuse gastric cancer (DGC) is characterized by poorly cohesive cells which are difficult to detect. We propose the first deep learning model to detect classical signet ring cells (SRCs), atypical SRCs, and poorly differentiated cells in H&E-stained slides of DGC. - -Methods -We collected slides from 9 patients with hereditary DGC, resulting in 105 and 3 whole-slide images (WSIs) of gastric resections and biopsies, respectively. The three target cell types were annotated, resulting in 24,695 cell-level annotations. We trained a deep learning model with the Faster-RCNN architecture using 99 WSIs in the development set. - -Results -The algorithm was tested on 9 WSIs in the independent validation set. Model predictions were counted as correct if they were within a 15-micron radius from the expert reference annotations. For evaluation, we split the detection task into two components: class-independent cell localization (recognition of any tumor cell type) and cell-type classification (categorizing localized cells as the correct types). We found (average) F1 scores of 0.69 and 0.93 for the localization and classification tasks, respectively. Thus, we observe that the algorithm does not generally misclassify cells, but rather, the errors mainly arise from missing cells or false positive predictions of cells that do not belong to the three target classes. - -Conclusion -Future work will focus on improving the cell localization performance of the algorithm. Cell localization of the three target classes will be an important task in a clinical application of our model, in which it could be used to improve the detection of DGC lesions among large sets of slides. Moreover, the algorithm will allow for quantitative assessment of DGC patterns, potentially giving new insights in specific morphological features of DGC such as patterns of spatial cell distributions.}, - optnote = {DIAG, PATHOLOGY}, - year = {2023}, -} - -@MastersThesis{Geur23, - author = {Geurtjens, Ruben and Peeters, Dr\'{e} and Jacobs, Colin}, - title = {Self-supervised Out-of-Distribution detection for medical imaging}, - abstract = {Out-of-distribution (OOD) detection is an important aspect of deep learning-based medical imaging approaches for ensuring the safety and accuracy of diagnostic tools. In this paper, we investigate the effectiveness of three self-supervised learning techniques for OOD detection in both a labeled RadboudXR dataset and a clinical dataset with OOD data but no labels. Specifically, we explore two predictive self-supervised techniques and one contrastive self-supervised technique and evaluate their ability to detect OOD samples. Furthermore, we evaluate the performance of the state-of-the-art vision transformer model on medical data both as a standalone method and as the backbone of a self-supervised task. Our results indicate that the contrastive self-supervised method Bootstrap-Your-Own-latent (BYOL) and vision transformer model were not effective in detecting OOD samples. However, the predictive methods performed well on both 2D and 3D data, and demonstrated scalability in difficulty.
These findings suggest the potential utility of self-supervised learning techniques for OOD detection in medical imaging. When determining an OOD cut-off value for clinical usage there are, however, problems with separation between datasets. These challenges suggest that further research is needed before these techniques can be adopted for clinical usage.}, - file = {Geur23.pdf:pdf\\Geur23.pdf:PDF}, - journal = {Master thesis}, - optnote = {DIAG}, - school = {Radboud University Medical Center}, - year = {2023}, -} - -@Article{Hend23a, - author = {Hendrix, Ward and Rutten, Matthieu and Hendrix, Nils and van Ginneken, Bram and Schaefer-Prokop, Cornelia and Scholten, Ernst T. and Prokop, Mathias and Jacobs, Colin}, - title = {Trends in the incidence of pulmonary nodules in chest computed tomography: 10-year results from two Dutch hospitals}, - doi = {10.1007/s00330-023-09826-3}, - url = {https://doi.org/10.1007/s00330-023-09826-3}, - abstract = {Objective -To study trends in the incidence of reported pulmonary nodules and stage I lung cancer in chest CT. - -Methods -We analyzed the trends in the incidence of detected pulmonary nodules and stage I lung cancer in chest CT scans in the period between 2008 and 2019. Imaging metadata and radiology reports from all chest CT studies were collected from two large Dutch hospitals. A natural language processing algorithm was developed to identify studies with any reported pulmonary nodule. - -Results -Between 2008 and 2019, a total of 74,803 patients underwent 166,688 chest CT examinations at both hospitals combined. During this period, the annual number of chest CT scans increased from 9955 scans in 6845 patients in 2008 to 20,476 scans in 13,286 patients in 2019. The proportion of patients in whom nodules (old or new) were reported increased from 38% (2595/6845) in 2008 to 50% (6654/13,286) in 2019. The proportion of patients in whom significant new nodules (≥ 5 mm) were reported increased from 9% (608/6954) in 2010 to 17% (1660/9883) in 2017. The number of patients with new nodules and corresponding stage I lung cancer diagnosis tripled and their proportion doubled, from 0.4% (26/6954) in 2010 to 0.8% (78/9883) in 2017. - -Conclusion -The identification of incidental pulmonary nodules in chest CT has steadily increased over the past decade and has been accompanied by more stage I lung cancer diagnoses. - -Clinical relevance statement -These findings stress the importance of identifying and efficiently managing incidental pulmonary nodules in routine clinical practice.}, - file = {Hend23a.pdf:pdf\\Hend23a.pdf:PDF}, - journal = ER, - volume = {33}, - number = {11}, - optnote = {DIAG, RADIOLOGY}, - pages = {8279-8288}, - pmid = {37338552}, - year = {2023}, -} - -@article{Bosma23, - title={Semi-supervised Learning with Report-guided Pseudo Labels for Deep Learning-based Prostate Cancer Detection Using Biparametric MRI}, - author={Bosma, Joeran S. and Saha, Anindo and Hosseinzadeh, Matin and Slootweg, Ivan and de Rooij, Maarten and Huisman, Henkjan}, - journal={Radiology: Artificial Intelligence}, - optnote = {DIAG, RADIOLOGY}, - doi = {10.1148/ryai.230031}, - url = {https://pubs.rsna.org/doi/full/10.1148/ryai.230031}, - pages={e230031}, - year={2023}, - publisher={Radiological Society of North America}, -} - -@Article{Graa23a, - author = {van der Graaf, Jasper W. and van Hooff, Miranda L. and Buckens, Constantinus F. M. and Rutten, Matthieu and van Susante, Job L. C.
and Kroeze, Robert Jan and de Kleuver, Marinus and van Ginneken, Bram and Lessmann, Nikolas}, - title = {Lumbar spine segmentation in MR images: a dataset and a public benchmark}, - journal = {arXiv:2306.12217}, - optnote = {RADIOLOGY,DIAG}, - year = {2023}, -} - -@article{Thij23, - author = {Thijssen, Linda C.P. and de Rooij, Maarten and Barentsz, Jelle O. and Huisman, Henkjan J.}, - title = {Radiomics based automated quality assessment for T2W prostate MR images}, - doi = {10.1016/j.ejrad.2023.110928}, - year = {2023}, - abstract = {Purpose: The guidelines for prostate cancer recommend the use of MRI in the prostate cancer pathway. Due to the variability in prostate MR image quality, the reliability of this technique in the detection of prostate cancer is highly variable in clinical practice. This leads to the need for an objective and automated assessment of image quality to ensure an adequate acquisition and thereby improve the reliability of MRI. The aim of this study is to investigate the feasibility of Blind/referenceless image spatial quality evaluator (Brisque) and radiomics in automated image quality assessment of T2-weighted (T2W) images. Method: Anonymized axial T2W images from 140 patients were scored for quality using a five-point Likert scale (low, suboptimal, acceptable, good, very good quality) in consensus by two readers. Images were dichotomized into clinically acceptable (very good, good and acceptable quality images) and clinically unacceptable (low and suboptimal quality images) in order to train and verify the model. Radiomics and Brisque features were extracted from a central cuboid volume including the prostate. A reduced feature set was used to fit a Linear Discriminant Analysis (LDA) model to predict image quality. Two hundred times repeated 5-fold cross-validation was used to train the model and test performance by assessing the classification accuracy, the discrimination accuracy as receiver operating curve - area under curve (ROC-AUC), and by generating confusion matrices. Results: Thirty-four images were classified as clinically unacceptable and 106 were classified as clinically acceptable. The accuracy of the independent test set (mean ± standard deviation) was 85.4 ± 5.5%. The ROC-AUC was 0.856 (0.851 – 0.861) (mean; 95% confidence interval). Conclusions: Radiomics AI can automatically detect a significant portion of T2W images of suboptimal image quality. This can help improve image quality at the time of acquisition, thus reducing repeat scans and improving diagnostic accuracy.}, - url = {http://dx.doi.org/10.1016/j.ejrad.2023.110928}, - file = {Thij23.pdf:pdf\Thij23.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - journal = {European Journal of Radiology}, - citation-count = {0}, - automatic = {yes} -} - -@Article{Venk23, - author = {Venkadesh, Kiran Vaidhya and Aleef, Tajwar Abrar and Scholten, Ernst T. and Saghir, Zaigham and Silva, Mario and Sverzellati, Nicola and Pastorino, Ugo and van Ginneken, Bram and Prokop, Mathias and Jacobs, Colin}, - title = {Prior CT Improves Deep Learning for Malignancy Risk Estimation of Screening-detected Pulmonary Nodules}, - doi = {10.1148/radiol.223308}, - number = {2}, - pages = {e223308}, - url = {http://dx.doi.org/10.1148/radiol.223308}, - volume = {308}, - abstract = {Background -Prior chest CT provides valuable temporal information (eg, changes in nodule size or appearance) to accurately estimate malignancy risk.
- -Purpose -To develop a deep learning (DL) algorithm that uses a current and prior low-dose CT examination to estimate 3-year malignancy risk of pulmonary nodules. - -Materials and Methods -In this retrospective study, the algorithm was trained using National Lung Screening Trial data (collected from 2002 to 2004), wherein patients were imaged at most 2 years apart, and evaluated with two external test sets from the Danish Lung Cancer Screening Trial (DLCST) and the Multicentric Italian Lung Detection Trial (MILD), collected in 2004–2010 and 2005–2014, respectively. Performance was evaluated using area under the receiver operating characteristic curve (AUC) on cancer-enriched subsets with size-matched benign nodules imaged 1 and 2 years apart from DLCST and MILD, respectively. The algorithm was compared with a validated DL algorithm that only processed a single CT examination and the Pan-Canadian Early Lung Cancer Detection Study (PanCan) model. - -Results -The training set included 10 508 nodules (422 malignant) in 4902 trial participants (mean age, 64 years ± 5 [SD]; 2778 men). The size-matched external test sets included 129 nodules (43 malignant) and 126 nodules (42 malignant). The algorithm achieved AUCs of 0.91 (95% CI: 0.85, 0.97) and 0.94 (95% CI: 0.89, 0.98). It significantly outperformed the DL algorithm that only processed a single CT examination (AUC, 0.85 [95% CI: 0.78, 0.92; P = .002]; and AUC, 0.89 [95% CI: 0.84, 0.95; P = .01]) and the PanCan model (AUC, 0.64 [95% CI: 0.53, 0.74; P < .001]; and AUC, 0.63 [95% CI: 0.52, 0.74; P < .001]). - -Conclusion -A DL algorithm using current and prior low-dose CT examinations was more effective at estimating 3-year malignancy risk of pulmonary nodules than established models that only use a single CT examination.}, - algorithm = {https://grand-challenge.org/algorithms/temporal-nodule-analysis/}, - citation-count = {0}, - file = {Venk23.pdf:pdf\Venk23.pdf:PDF}, - journal = {Radiology}, - optnote = {DIAG, RADIOLOGY}, - pmid = {37526548}, - year = {2023}, -} - -@article{Leeu23b, -author={K.G. van Leeuwen and M.J. Becks and D. Grob and F. de Lange and J.H.E. Rutten and S. Schalekamp and M.J.C.M. Rutten and B. van Ginneken and M. de Rooij and F.J.A. Meijer}, -title={AI-support for the detection of intracranial large vessel occlusions: One-year prospective evaluation}, -doi={10.1016/j.heliyon.2023.e19065}, -abstract={Purpose -Few studies have evaluated real-world performance of radiological AI-tools in clinical practice. Over one year, we prospectively evaluated the use of AI software to support the detection of intracranial large vessel occlusions (LVO) on CT angiography (CTA). -Method -Quantitative measures (user log-in attempts, AI standalone performance) and qualitative data (user surveys) were reviewed by a key-user group at three timepoints. A total of 491 CTA studies of 460 patients were included for analysis. -Results -The overall accuracy of the AI-tool for LVO detection and localization was 87.6\%, sensitivity 69.1\% and specificity 91.2\%. Out of 81 LVOs, 31 of 34 (91\%) M1 occlusions were detected correctly, 19 of 38 (50\%) M2 occlusions, and 6 of 9 (67\%) ICA occlusions. The product was considered user-friendly. The diagnostic confidence of the users for LVO detection remained the same over the year. The last measured net promoter score was −56\%. The use of the AI-tool fluctuated over the year with a declining trend.
-Conclusions -Our pragmatic approach of evaluating the AI-tool used in clinical practice helped us to monitor the usage, to estimate the perceived added value by the users of the AI-tool, and to make an informed decision about the continuation of the use of the AI-tool.}, -citation-count = {0}, - file = {Leeu23b.pdf:pdf\Leeu23b.pdf:PDF}, - journal = {Heliyon}, - issue = {8}, - volume = {9}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - - -@article{Leeu23a, - author = {van Leeuwen, Kicky G. and de Rooij, Maarten and Schalekamp, Steven and van Ginneken, Bram and Rutten, Matthieu J. C. M.}, - title = {Clinical use of artificial intelligence products for radiology in the Netherlands between 2020 and 2022}, - doi = {10.1007/s00330-023-09991-5}, - url = {http://dx.doi.org/10.1007/s00330-023-09991-5}, - abstract = {Abstract - Objectives - To map the clinical use of CE-marked artificial intelligence (AI)-based software in radiology departments in the Netherlands (n = 69) between 2020 and 2022. - - Materials and methods - Our AI network (one radiologist or AI representative per Dutch hospital organization) received a questionnaire each spring from 2020 to 2022 about AI product usage, financing, and obstacles to adoption. Products that were not listed on www.AIforRadiology.com by July 2022 were excluded from the analysis. - - Results - The number of respondents was 43 in 2020, 36 in 2021, and 33 in 2022. The number of departments using AI has been growing steadily (2020: 14, 2021: 19, 2022: 23). The diversity (2020: 7, 2021: 18, 2022: 34) and the number of total implementations (2020: 19, 2021: 38, 2022: 68) have rapidly increased. Seven implementations were discontinued in 2022. Four hospital organizations reported using an AI platform or marketplace for the deployment of AI solutions. AI is mostly used to support chest CT (17), neuro CT (17), and musculoskeletal radiograph (12) analysis. The budget for AI was reserved in 13 of the responding centers in both 2021 and 2022. The most important obstacles to the adoption of AI remained costs and IT integration. Of the respondents, 28% stated that the implemented AI products realized health improvement and 32% assumed both health improvement and cost savings. - - Conclusion - The adoption of AI products in radiology departments in the Netherlands is showing common signs of a developing market. The major obstacles to reaching widespread adoption are a lack of financial resources and IT integration difficulties. - - Clinical relevance statement - The clinical impact of AI starts with its adoption in daily clinical practice. Increased transparency around AI products being adopted, implementation obstacles, and impact may inspire increased collaboration and improved decision-making around the implementation and financing of AI products. - - Key Points - The adoption of artificial intelligence products for radiology has steadily increased since 2020 to at least a third of the centers using AI in clinical practice in the Netherlands in 2022. - The main areas in which artificial intelligence products are used are lung nodule detection on CT, aided stroke diagnosis, and bone age prediction.
- The majority of respondents experienced added value (decreased costs and/or improved outcomes) from using artificial intelligence-based software; however, major obstacles to adoption remain the costs and IT-related difficulties.}, - citation-count = {0}, - file = {Leeu23a.pdf:pdf\Leeu23a.pdf:PDF}, - journal = {European Radiology}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@MastersThesis{Vyaw23, - author = {Vyawahare, Sanyog and Venkadesh, Kiran Vaidhya and Jacobs, Colin}, - title = {Automated segmentation of subsolid pulmonary nodules in CT scans using deep learning}, - abstract = {Lung cancer is the second most diagnosed cancer and is the leading cause of cancer-related deaths globally. A pulmonary nodule can turn into cancer, leading to fatal outcomes if left undetected. Compared to other types of pulmonary nodules, subsolid nodules (SSN) pose a higher risk of malignancy. Subsolid nodules can be categorized into two subtypes: ground-glass opacities (GGOs) or part-solid nodules (PSNs). The assessment of SSNs by physicians on cancer risk and the stage is highly dependent on the size and volume of the nodule. Therefore accurate segmentations are crucial for volumetric calculations when dealing with SSNs. Currently, semi-automated methods are deployed to segment the boundaries of SSNs. This requires a radiologist’s manual inputs and fine-tuning and could lead to sub-optimal results. Furthermore, there is no study to date which focuses on evaluating the performance of deep learning in SSNs. Over the past decade, deep learning has made significant strides in medical imaging segmentation, and networks like nnUNet have demonstrated great potential in adapting to new datasets. In this research, nnUNet was used to build a fully-automated segmentation model. However, the successful application of the model requires a high-quality dataset with accurate annotations, particularly for the segmentation of SSNs, which has been an area of limited research. To address this, our research focused on creating a carefully curated dataset with annotations provided by an experienced radiologist using a dedicated lung screening workstation. The model achieved a Dice similarity coefficient of 83.3% for the GGOs and 77.6% & 76% for non-solid and solid core respectively for the PSNs on an external validation dataset. The model provides satisfactory segmentation results in a minimal time, without any external input. It is able to learn the behaviour of the semi-automated method to produce similar segmentation. The model has shown promising potential to generate accurate and objective segmentation without human input for the subsolid nodules. The proposed model acts as a good benchmark in the segmentation of subsolid pulmonary nodules.}, - file = {Vyaw23.pdf:pdf\\Vyaw23.pdf:PDF}, - journal = {Master thesis}, - optnote = {DIAG}, - school = {Radboud University Medical Center}, - year = {2023}, -} - -@MastersThesis{Phil23, - author = {Philipp, Lena}, - title = {Body Composition Assessment in 3D CT Images}, - abstract = {Body composition as a diagnostic and prognostic biomarker is gaining importance in various medical fields such as oncology. Therefore, accurate quantification methods are necessary, such as analyzing CT images. While several studies introduced deep learning approaches to automatically segment a single slice, quantifying body composition in 3D remains understudied due to the high required annotation effort.
This thesis explores the use of annotation-efficient strategies in developing a body composition segmentation model for the abdomen and pelvis. To address this, a fine-tuning strategy was proposed to optimize the annotation process and extend the model’s generalization performance trained with L3 slices to the entire abdomen and pelvis. Moreover, a self-supervised pre-training using a contrastive loss was employed to leverage unlabeled data. The goal was to efficiently use the annotated data in developing the segmentation model. The results showed a significant acceleration of the annotation process. However, the pre-training added only limited benefits. The final model achieved excellent results with dice scores of SM: 0.96, VAT: 0.93, and SAT: 0.97. The insights gained from this study are useful to improve annotation procedures, and the developed body composition segmentation model can be used for further evaluation.}, - file = {Phil23.pdf:pdf\\Phil23.pdf:PDF}, - journal = {Master thesis}, - optnote = {DIAG}, - school = {Radboud University Medical Center}, - year = {2023}, -} - -@Article{Vina23, - author = {Vinayahalingam, Shankeeth and Kempers, Steven and Schoep, Julian and Hsu, Tzu-Ming Harry and Moin, David Anssari and van Ginneken, Bram and Fl\"{u}gge, Tabea and Hanisch, Marcel and Xi, Tong}, - title = {Intra-oral scan segmentation using deep learning}, - doi = {10.1186/s12903-023-03362-8}, - url = {http://dx.doi.org/10.1186/s12903-023-03362-8}, - volume = {23}, - abstract = {Abstract - Objective - Intra-oral scans and gypsum cast scans (OS) are widely used in orthodontics, prosthetics, implantology, and orthognathic surgery to plan patient-specific treatments, which require teeth segmentations with high accuracy and resolution. Manual teeth segmentation, the gold standard up until now, is time-consuming, tedious, and observer-dependent. This study aims to develop an automated teeth segmentation and labeling system using deep learning. - - Material and methods - As a reference, 1750 OS were manually segmented and labeled. A deep-learning approach based on PointCNN and 3D U-net in combination with a rule-based heuristic algorithm and a combinatorial search algorithm was trained and validated on 1400 OS. Subsequently, the trained algorithm was applied to a test set consisting of 350 OS. The intersection over union (IoU), as a measure of accuracy, was calculated to quantify the degree of similarity between the annotated ground truth and the model predictions. - - Results - The model achieved accurate teeth segmentations with a mean IoU score of 0.915. The FDI labels of the teeth were predicted with a mean accuracy of 0.894. The optical inspection showed excellent position agreements between the automatically and manually segmented teeth components. Minor flaws were mostly seen at the edges. - - Conclusion - The proposed method forms a promising foundation for time-effective and observer-independent teeth segmentation and labeling on intra-oral scans. - - Clinical significance - Deep learning may assist clinicians in virtual treatment planning in orthodontics, prosthetics, implantology, and orthognathic surgery. 
The impact of using such models in clinical practice should be explored.}, - citation-count = {0}, - file = {Vina23.pdf:pdf\Vina23.pdf:PDF}, - journal = {BMC Oral Health}, - optnote = {DIAG, RADIOLOGY}, - pmid = {37670290}, - year = {2023}, -} - - -@Article{Berg23, - title = {ChatGPT and Generating a Differential Diagnosis Early in an Emergency Department Presentation}, - journal = {Annals of Emergency Medicine}, - issn = {0196-0644}, - doi = {10.1016/j.annemergmed.2023.08.003}, - url = {https://www.sciencedirect.com/science/article/pii/S019606442300642X}, - author = {Hidde ten Berg and Bram van Bakel and Lieke van de Wouw and Kim E. Jie and Anoeska Schipper and Henry Jansen and Rory D. O’Connor and Bram van Ginneken and Steef Kurstjens}, - citation-count = {0}, - optnote = {DIAG, RADIOLOGY}, - pmid = {37690022}, - year = {2023}, -} - - -@article{Vent23, - author = {De Vente, Coen and Vermeer, Koenraad A. and Jaccard, Nicolas and Wang, He and Sun, Hongyi and Khader, Firas and Truhn, Daniel and Aimyshev, Temirgali and Zhanibekuly, Yerkebulan and Le, Tien-Dung and Galdran, Adrian and Ballester, Miguel \'{A}ngel Gonz\'{a}lez and Carneiro, Gustavo and Devika, R G and Hrishikesh, P S and Puthussery, Densen and Liu, Hong and Yang, Zekang and Kondo, Satoshi and Kasai, Satoshi and Wang, Edward and Durvasula, Ashritha and Heras, J\'{o}nathan and Zapata, Miguel \'{A}ngel and Ara\'{u}jo, Teresa and Aresta, Guilherme and Bogunovi\'{c}, Hrvoje and Arikan, Mustafa and Lee, Yeong Chan and Cho, Hyun Bin and Choi, Yoon Ho and Qayyum, Abdul and Razzak, Imran and Van Ginneken, Bram and Lemij, Hans G. and S\'{a}nchez, Clara I.}, - title = {AIROGS: Artificial Intelligence for RObust Glaucoma Screening Challenge}, - doi = {10.1109/tmi.2023.3313786}, - year = {2023}, - abstract = {Abstract unavailable}, - url = {http://dx.doi.org/10.1109/TMI.2023.3313786}, - file = {Vent23.pdf:pdf\Vent23.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - journal = {IEEE Transactions on Medical Imaging}, - citation-count = {0}, - automatic = {yes}, - pages = {1-1},
Kirtani, Pawan and Kodach, Liudmila L and Korski, Konstanty and Kov\'{a}cs, Anik\'{o} and Laenkholm, Anne‐Vibeke and Lang‐Schwarz, Corinna and Larsimont, Denis and Lennerz, Jochen K and Lerousseau, Marvin and Li, Xiaoxian and Ly, Amy and Madabhushi, Anant and Maley, Sai K and Manur Narasimhamurthy, Vidya and Marks, Douglas K and McDonald, Elizabeth S and Mehrotra, Ravi and Michiels, Stefan and Minhas, Fayyaz ul Amir Afsar and Mittal, Shachi and Moore, David A and Mushtaq, Shamim and Nighat, Hussain and Papathomas, Thomas and Penault‐Llorca, Frederique and Perera, Rashindrie D and Pinard, Christopher J and Pinto‐Cardenas, Juan Carlos and Pruneri, Giancarlo and Pusztai, Lajos and Rahman, Arman and Rajpoot, Nasir Mahmood and Rapoport, Bernardo Leon and Rau, Tilman T and Reis‐Filho, Jorge S and Ribeiro, Joana M and Rimm, David and Roslind, Anne and Vincent‐Salomon, Anne and Salto‐Tellez, Manuel and Saltz, Joel and Sayed, Shahin and Scott, Ely and Siziopikou, Kalliopi P and Sotiriou, Christos and Stenzinger, Albrecht and Sughayer, Maher A and Sur, Daniel and Fineberg, Susan and Symmans, Fraser and Tanaka, Sunao and Taxter, Timothy and Tejpar, Sabine and Teuwen, Jonas and Thompson, E Aubrey and Tramm, Trine and Tran, William T and van der Laak, Jeroen and van Diest, Paul J and Verghese, Gregory E and Viale, Giuseppe and Vieth, Michael and Wahab, Noorul and Walter, Thomas and Waumans, Yannick and Wen, Hannah Y and Yang, Wentao and Yuan, Yinyin and Zin, Reena Md and Adams, Sylvia and Bartlett, John and Loibl, Sibylle and Denkert, Carsten and Savas, Peter and Loi, Sherene and Salgado, Roberto and Specht Stovgaard, Elisabeth}, - title = {Pitfalls in machine learning‐based assessment of tumor‐infiltrating lymphocytes in breast cancer: A report of the International Immuno‐Oncology Biomarker Working Group on Breast Cancer}, - doi = {10.1002/path.6155}, - year = {2023}, - abstract = {AbstractThe clinical significance of the tumor‐immune interaction in breast cancer is now established, and tumor‐infiltrating lymphocytes (TILs) have emerged as predictive and prognostic biomarkers for patients with triple‐negative (estrogen receptor, progesterone receptor, and HER2‐negative) breast cancer and HER2‐positive breast cancer. How computational assessments of TILs might complement manual TIL assessment in trial and daily practices is currently debated. Recent efforts to use machine learning (ML) to automatically evaluate TILs have shown promising results. We review state‐of‐the‐art approaches and identify pitfalls and challenges of automated TIL evaluation by studying the root cause of ML discordances in comparison to manual TIL quantification. We categorize our findings into four main topics: (1) technical slide issues, (2) ML and image analysis aspects, (3) data challenges, and (4) validation issues. The main reason for discordant assessments is the inclusion of false‐positive areas or cells identified by performance on certain tissue patterns or design choices in the computational implementation. To aid the adoption of ML for TIL assessment, we provide an in‐depth discussion of ML and image analysis, including validation issues that need to be considered before reliable computational reporting of TILs can be incorporated into the trial and routine clinical management of patients with triple‐negative breast cancer. © 2023 The Authors. 
The Journal of Pathology published by John Wiley & Sons Ltd on behalf of The Pathological Society of Great Britain and Ireland.}, - url = {http://dx.doi.org/10.1002/path.6155}, - file = {Thag23.pdf:pdf\Thag23.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - journal = {The Journal of Pathology}, - citation-count = {2}, - automatic = {yes}, - pages = {498-513}, - volume = {260}, -} - -@article{Meno23, - author = {Menotti, Laura and Silvello, Gianmaria and Atzori, Manfredo and Boytcheva, Svetla and Ciompi, Francesco and Di Nunzio, Giorgio Maria and Fraggetta, Filippo and Giachelle, Fabio and Irrera, Ornella and Marchesin, Stefano and Marini, Niccol\`{o} and M\"{u}ller, Henning and Primov, Todor}, - title = {Modelling digital health data: The ExaMode ontology for computational pathology}, - doi = {10.1016/j.jpi.2023.100332}, - year = {2023}, - abstract = {Abstract unavailable}, - url = {http://dx.doi.org/10.1016/j.jpi.2023.100332}, - file = {Meno23.pdf:pdf\Meno23.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - journal = {Journal of Pathology Informatics}, - citation-count = {0}, - automatic = {yes}, - pages = {100332}, - volume = {14}, -} - -@article{Page23, - author = {Page, David B and Broeckx, Glenn and Jahangir, Chowdhury Arif and Verbandt, Sara and Gupta, Rajarsi R and Thagaard, Jeppe and Khiroya, Reena and Kos, Zuzana and Abduljabbar, Khalid and Acosta Haab, Gabriela and Acs, Balazs and Akturk, Guray and Almeida, Jonas S and Alvarado‐Cabrero, Isabel and Azmoudeh‐Ardalan, Farid and Badve, Sunil and Baharun, Nurkhairul Bariyah and Bellolio, Enrique R and Bheemaraju, Vydehi and Blenman, Kim RM and Botinelly Mendon\c{c}a Fujimoto, Luciana and Bouchmaa, Najat and Burgues, Octavio and Cheang, Maggie Chon U and Ciompi, Francesco and Cooper, Lee AD and Coosemans, An and Corredor, Germ\'{a}n and Dantas Portela, Flavio Luis and Deman, Frederik and Demaria, Sandra and Dudgeon, Sarah N and Elghazawy, Mahmoud and Ely, Scott and Fernandez‐Mart\'{i}n, Claudio and Fineberg, Susan and Fox, Stephen B and Gallagher, William M and Giltnane, Jennifer M and Gnjatic, Sacha and Gonzalez‐Ericsson, Paula I and Grigoriadis, Anita and Halama, Niels and Hanna, Matthew G and Harbhajanka, Aparna and Hardas, Alexandros and Hart, Steven N and Hartman, Johan and Hewitt, Stephen and Hida, Akira I and Horlings, Hugo M and Husain, Zaheed and Hytopoulos, Evangelos and Irshad, Sheeba and Janssen, Emiel AM and Kahila, Mohamed and Kataoka, Tatsuki R and Kawaguchi, Kosuke and Kharidehal, Durga and Khramtsov, Andrey I and Kiraz, Umay and Kirtani, Pawan and Kodach, Liudmila L and Korski, Konstanty and Kov\'{a}cs, Anik\'{o} and Laenkholm, Anne‐Vibeke and Lang‐Schwarz, Corinna and Larsimont, Denis and Lennerz, Jochen K and Lerousseau, Marvin and Li, Xiaoxian and Ly, Amy and Madabhushi, Anant and Maley, Sai K and Manur Narasimhamurthy, Vidya and Marks, Douglas K and McDonald, Elizabeth S and Mehrotra, Ravi and Michiels, Stefan and Minhas, Fayyaz ul Amir Afsar and Mittal, Shachi and Moore, David A and Mushtaq, Shamim and Nighat, Hussain and Papathomas, Thomas and Penault‐Llorca, Frederique and Perera, Rashindrie D and Pinard, Christopher J and Pinto‐Cardenas, Juan Carlos and Pruneri, Giancarlo and Pusztai, Lajos and Rahman, Arman and Rajpoot, Nasir Mahmood and Rapoport, Bernardo Leon and Rau, Tilman T and Reis‐Filho, Jorge S and Ribeiro, Joana M and Rimm, David and Vincent‐Salomon, Anne and Salto‐Tellez, Manuel and Saltz, Joel and Sayed, Shahin and Siziopikou, Kalliopi P and Sotiriou, Christos and Stenzinger, Albrecht and Sughayer, Maher A and 
Sur, Daniel and Symmans, Fraser and Tanaka, Sunao and Taxter, Timothy and Tejpar, Sabine and Teuwen, Jonas and Thompson, E Aubrey and Tramm, Trine and Tran, William T and van der Laak, Jeroen and van Diest, Paul J and Verghese, Gregory E and Viale, Giuseppe and Vieth, Michael and Wahab, Noorul and Walter, Thomas and Waumans, Yannick and Wen, Hannah Y and Yang, Wentao and Yuan, Yinyin and Adams, Sylvia and Bartlett, John Mark Seaverns and Loibl, Sibylle and Denkert, Carsten and Savas, Peter and Loi, Sherene and Salgado, Roberto and Specht Stovgaard, Elisabeth}, - title = {Spatial analyses of immune cell infiltration in cancer: current methods and future directions: A report of the International Immuno‐Oncology Biomarker Working Group on Breast Cancer}, - doi = {10.1002/path.6165}, - year = {2023}, - abstract = {Modern histologic imaging platforms coupled with machine learning methods have provided new opportunities to map the spatial distribution of immune cells in the tumor microenvironment. However, there exists no standardized method for describing or analyzing spatial immune cell data, and most reported spatial analyses are rudimentary. In this review, we provide an overview of two approaches for reporting and analyzing spatial data (raster versus vector‐based). We then provide a compendium of spatial immune cell metrics that have been reported in the literature, summarizing prognostic associations in the context of a variety of cancers. We conclude by discussing two well‐described clinical biomarkers, the breast cancer stromal tumor infiltrating lymphocytes score and the colon cancer Immunoscore, and describe investigative opportunities to improve clinical utility of these spatial biomarkers. © 2023 The Pathological Society of Great Britain and Ireland.}, - url = {http://dx.doi.org/10.1002/path.6165}, - file = {Page23.pdf:pdf\Page23.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - journal = {The Journal of Pathology}, - citation-count = {1}, - automatic = {yes}, - pages = {514-532}, - volume = {260}, -} - -@article{Bokh23b, - author = {Bokhorst, John-Melle and Ciompi, Francesco and \"{O}zt\"{u}rk, Sonay Kus and Oguz Erdogan, Ayse Selcen and Vieth, Michael and Dawson, Heather and Kirsch, Richard and Simmer, Femke and Sheahan, Kieran and Lugli, Alessandro and Zlobec, Inti and van der Laak, Jeroen and Nagtegaal, Iris D.}, - title = {Fully Automated Tumor Bud Assessment in Hematoxylin and Eosin-Stained Whole Slide Images of Colorectal Cancer}, - doi = {10.1016/j.modpat.2023.100233}, - year = {2023}, - abstract = {Abstract unavailable}, - url = {http://dx.doi.org/10.1016/j.modpat.2023.100233}, - file = {Bokh23b.pdf:pdf\Bokh23b.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - journal = {Modern Pathology}, - citation-count = {0}, - automatic = {yes}, - pages = {100233}, - volume = {36}, -} - -@Conference{Leon23, - author = {Leon-Ferre, Roberto A. and Carter, Jodi M. and Zahrieh, David and Sinnwell, Jason P. and Salgado, Roberto and Suman, Vera and Hillman, David and Boughey, Judy C. and Kalari, Krishna R. and Couch, Fergus J. and Ingle, James N. 
and Balkenkohl, Maschenka and Ciompi, Francesco and van der Laak, Jeroen and Goetz, Matthew P.}, - title = {Abstract P2-11-34: Mitotic spindle hotspot counting using deep learning networks is highly associated with clinical outcomes in patients with early-stage triple-negative breast cancer who did not receive systemic therapy}, - doi = {10.1158/1538-7445.sabcs22-p2-11-34}, - year = {2023}, - abstract = {Abstract - Background: Triple-negative breast cancers (TNBC) exhibit high rates of recurrence and mortality. However, recent studies suggest that a subset of patients (pts) with early-stage TNBC enriched in tumor-infiltrating lymphocytes (TILs) have excellent clinical outcomes even in the absence of systemic therapy. Additional histological biomarkers that could identify pts for future systemic therapy escalation/de-escalation strategies are of great interest. TNBC are frequently highly proliferative with abundant mitoses. However, classic markers of proliferation (manual mitosis counting and Ki-67) appear to offer no prognostic value. Here, we evaluated the prognostic effects of automated mitotic spindle hotspot (AMSH) counting on RFS in independent cohorts of systemically untreated early-stage TNBC. - Methods: AMSH counting was conducted with a state-of-the-art deep learning algorithm trained on the detection of mitoses within 2 mm2 areas with the highest mitotic density (i.e. hotspots) in digital H&E images. Details of the development, training and validation of the algorithm were published previously [1] in a cohort of unselected TNBC. We obtained AMSH counts in a centrally confirmed TNBC cohort from Mayo Clinic [2] and focused our analysis on pts who received locoregional therapy but no systemic therapy. Using a fractional polynomial analysis with a multivariable proportional hazards regression model, we confirmed the assumption of linearity in the log hazard for the continuous variable AMSH and evaluated whether AMSH counts were prognostic of RFS. We corroborated our findings in an independent cohort of systemically untreated TNBC pts from the Radboud University Medical Center in the Netherlands (Radboud Cohort). Results are reported at a median follow-up of 8.1 and 6.7 years for the Mayo and Netherlands cohorts, respectively. - Results: Among 182 pts who did not receive systemic therapy in the Mayo Cohort, 140 (77\%) with available AMSH counts were included. The mean age was 61 (range: 31-94), 71\% were postmenopausal, 67\% had tumors ≤ 2cm, and 83\% were node-negative. As expected, most tumors were Nottingham grade 3 (84\%) and had a high Ki-67 proliferation index (54\% with Ki-67 >30\%). Most tumors (73\%) had stromal TILs ≤ 30\%. The median AMSH count was 18 (IQR: 8, 42). AMSH counts were linearly associated with grade and tumor size, with the proportion of pts with grade 3 tumors and size > 2 cm increasing as the AMSH counts increased (p=0.007 and p=0.059, respectively). In a multivariate model controlling for nodal status, tumor size, and stromal TILs, AMSH counts were independently associated with RFS (p < 0.0001). For every 10-point increase in the AMSH count, we observed a 17\% increase in the risk of experiencing an RFS event (HR 1.17, 95\% CI 1.08-1.26). We corroborated our findings in the Radboud Cohort (n=126). The mean age was 68 (range: 40-96), and 81\% were node-negative.
While the median AMSH count was 36 (IQR: 16-63), higher than in the Mayo Cohort (p=0.004), the prognostic impact was similar, with a significant association between AMSH count and RFS (p=0.028) in a multivariate model corrected for nodal status, tumor size, and stromal TILs. For every 10-point increase in the AMSH count in the Netherlands cohort, we observed a 9\% increase in the risk of experiencing an RFS event (HR 1.09, 95\% CI 1.01-1.17). RFS rates according to AMSH counts for both cohorts are shown in the Table. - Conclusions: AMSH counting is a new proliferation biomarker that provides prognostic value independent of nodal status, tumor size, and stromal TILs in systemically untreated early-stage TNBC. Plans are underway to evaluate AMSH counts in additional cohorts of systemically untreated TNBC, and in other disease settings such as prior to neoadjuvant systemic therapy. If validated, this biomarker should be prospectively evaluated as a potential selection biomarker in clinical trials of systemic therapy de-escalation. - References: - 1. PMID: 29994086 - 2. PMID: 28913760 - Table RFS according to AMSH counts in the Mayo and Radboud Cohorts - Citation Format: Roberto A. Leon-Ferre, Jodi M. Carter, David Zahrieh, Jason P. Sinnwell, Roberto Salgado, Vera Suman, David Hillman, Judy C. Boughey, Krishna R. Kalari, Fergus J. Couch, James N. Ingle, Maschenka Balkenkohl, Francesco Ciompi, Jeroen van der Laak, Matthew P. Goetz. Mitotic spindle hotspot counting using deep learning networks is highly associated with clinical outcomes in patients with early-stage triple-negative breast cancer who did not receive systemic therapy [abstract]. In: Proceedings of the 2022 San Antonio Breast Cancer Symposium; 2022 Dec 6-10; San Antonio, TX. Philadelphia (PA): AACR; Cancer Res 2023;83(5 Suppl):Abstract nr P2-11-34.}, - url = {http://dx.doi.org/10.1158/1538-7445.SABCS22-P2-11-34}, - file = {Leon23.pdf:pdf\Leon23.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - journal = {Cancer Research}, - citation-count = {0}, - automatic = {yes}, - pages = {P2-11-34-P2-11-34}, - volume = {83}, -} - -@article{Alve23, - author = {Alves, Nat\'{a}lia and Bosma, Joeran S. and Venkadesh, Kiran V. and Jacobs, Colin and Saghir, Zaigham and de Rooij, Maarten and Hermans, John and Huisman, Henkjan}, - title = {Prediction Variability to Identify Reduced AI Performance in Cancer Diagnosis at MRI and CT}, - doi = {10.1148/radiol.230275}, - year = {2023}, - abstract = {Background: A priori identification of patients at risk of artificial intelligence (AI) failure in diagnosing cancer would contribute to the safer clinical integration of diagnostic algorithms. - Purpose: To evaluate AI prediction variability as an uncertainty quantification (UQ) metric for identifying cases at risk of AI failure in diagnosing cancer at MRI and CT across different cancer types, data sets, and algorithms. - Materials and Methods: Multicenter data sets and publicly available AI algorithms from three previous studies that evaluated detection of pancreatic cancer on contrast-enhanced CT images, detection of prostate cancer on MRI scans, and prediction of pulmonary nodule malignancy on low-dose CT images were analyzed retrospectively. Each task’s algorithm was extended to generate an uncertainty score based on ensemble prediction variability.
AI accuracy percentage and partial area under the receiver operating characteristic curve (pAUC) were compared between certain and uncertain patient groups in a range of percentile thresholds (10%–90%) for the uncertainty score using permutation tests for statistical significance. The pulmonary nodule malignancy prediction algorithm was compared with 11 clinical readers for the certain group (CG) and uncertain group (UG). - Results: In total, 18 022 images were used for training and 838 images were used for testing. AI diagnostic accuracy was higher for the cases in the CG across all tasks (P < .001). At an 80% threshold of certain predictions, accuracy in the CG was 21%–29% higher than in the UG and 4%–6% higher than in the overall test data sets. The lesion-level pAUC in the CG was 0.25–0.39 higher than in the UG and 0.05–0.08 higher than in the overall test data sets (P < .001). For pulmonary nodule malignancy prediction, accuracy of AI was on par with clinicians for cases in the CG (AI results vs clinician results, 80% [95% CI: 76, 85] vs 78% [95% CI: 70, 87]; P = .07) but worse for cases in the UG (AI results vs clinician results, 50% [95% CI: 37, 64] vs 68% [95% CI: 60, 76]; P < .001). - Conclusion: An AI-prediction UQ metric consistently identified reduced performance of AI in cancer diagnosis.}, - url = {http://dx.doi.org/10.1148/radiol.230275}, - file = {Alve23.pdf:pdf\Alve23.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - pmid = {37724961}, - journal = {Radiology}, - citation-count = {1}, - automatic = {yes}, - volume = {308}, - number = {3}, - pages = {e230275}, -} - -@article{Kate23, - author = {Katende, Bulemba and Bresser, Moniek and Kamele, Mashaete and Chere, Lebohang and Tlahali, Mosa and Erhardt, Rahel Milena and Muhairwe, Josephine and Ayakaka, Irene and Glass, Tracy R. and Ruhwald, Morten and van Ginneken, Bram and Murphy, Keelin and de Vos, Margaretha and Amstutz, Alain and Mareka, Mathabo and Mooko, Sekhele Matabo and Reither, Klaus and Gonz\'{a}lez Fern\'{a}ndez, Lucia}, - title = {Impact of a multi-disease integrated screening and diagnostic model for COVID-19, TB, and HIV in Lesotho}, - doi = {10.1371/journal.pgph.0001488}, - year = {2023}, - abstract = {The surge of the COVID-19 pandemic challenged health services globally, and in Lesotho, the HIV and tuberculosis (TB) services were similarly affected. Integrated, multi-disease diagnostic services were proposed solutions to mitigate these disruptions. We describe and evaluate the effect of an integrated, hospital-based COVID-19, TB and HIV screening and diagnostic model in two rural districts in Lesotho, during the period between December 2020 and August 2022. Adults, hospital staff, and children above 5 years attending two hospitals were pre-screened for COVID-19 and TB symptoms. After a positive pre-screening, participants were offered to enroll in a service model that included clinical evaluation, chest radiography, SARS-CoV-2, TB, and HIV testing. Participants diagnosed with COVID-19, TB, or HIV were contacted after 28 days to evaluate their health status and linkage to HIV and/or TB care services. Of the 179160 participants pre-screened, 6623 (3.7%) pre-screened positive, and 4371 (66%) were enrolled in this service model. Of the total 458 diagnoses, only 17 happened in children. One positive rapid antigen test for SARS-CoV-2 was found per 11 participants enrolled, one Xpert-positive TB case was diagnosed per 85 people enrolled, and 1 new HIV diagnosis was done per 182 people enrolled.
Of the 321 (82.9%) participants contacted 28 days after diagnosis, 304 (94.7%) reported being healthy. Of the individuals newly diagnosed with HIV or TB, 18/24 (75.0%) and 46/51 (90.1%) started treatment within 28 days of the diagnosis. This screening and diagnostic model successfully maintained same-day, integrated COVID-19, TB, and HIV testing services, despite frequent disruptions caused by the surge of COVID-19 waves, healthcare-seeking patterns, and the volatile context (social measures, travel restrictions, population lockdowns). There were positive effects in avoiding diagnostic delays and ensuring linkage to services; however, diagnostic yields for adults and children were low. To inform future preparedness plans, research will need to identify essential health interventions and how to optimize them along each phase of the emergency response.}, - url = {http://dx.doi.org/10.1371/journal.pgph.0001488}, - file = {Kate23.pdf:pdf\Kate23.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - journal = {PLOS Global Public Health}, - citation-count = {0}, - automatic = {yes}, - pages = {e0001488}, - volume = {3}, -} - -@article{Palm23, - author = {Palmer, Megan and Seddon, James A. and van der Zalm, Marieke M. and Hesseling, Anneke C. and Goussard, Pierre and Schaaf, H. Simon and Morrison, Julie and van Ginneken, Bram and Melendez, Jaime and Walters, Elisabetta and Murphy, Keelin}, - title = {Optimising computer aided detection to identify intra-thoracic tuberculosis on chest x-ray in South African children}, - doi = {10.1371/journal.pgph.0001799}, - year = {2023}, - abstract = {Diagnostic tools for paediatric tuberculosis remain limited, with heavy reliance on clinical algorithms which include chest x-ray. Computer aided detection (CAD) for tuberculosis on chest x-ray has shown promise in adults. We aimed to measure and optimise the performance of an adult CAD system, CAD4TB, to identify tuberculosis on chest x-rays from children with presumptive tuberculosis. Chest x-rays from 620 children <13 years enrolled in a prospective observational diagnostic study in South Africa were evaluated. All chest x-rays were read by a panel of expert readers who assigned each a radiological reference of either ‘tuberculosis’ or ‘not tuberculosis’. Of the 525 chest x-rays included in this analysis, 80 (40 with a reference of ‘tuberculosis’ and 40 with ‘not tuberculosis’) were allocated to an independent test set. The remainder made up the training set. The performance of CAD4TB in identifying ‘tuberculosis’ versus ‘not tuberculosis’ on chest x-ray was calculated against the radiological reference read. The CAD4TB software was then fine-tuned using the paediatric training set, and we compared the performance of the fine-tuned model to the original model. The area under the receiver operating characteristic curve (AUC) of the original CAD4TB model, prior to fine-tuning, was 0.58. After fine-tuning, the AUC improved to 0.72 (p = 0.0016). In this first-ever description of the use of CAD to identify tuberculosis on chest x-ray in children, we demonstrate a significant improvement in the performance of CAD4TB after fine-tuning with a set of well-characterised paediatric chest x-rays. CAD has the potential to be a useful additional diagnostic tool for paediatric tuberculosis.
We recommend replicating the methods we describe using a larger chest x-ray dataset from a more diverse population and evaluating the potential role of CAD to replace a human-read chest x-ray within treatment-decision algorithms for paediatric tuberculosis.}, - url = {http://dx.doi.org/10.1371/journal.pgph.0001799}, - file = {Palm23.pdf:pdf\Palm23.pdf:PDF}, - optnote = {DIAG, RADIOLOGY}, - journal = {PLOS Global Public Health}, - citation-count = {1}, - automatic = {yes}, - pages = {e0001799}, - volume = {3}, -} - -@Article{Hend23b, - author = {Hendrix, Ward and Hendrix, Nils and Scholten, Ernst T. and Mourits, Mari\"{e}lle and Trap-de Jong, Joline and Schalekamp, Steven and Korst, Mike and van Leuken, Maarten and van Ginneken, Bram and Prokop, Mathias and Rutten, Matthieu and Jacobs, Colin}, - title = {Deep learning for the detection of benign and malignant pulmonary nodules in non-screening chest CT scans}, - doi = {10.1038/s43856-023-00388-5}, - url = {http://dx.doi.org/10.1038/s43856-023-00388-5}, - volume = {3}, - number = {1}, - algorithm = {https://grand-challenge.org/algorithms/lung-nodule-detector-for-ct/}, - abstract = {Abstract - Background - Outside a screening program, early-stage lung cancer is generally diagnosed after the detection of incidental nodules in clinically ordered chest CT scans. Despite the advances in artificial intelligence (AI) systems for lung cancer detection, clinical validation of these systems is lacking in a non-screening setting. - - Method - We developed a deep learning-based AI system and assessed its performance for the detection of actionable benign nodules (requiring follow-up), small lung cancers, and pulmonary metastases in CT scans acquired in two Dutch hospitals (internal and external validation). A panel of five thoracic radiologists labeled all nodules, and two additional radiologists verified the nodule malignancy status and searched for any missed cancers using data from the national Netherlands Cancer Registry. The detection performance was evaluated by measuring the sensitivity at predefined false positive rates on a free receiver operating characteristic curve and was compared with the panel of radiologists. - - Results - On the external test set (100 scans from 100 patients), the sensitivities of the AI system for detecting benign nodules, primary lung cancers, and metastases are 94.3% (82/87, 95% CI: 88.1-98.8%), 96.9% (31/32, 95% CI: 91.7-100%), and 92.0% (104/113, 95% CI: 88.5-95.5%), respectively, at a clinically acceptable operating point of 1 false positive per scan (FP/s). These sensitivities are comparable to or higher than those of the radiologists, albeit with a slightly higher FP/s (average difference of 0.6). - - Conclusions - The AI system reliably detects benign and malignant pulmonary nodules in clinically indicated CT scans and can potentially assist radiologists in this setting.}, - citation-count = {0}, - file = {Hend23b.pdf:pdf\Hend23b.pdf:PDF}, - journal = {Communications Medicine}, - pages = {156}, - optnote = {DIAG, RADIOLOGY}, - pmid = {37891360}, - year = {2023}, -} - -@Conference{Anto23a, - author = {Antonissen, N. and Venkadesh, K. and Gietema, H. and Vliegenthart, R. and Saghir, Z. and Silva, M. and Pastorino, E. and Scholten, E.
Th. and Prokop, M. and Schaefer-Prokop, C. and Jacobs, C.}, - booktitle = ESTI, - title = {Retrospective identification of low-risk individuals eligible for biennial lung cancer screening using PanCan-based and deep learning-based risk thresholds}, - abstract = {Purpose: Current nodule management protocols for managing negative screening results have varying follow-up intervals; Lung-RADS recommends a 1-year screening interval for all negative screens (category 1/2), while the International Lung Screen Trial (ILST) protocol recommends a 1-year interval for participants with indeterminate nodules (PanCan score 1.5%-6%) and a 2-year interval for participants with no or very low-risk nodules (PanCan score < 1.5%). In this study, we retrospectively evaluated the use of PanCan-based and deep learning (DL)-based malignancy thresholds to identify individuals eligible for biennial screening, aiming to reduce screening-related harms and enhance cost-effectiveness without delaying cancer diagnosis. -Methods and materials: All baseline CT scans from the Danish Lung Cancer Screening Trial (DLCST) and the Multicentric Italian Lung Detection (MILD) trial were pooled and linked to a lung cancer diagnosis within 2 years, resulting in 4157 non-cancer and 53 cancer cases. PanCan1a and DL-based malignancy risk scores were calculated for all screen-annotated nodules. For cases with no screen-annotated nodules, the risk score for participants was set to 0%. For both risk calculators, we used a nodule-risk cut-off of < 1.5% to identify low-risk participants for biennial follow-up, based on the ILST protocol. We computed the number of low-risk participants eligible for biennial screening for all included baseline scans (n=4210) using the risk-dominant nodule per scan and calculated the number of cancer cases in the biennial group. -Results: The DL-based and PanCan-based risk thresholds of < 1.5% identified 3729 and 3720 individuals, respectively, meeting the criteria for biennial screening. This would result in a reduction of 88.6% and 88.4% of the scans in the second screening round, respectively. The group referred for biennial screening included 14 and 16 cancers with DL-based and PanCan-based risk scores < 1.5%, respectively. Most of the cancer cases (n=13) had no nodule annotated at baseline CT, leading to a 0% risk score at baseline. Retrospectively, 4 of the 13 cancers were visible on the baseline scan but had not been annotated by the screening radiologist. -Conclusion: Risk threshold-based identification of low-risk subjects for biennial screening largely reduces the number of 1-year follow-up scans. DL-based and PanCan-based risk assessment performed very similarly, indicating the potential of DL for readily available risk assessment of baseline scans. A risk threshold of < 1.5%, as implemented in the ILST protocol, leads to delayed diagnosis of cancers that were either missed at baseline or developed as interval cancers. More research is needed to study the types of cancers with delayed diagnosis and whether such delays lead to a diagnostic stage shift. -Limitations: This study is a retrospective analysis of data from two screening trials, restricted to the baseline round.}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Book{Aubr23, - author = {Aubreville, Marc and Stathonikos, Nikolas and Bertram, Christof A. and Klopfleisch, Robert and Hoeve, Natalie ter and Ciompi, Francesco and Wilm, Frauke and Marzahl, Christian and Donovan, Taryn A.
and Maier, Andreas and Veta, Mitko and Breininger, Katharina}, - title = {Abstract: The MIDOG Challenge 2021}, - doi = {10.1007/978-3-658-41657-7_26}, - pages = {115-115}, - url = {http://dx.doi.org/10.1007/978-3-658-41657-7_26}, - abstract = {Abstract unavailable}, - automatic = {yes}, - citation-count = {0}, - file = {Aubr23.pdf:pdf\Aubr23.pdf:PDF}, - journal = {Bildverarbeitung f\"{u}r die Medizin, Workshop}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Article{Mama23, - author = {Mamani, Gabriel Efrain Humpire and Lessmann, Nikolas and Scholten, Ernst Th. and Prokop, Mathias and Jacobs, Colin and van Ginneken, Bram}, - title = {Kidney abnormality segmentation in thorax-abdomen CT scans}, - doi = {10.48550/ARXIV.2309.03383}, - url = {https://arxiv.org/abs/2309.03383}, - abstract = {In this study, we introduce a deep learning approach for segmenting kidney parenchyma and kidney abnormalities to support clinicians in identifying and quantifying renal abnormalities such as cysts, lesions, masses, metastases, and primary tumors. Our end-to-end segmentation method was trained on 215 contrast-enhanced thoracic-abdominal CT scans, with half of these scans containing one or more abnormalities. We began by implementing our own version of the original 3D U-Net network and incorporated four additional components: an end-to-end multi-resolution approach, a set of task-specific data augmentations, a modified loss function using top-$k$, and spatial dropout. Furthermore, we devised a tailored post-processing strategy. Ablation studies demonstrated that each of the four modifications enhanced kidney abnormality segmentation performance, while three out of four improved kidney parenchyma segmentation. Subsequently, we trained the nnUNet framework on our dataset. By ensembling the optimized 3D U-Net and the nnUNet with our specialized post-processing, we achieved marginally superior results. Our best-performing model attained Dice scores of 0.965 and 0.947 for segmenting kidney parenchyma in two test sets (20 scans without abnormalities and 30 with abnormalities), outperforming an independent human observer who scored 0.944 and 0.925, respectively. In segmenting kidney abnormalities within the 30 test scans containing them, the top-performing method achieved a Dice score of 0.585, while an independent second human observer reached a score of 0.664, suggesting potential for further improvement in computerized methods. All training data is available to the research community under a CC-BY 4.0 license on https://doi.org/10.5281/zenodo.8014289}, - automatic = {yes}, - file = {Mama23.pdf:pdf\\Mama23.pdf:PDF}, - journal = {arXiv:2309.03383}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Article{Brou23, - author = {Brouwer, N.P.M. and Khan, A. and Bokhorst, J.M. and Ayatollahi, F. and Hay, J. and Ciompi, F. and Simmer, F. and Hugen, N. and de Wilt, J.H.W. and Berger, M.D. and Lugli, A. and Zlobec, I. and Edwards, J.
and Nagtegaal, I.D.}, - title = {The complexity of shapes: how the circularity of tumor nodules impacts prognosis in colorectal cancer}, - doi = {10.1016/j.modpat.2023.100376}, - pages = {100376}, - url = {http://dx.doi.org/10.1016/j.modpat.2023.100376}, - abstract = {Abstract unavailable}, - automatic = {yes}, - citation-count = {0}, - file = {Brou23.pdf:pdf\\Brou23.pdf:PDF}, - journal = {Modern Pathology}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Article{Jiao23, - author = {Jiao, Yiping and van der Laak, Jeroen and Albarqouni, Shadi and Li, Zhang and Tan, Tao and Bhalerao, Abhir and Cheng, Shenghua and Ma, Jiabo and Pocock, Johnathan and Pluim, Josien P.W. and Koohbanani, Navid Alemi and Bashir, Raja Muhammad Saad and Raza, Shan E Ahmed and Liu, Sibo and Graham, Simon and Wetstein, Suzanne and Khurram, Syed Ali and Liu, Xiuli and Rajpoot, Nasir and Veta, Mitko and Ciompi, Francesco}, - title = {LYSTO: The Lymphocyte Assessment Hackathon and Benchmark Dataset}, - doi = {10.1109/jbhi.2023.3327489}, - pages = {1-12}, - url = {http://dx.doi.org/10.1109/JBHI.2023.3327489}, - abstract = {Abstract unavailable}, - automatic = {yes}, - citation-count = {0}, - file = {Jiao23.pdf:pdf\\Jiao23.pdf:PDF}, - journal = {IEEE Journal of Biomedical and Health Informatics}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Article{Hump23, - author = {Humpire-Mamani, Gabriel Efrain and Jacobs, Colin and Prokop, Mathias and van Ginneken, Bram and Lessmann, Nikolas}, - title = {Transfer learning from a sparsely annotated dataset of 3D medical images}, - doi = {10.48550/ARXIV.2311.05032}, - url = {https://arxiv.org/abs/2311.05032}, - abstract = {Transfer learning leverages pre-trained model features from a large dataset to save time and resources when training new models for various tasks, potentially enhancing performance. Due to the lack of large datasets in the medical imaging domain, transfer learning from one medical imaging model to other medical imaging models has not been widely explored. This study explores the use of transfer learning to improve the performance of deep convolutional neural networks for organ segmentation in medical imaging. A base segmentation model (3D U-Net) was trained on a large and sparsely annotated dataset; its weights were used for transfer learning on four new downstream segmentation tasks for which a fully annotated dataset was available. We analyzed the influence of the training set size to simulate data scarcity. The results showed that transfer learning from the base model was beneficial when small datasets were available, providing significant performance improvements; fine-tuning the base model was more beneficial than updating all the network weights with vanilla transfer learning. Transfer learning with fine-tuning increased performance by up to 0.129 Dice score (+28\%) compared with training from scratch, and by 0.029 Dice score on average across 23 experiments on the new segmentation tasks. The study also showed that cross-modality transfer learning using CT scans was beneficial. The findings of this study demonstrate the potential of transfer learning to improve the efficiency of annotation and increase the accessibility of accurate organ segmentation in medical imaging, ultimately leading to improved patient care.
We made the network definition and weights publicly available to benefit other users and researchers.}, - automatic = {yes}, - file = {Hump23.pdf:pdf\\Hump23.pdf:PDF}, - journal = {arXiv:2311.05032}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Article{Aswo23, - author = {Aswolinskiy, Witali and Munari, Enrico and Horlings, Hugo M. and Mulder, Lennart and Bogina, Giuseppe and Sanders, Joyce and Liu, Yat-Hee and van den Belt-Dusebout, Alexandra W. and Tessier, Leslie and Balkenhol, Maschenka and Stegeman, Michelle and Hoven, Jeffrey and Wesseling, Jelle and van der Laak, Jeroen and Lips, Esther H. and Ciompi, Francesco}, - title = {PROACTING: predicting pathological complete response to neoadjuvant chemotherapy in breast cancer from routine diagnostic histopathology biopsies with deep learning}, - doi = {10.1186/s13058-023-01726-0}, - url = {http://dx.doi.org/10.1186/s13058-023-01726-0}, - volume = {25}, - abstract = {Abstract - Background - Invasive breast cancer patients are increasingly being treated with neoadjuvant chemotherapy; however, only a fraction of the patients respond to it completely. To prevent overtreatment, there is an urgent need for biomarkers to predict treatment response before administering the therapy. - - Methods - In this retrospective study, we developed hypothesis-driven interpretable biomarkers based on deep learning to predict the pathological complete response (pCR, i.e., the absence of tumor cells in the surgical resection specimens) to neoadjuvant chemotherapy solely using digital pathology H\&E images of pre-treatment breast biopsies. Our approach consists of two steps: First, we use deep learning to characterize aspects of the tumor microenvironment by detecting mitoses and segmenting tissue into several morphology compartments including tumor, lymphocytes and stroma. Second, we derive computational biomarkers from the segmentation and detection output to encode slide-level relationships of components of the tumor microenvironment, such as tumor and mitoses, stroma, and tumor-infiltrating lymphocytes (TILs). - - Results - We developed and evaluated our method on slides from n = 721 patients from three European medical centers with triple-negative and Luminal B breast cancers and performed external independent validation on n = 126 patients from a public dataset. We report the predictive value of the investigated biomarkers for predicting pCR with areas under the receiver operating characteristic curve between 0.66 and 0.88 across the tested cohorts. - - Conclusion - The proposed computational biomarkers predict pCR, but will require more evaluation and fine-tuning for clinical application. Our results further corroborate the potential role of deep learning in automating TILs quantification, and the predictive value of TILs and automated mitoses quantification in breast cancer neoadjuvant treatment planning. We made our method publicly available to extract segmentation-based biomarkers for research purposes.}, - automatic = {yes}, - citation-count = {0}, - file = {Aswo23.pdf:pdf\\Aswo23.pdf:PDF}, - journal = {Breast Cancer Research}, - optnote = {DIAG, RADIOLOGY}, - year = {2023}, -} - -@Comment{jabref-meta: databaseType:biblatex;} - -@Comment{jabref-meta: saveActions:disabled; -all-text-fields[identity] -date[normalize_date] -month[normalize_month] -pages[normalize_page_numbers] -;}