Commit aeaf3d0: Added COVIDNet CXR-S and COVIDxSev for airspace grading to scripts
Author: Maya Pavlova, committed Apr 16, 2021
1 parent: 74eaec6
Showing 13 changed files with 1,265 additions and 20 deletions.
.gitignore:

@@ -1,5 +1,6 @@
 .ipynb_checkpoints
 data
+data_sev
 .DS_Store
 *.zip
 __pycache__
New notebook (creates the COVIDxSev airspace severity grading dataset):

@@ -0,0 +1,253 @@
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import os\n",
    "import random\n",
    "from shutil import copyfile\n",
    "import pydicom as dicom\n",
    "import cv2\n",
    "import mdai\n",
    "import json\n",
    "from collections import Counter\n",
    "from pathlib import Path"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [],
   "source": [
    "# set data directory here\n",
    "savepath = 'data_sev'\n",
    "Path(os.path.join(savepath, 'test')).mkdir(parents=True, exist_ok=True)\n",
    "Path(os.path.join(savepath, 'train')).mkdir(parents=True, exist_ok=True)\n",
    "\n",
    "seed = 0\n",
    "np.random.seed(seed) # Reset the seed so all runs are the same.\n",
    "random.seed(seed)\n",
    "MAXVAL = 255 # Range [0 255]\n",
    "\n",
    "# COVIDxSev requires the path to the ricord annotations to also be downloaded\n",
    "ricord_annotations = 'create_ricord_dataset/1c_mdai_rsna_project_MwBeK3Nr_annotations_labelgroup_all_2021-01-08-164102.json'\n",
    "\n",
    "# path to ricord covid-19 images created by create_ricord_dataset/create_ricord_dataset.ipynb\n",
    "# run create_ricord_dataset.ipynb before this notebook\n",
    "ricord_imgpath = 'create_ricord_dataset/ricord_images'\n",
    "ricord_txt = 'create_ricord_dataset/ricord_data_set.txt'\n",
    "ricord_studyids = 'create_ricord_dataset/ricord_patientid_to_studyid_mapping.json'\n",
    "\n",
    "# parameters for the COVIDxSev dataset\n",
    "train = []\n",
    "test = []\n",
    "test_count = {'level1': 0, 'level2': 0, 'NA': 0}\n",
    "train_count = {'level1': 0, 'level2': 0, 'NA': 0}\n",
    "\n",
    "# to avoid duplicates\n",
    "patient_imgpath = {}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 74,
   "metadata": {},
   "outputs": [],
   "source": [
    "mapping = {}\n",
    "mapping['Mild Opacities (1-2 lung zones)'] = 'level1'\n",
    "mapping['Moderate Opacities (3-4 lung zones)'] = 'level2'\n",
    "mapping['Severe Opacities (>4 lung zones)'] = 'level2'\n",
    "mapping['Invalid Study'] = 'NA'\n",
    "\n",
    "classification = [\"Typical Appearance\", \"Indeterminate Appearance\", \"Atypical Appearance\", \"Negative for Pneumonia\"]\n",
    "airspace_Disease_Grading = [\"Mild Opacities (1-2 lung zones)\", \"Moderate Opacities (3-4 lung zones)\", \"Severe Opacities (>4 lung zones)\", \"Invalid Study\"]\n",
    "\n",
    "\n",
    "def get_label_study(annotations_df, studyid):\n",
    "    # collect the airspace grading labels attached to this study\n",
    "    airspace_grading_labels = []\n",
    "    labels = annotations_df[\"annotations\"].loc[annotations_df[\"annotations\"][\"StudyInstanceUID\"]==studyid][\"labelName\"]\n",
    "    for label in list(labels):\n",
    "        if label in mapping.keys():\n",
    "            airspace_grading_labels.append(mapping[label])\n",
    "\n",
    "    # use the most common mapped label as the severity; 'NA' if the study has none\n",
    "    severity = Counter(airspace_grading_labels).most_common()[0][0] if airspace_grading_labels else 'NA'\n",
    "    return severity\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Data distribution from covid datasets:\n",
      "{'level1': 226, 'level2': 683, 'NA': 187}\n"
     ]
    }
   ],
   "source": [
    "filename_label = {'level1': [], 'level2': [], 'NA': []}\n",
    "count = {'level1': 0, 'level2': 0, 'NA': 0}\n",
    "covid_ds = {'ricord': []}\n",
    "\n",
    "# get ricord file names\n",
    "with open(ricord_txt) as f:\n",
    "    ricord_file_names = [line.split()[0] for line in f]\n",
    "\n",
    "# get study ids for every patientid\n",
    "with open(ricord_studyids, 'r') as f:\n",
    "    studyids = json.load(f)\n",
    "\n",
    "# load ricord annotations\n",
    "annotations = mdai.common_utils.json_to_dataframe(ricord_annotations)\n",
    "\n",
    "for imagename in ricord_file_names:\n",
    "    patientid = imagename.split('-')[3] + '-' + imagename.split('-')[4]\n",
    "    study_uuid = imagename.split('-')[-2]\n",
    "\n",
    "    # get complete study id from ricord_studyids json file to match to labels stored in ricord annotations\n",
    "    for studyid in studyids[patientid]:\n",
    "        if studyid[-5:] == study_uuid:\n",
    "            severity_level = get_label_study(annotations, studyid)\n",
    "            break\n",
    "    count[severity_level] += 1\n",
    "    entry = [patientid, imagename, severity_level, 'ricord']\n",
    "    filename_label[severity_level].append(entry)\n",
    "\n",
    "    covid_ds['ricord'].append(patientid)\n",
    "\n",
    "print('Data distribution from covid datasets:')\n",
    "print(count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 76,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "test count: {'level1': 52, 'level2': 98, 'NA': 0}\n",
      "train count: {'level1': 174, 'level2': 585, 'NA': 0}\n"
     ]
    }
   ],
   "source": [
    "# Write images into train and test directories accordingly\n",
    "\n",
    "# get test patients from label file\n",
    "with open('labels/test_COVIDxSev.txt', 'r') as f:\n",
    "    test_patients = [line.split()[0] for line in f]\n",
    "\n",
    "for label in filename_label.keys():\n",
    "    # Skip all studies that do not have an airspace grading\n",
    "    if label != 'NA':\n",
    "        for image in filename_label[label]:\n",
    "            patientid = image[0]\n",
    "            if patientid in test_patients:\n",
    "                copyfile(os.path.join(ricord_imgpath, image[1]), os.path.join(savepath, 'test', image[1]))\n",
    "                test.append(image)\n",
    "                test_count[image[2]] += 1\n",
    "            else:\n",
    "                copyfile(os.path.join(ricord_imgpath, image[1]), os.path.join(savepath, 'train', image[1]))\n",
    "                train.append(image)\n",
    "                train_count[image[2]] += 1\n",
    "\n",
    "print('test count: ', test_count)\n",
    "print('train count: ', train_count)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Final stats\n",
      "Train count: {'level1': 174, 'level2': 585, 'NA': 0}\n",
      "Test count: {'level1': 52, 'level2': 98, 'NA': 0}\n",
      "Total length of train: 759\n",
      "Total length of test: 150\n"
     ]
    }
   ],
   "source": [
    "# final stats\n",
    "print('Final stats')\n",
    "print('Train count: ', train_count)\n",
    "print('Test count: ', test_count)\n",
    "print('Total length of train: ', len(train))\n",
    "print('Total length of test: ', len(test))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [],
   "source": [
    "# export to train and test files\n",
    "# format as patientid, filename, label, source, separated by a space\n",
    "# where label is either \"level1\" for mild airspace grading or \"level2\" for moderate and severe grading\n",
    "with open(\"train_split.txt\", 'w') as train_file:\n",
    "    for sample in train:\n",
    "        info = str(sample[0]) + ' ' + sample[1] + ' ' + sample[2] + ' ' + sample[3] + '\\n'\n",
    "        train_file.write(info)\n",
    "\n",
    "with open(\"test_split.txt\", 'w') as test_file:\n",
    "    for sample in test:\n",
    "        info = str(sample[0]) + ' ' + sample[1] + ' ' + sample[2] + ' ' + sample[3] + '\\n'\n",
    "        test_file.write(info)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
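The export cell above writes one sample per line to train_split.txt and test_split.txt in the form patientid filename label source. The following is a minimal sketch of how those split files could be read back downstream; it is not part of this commit, and the load_split helper and its field names are illustrative assumptions based only on the write format shown above.

# Minimal sketch, not part of this commit: parse a COVIDxSev split file
# produced by the export cell above. Assumes the four space-separated
# fields written there: patientid, filename, label, source.
def load_split(path):
    samples = []
    with open(path) as f:
        for line in f:
            parts = line.split()
            if len(parts) != 4:
                continue  # skip blank or malformed lines
            patientid, filename, label, source = parts
            samples.append({
                'patientid': patientid,
                'filename': filename,
                'label': label,    # 'level1' (mild) or 'level2' (moderate/severe)
                'source': source,  # 'ricord'
            })
    return samples

# Example usage (hypothetical):
# train_samples = load_split('train_split.txt')
# print(len(train_samples), train_samples[0]['label'])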