<!DOCTYPE html>
<html lang="">
<head>
<meta charset="utf-8">
<!-- Meta tags for social media banners; these should be filled in appropriately, as they are your "business card" -->
<!-- Replace the content tag with appropriate information -->
<meta name="description" content="DESCRIPTION META TAG">
<meta property="og:title" content="SOCIAL MEDIA TITLE TAG"/>
<meta property="og:description" content="SOCIAL MEDIA DESCRIPTION TAG TAG"/>
<meta property="og:url" content="URL OF THE WEBSITE"/>
<!-- Path to banner image, should be in the path listed below. Optimal dimensions are 1200x630 -->
<meta property="og:image" content="static/image/your_banner_image.png" />
<meta property="og:image:width" content="1200"/>
<meta property="og:image:height" content="630"/>
<style>
body {
background-color: rgba(37, 38, 36, 0.21);
}
</style>
<meta name="twitter:title" content="TWITTER BANNER TITLE META TAG">
<meta name="twitter:description" content="TWITTER BANNER DESCRIPTION META TAG">
<!-- Path to banner image, should be in the path listed below. Optimal dimensions are 1200x600 -->
<meta name="twitter:image" content="static/images/your_twitter_banner_image.png">
<meta name="twitter:card" content="summary_large_image">
<!-- Keywords for your paper to be indexed by-->
<meta name="keywords" content="KEYWORDS SHOULD BE PLACED HERE">
<meta name="viewport" content="width=device-width, initial-scale=1">
<title>Automated Pathology Image Analysis: Enhancing Digital Pathology with a YOLO-based Object Detection Extension for QuPath </title>
<link rel="icon" type="image/x-icon" href="static/images/tseg_icon.ico">
<link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
rel="stylesheet">
<link rel="stylesheet" href="static/css/bulma.min.css">
<link rel="stylesheet" href="static/css/bulma-carousel.min.css">
<link rel="stylesheet" href="static/css/bulma-slider.min.css">
<link rel="stylesheet" href="static/css/fontawesome.all.min.css">
<link rel="stylesheet"
href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
<link rel="stylesheet" href="static/css/index.css">
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
<script src="https://documentcloud.adobe.com/view-sdk/main.js"></script>
<script defer src="static/js/fontawesome.all.min.js"></script>
<script src="static/js/bulma-carousel.min.js"></script>
<script src="static/js/bulma-slider.min.js"></script>
<script src="static/js/index.js"></script>
</head>
<body>
<section class="hero">
<div class="hero-body">
<div class="container is-max-desktop">
<div class="columns is-centered">
<div class="column has-text-centered">
<h1 class="title is-1 publication-title" style="color: #006fff;">Automated Pathology Image Analysis: Enhancing Digital Pathology with a YOLO-based Object Detection Extension for QuPath </h1>
<div class="is-size-5 publication-authors">
<!-- Paper authors -->
<span class="author-block">
<a href="FIRST AUTHOR PERSONAL LINK" target="_blank">Arif Enes Aydın</a><sup>*</sup>,</span>
<span class="author-block">
<a href="SECOND AUTHOR PERSONAL LINK" target="_blank">Metehan Sarikaya</a><sup>*</sup>,</span>
</div>
<div class="is-size-5 publication-authors">
<span class="author-block">Hacettepe University<br>AIN492 End of Project Report</span>
<span class="eql-cntrb"><small><br><sup>*</sup>Indicates Equal Contribution</small></span>
</div>
<div class="column has-text-centered">
<div class="publication-links">
<!-- Arxiv PDF link -->
<span class="link-block">
<a href="https://arxiv.org/pdf/<ARXIV PAPER ID>.pdf" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-file-pdf"></i>
</span>
<span>Paper</span>
</a>
</span>
<!-- Supplementary PDF link -->
<span class="link-block">
<a href="static/pdfs/https://docs.google.com/document/d/1cmAKumk36DdSGgfY3ojB9LuxEamFpxDZ/edit?usp=sharing&ouid=114558933062836142660&rtpof=true&sd=true" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fas fa-file-pdf"></i>
</span>
<span>Supplementary</span>
</a>
</span>
<!-- Github link -->
<span class="link-block">
<a href="https://github.com/ae-aydin/qupath-extension-tseg" target="_blank"
class="external-link button is-normal is-rounded is-dark">
<span class="icon">
<i class="fab fa-github"></i>
</span>
<span>Code</span>
</a>
</span>
</div>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Authors -->
<section class="section">
<div class="container is-max-desktop">
<div class="columns is-centered has-text-centered">
<div class="column">
<h2 class="title is-3" style="color: #ff0048;">Authors</h2>
<div class="columns is-centered">
<div class="column">
<img src="static/images/Author1.jpg" alt="Author 1" class="author-image" style="width: 300px;">
<h2 class="subtitle has-text-centered" style="font-family: Arial, sans-serif; font-size: 15px;">Arif Enes Aydın</h2>
</div>
<div class="column">
<img src="static/images/Author2.jpg" alt="Author 2" class="author-image" style="width: 300px;">
<h2 class="subtitle has-text-centered" style="font-family: Arial, sans-serif; font-size: 15px;">Metehan Sarikaya</h2>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- Paper abstract -->
<section class="section">
<div class="container is-max-desktop">
<!-- Abstract. -->
<div class="columns is-centered has-text-centered">
<div class="column">
<h2 class="title is-3" style="color: #ff0048;">Abstract</h2>
<div class="content has-text-justified">
<p style="font-size: 100%">
Pathology diagnosis and analysis are known for being time-consuming and prone to errors. Digital pathology analysis tools have been introduced to address these challenges, but limitations still persist. Our project was undertaken to address a weakness in pathology analysis tools by leveraging artificial intelligence. QuPath, the tool in question, is widely acknowledged for its utility in assisting pathologists with the analysis of various pathology slides. It allows users to analyze slides and images mainly at the cell level. However, its functionality is constrained by its ongoing development and increasing demand. Notably, it lacks the capability to effectively detect specific areas, such as tumor regions. In response to this limitation, an artificial intelligence model was developed to automatically identify tumor areas, thereby enhancing the tool's functionality for cell-level operations. We decided to employ YOLO-based computer vision models for this purpose, chosen for their ease of use and widespread popularity within the field. Initially, consideration was given to object detection models; however, it was later determined that segmentation models within the YOLO framework would offer superior performance. Collaborating with the Hacettepe University Pathology Department, we tailored the model to their specific needs and requirements. Using data provided by the department in the form of whole slide images, we tiled these slides and trained a YOLO model, achieving ~0.75 recall, ~0.85 precision and ~0.8 F1 score. This performance presents a promising foundation for assisting pathologists in the initial stages of adoption. Following model development, an extension for QuPath was created to facilitate seamless integration of the AI model. This extension enables users to automatically identify desired areas, particularly tumor regions, with a single click. The main aim of this project is to accelerate the workflow of pathologists and alleviate their workload through the deployment of innovative solutions. Ongoing efforts are directed towards further refining the model's performance and optimizing inference speed to enhance diagnostic capabilities.
</p>
</div>
</div>
</div>
</div>
</section>
<!-- End paper abstract -->
<!-- Paper Introduction -->
<section class="section">
<div class="container is-max-desktop">
<!-- Introduction. -->
<div class="columns is-centered has-text-centered">
<div class="column">
<h2 class="title is-3" style="color: #ff0048;">Introduction</h2>
<div class="content has-text-justified">
<p style="font-size: 100%">
Pathology diagnosis plays a crucial role in healthcare, guiding treatment decisions and prognoses for patients. However, traditional methods of pathology analysis are often time-consuming and prone to errors. With the advent of digital pathology analysis tools, there has been a significant advancement in the field, promising greater efficiency and accuracy. Nonetheless, certain limitations persist, hindering the full realization of the potential benefits of these tools. Our project seeks to address a specific weakness in pathology analysis tools through the integration of artificial intelligence (AI) techniques. The primary problem addressed by our project is the lack of robustness in current pathology analysis tools, particularly in the identification of specific regions of interest, such as tumor areas, within pathology slides. Despite the capabilities of existing tools like QuPath, which facilitate the analysis of pathology slides at a cellular level, they often fall short in accurately detecting and capturing important regions, leading to potential oversight and misdiagnosis by pathologists.
The primary focus of this project is the segmentation of breast cancer areas within pathology images, with the objective of integrating these models into digital pathology analysis tools. It's noteworthy that much of the existing research in this field concentrates on nuclei-level segmentation, which diverges from the primary aim of our project. Instead, our emphasis lies on accurately capturing breast cancer areas within pathology images, with the ultimate goal of facilitating more effective diagnostic analysis within digital pathology platforms.
Based on the identified problem and the insights gained from the literature review, our hypothesis is that integrating AI techniques, specifically YOLO-based computer vision models, into existing pathology analysis tools can enhance the detection of tumor areas and improve the overall efficiency of pathology analysis. We propose to develop a YOLO-based model trained on annotated pathology images to automatically identify tumor regions within slides. By leveraging the capabilities of YOLO models, which are known for their speed and accuracy in computer vision tasks, we aim to address the limitations of current pathology analysis tools and provide pathologists with a more efficient and reliable tool for diagnostic analysis.
In summary, our proposed solution involves the integration of AI techniques, particularly YOLO-based computer vision models, into existing pathology analysis tools to improve the accuracy and efficiency of tumor segmentation. Through this approach, we aim to enhance the diagnostic capabilities of pathologists and ultimately improve patient outcomes in the field of pathology.
</p>
</div>
</div>
</div>
</div>
</section>
<!-- End paper Introduction -->
<!-- Paper Related Work -->
<section class="section">
<div class="container is-max-desktop">
<!-- Related Work. -->
<div class="columns is-centered has-text-centered">
<div class="column">
<h2 class="title is-3" style="color: #ff0048;" >Related Work</h2>
<div class="content has-text-justified">
<p style="font-size: 100%">
Progress in breast cancer diagnosis through image analysis has been extensive and has developed significantly over the years. Initially, researchers focused on traditional machine learning methods such as KNN (k-nearest neighbors with K = 5), NB (naive Bayes with kernel density), DT (decision tree), and SVM (support vector machine). The main problem with these methods was that they relied on small datasets and were weak at extracting complex image patterns. To compensate for the small datasets, they relied on labor-intensive feature engineering. However, deep learning techniques have emerged as a powerful alternative, capable of handling large datasets and automatically extracting abstract features from data.
Several notable studies have demonstrated the efficacy of both traditional machine learning and deep learning methods in breast cancer diagnosis based on histopathological images. For instance, Zhang et al. (2013) introduced a cascade random subspace ensemble scheme for microscopic biopsy image classification, achieving a high classification accuracy of 99.25%. Similarly, Kowal et al. (2013) and Filipczuk et al. (2013) employed clustering algorithms and traditional machine learning methods on 500 real-case medical images from 50 patients, achieving breast cancer image classification with approximately 96–100% accuracy.
While machine learning methods were producing strong results in classifying breast cancer, dataset sizes kept growing over time. Spanhol et al. (2016b) introduced a dataset called BreakHis, which consists of 7,909 breast cancer histopathology images acquired from 82 patients. Aksac et al. (2019) introduced a dataset of 162 breast cancer histopathology images, namely the breast cancer histopathological annotation and diagnosis dataset (BreCaHAD).
With the growth of available datasets, deep learning methods such as convolutional neural networks (CNNs) have shown remarkable performance in breast cancer diagnosis. Spanhol et al. (2016b) employed the BreakHis dataset for histopathology image classification. Initially adopting LeNet, their efforts failed to surpass prior results, which stood at 72%, so they continued with a variant of AlexNet (Krizhevsky et al., 2012) that improved classification accuracy by 4–6%. Bayramoglu et al. (2016) also utilized CNNs to classify histopathological images, achieving competitive results compared to traditional machine learning approaches. Moreover, studies such as Araújo et al. (2017) and Han et al. (2017) explored CNN-based methods for multi-class classification, providing valuable insights for diagnosis and prognosis.
However, limitations regarding the size of available datasets persist. To address this limitation, transfer learning techniques have been proposed, leveraging models pre-trained on large datasets like ImageNet to improve performance on smaller datasets. Transfer learning has proven itself on real-world problems: it repurposes knowledge from one task to another, increasing model performance by reusing previously learned features, reducing the need for hand-labeled data, and accelerating training in domains such as computer vision. For instance, Nawaz et al. (2018) and Motlagh et al. (2018) employed transfer learning with models like DenseNet and ResNet_V1_152 to achieve high accuracy in breast cancer classification. Sanchez-Morillo and Fernandez-Granero et al. (2020) employed the DeepLabv3+ segmentation method, which takes modified versions of the MobileNetV2 (Sandler, Howard, Zhu, Zhmoginov & Chen, 2018), Xception, or ResNet architectures as a backbone network, achieving nearly 64% MIoU and 93% FWIoU.
</p>
</div>
</div>
</div>
</div>
</section>
<!-- End paper Related Work -->
<section class="section">
<div class="container is-max-desktop">
<!-- Methodology. -->
<div class="columns is-centered has-text-centered">
<div class="column">
<h2 class="title is-3" style="color: #ff0048;"> Methodology</h2>
<div class="content has-text-justified">
<br><br>
Our work began by investigating the requirements of the task and understanding the specific needs of the pathology department, given that they would be providing the data and conducting the annotations. Through meetings, we concluded that accurately capturing the precise shape of tumor areas is essential for pathologists. It became evident that the existing tool in question, QuPath, lacks the capability to differentiate between tumor and non-tumor areas when annotations are made using rectangles (bounding boxes). Consequently, the coexistence of both tumor and non-tumor regions within a single rectangle would be mostly unavoidable. As a result, the focus of our approach shifted towards segmentation rather than object detection. While awaiting annotations, our attention turned to preparing the data for compatibility with deep learning models, specifically YOLO.
The annotations were made in QuPath, since it offers the flexibility to annotate data in various formats relevant to computer vision, including bounding boxes for object detection and polygons for segmentation. However, a challenge arose from the size and resolution of pathology slides, which often exceed 5-10 gigabytes and reach resolutions of up to 100,000 x 100,000 pixels. Moreover, these slides are stored in specialized file formats, making direct use with the model impossible without dividing them into smaller tiles.
<br><br><br>
<img src="static/images/annotedSlide.jpeg" class="interpolation-image" alt="Annotated pathology slide" style="display: block; margin-left: auto; margin-right: auto; width: 60%;"/>
<h2 class="subtitle has-text-centered" style="font-family: Arial, sans-serif; font-size: 12px;">Annotated pathology slide</h2>
<br>
To address this, we explored QuPath's built-in scripting tool, which utilizes the Groovy language. This tool provides functions to interact with QuPath, enabling diverse import/export and analysis capabilities. Our focus then shifted to leveraging this scripting tool to achieve tiling functionality. Through scripting within QuPath, we devised a method to extract annotated areas as tiles from the currently opened image, alongside their segmentation masks.
<br><br><br>
<img src="static/images/tile_argument.png" class="interpolation-image" alt="Tiling arguments" style="display: block; margin-left: auto; margin-right: auto; width: 60%;"/>
<h2 class="subtitle has-text-centered" style="font-family: Arial, sans-serif; font-size: 12px;">Tiling arguments</h2>
<br>
We used the parameters given in the code snippet:
<br><br>
<span style="font-weight: bold">classNames:</span> An array containing the names of classes or labels. In our project, we have one class named "Tumor".<br>
<span style="font-weight: bold">downsample:</span> A factor by which the image is downsampled. It's set to 3, meaning the image is reduced in size by a factor of 3. We experimented with various downsampling values, but when we opted for a high factor, we ended up with a limited number of images. Given that our dataset was already smaller than the one used in the reference project, we aimed to avoid further loss of information.<br>
<span style="font-weight: bold">patchSize:</span> Defines the size of patches or segments of the image. It's set to 640, which corresponds to the input size of YOLO.<br>
<span style="font-weight: bold">pixelOverlap:</span> The amount of overlap between adjacent patches, measured in pixels. It's set to 160, suggesting that adjacent patches will overlap by 160 pixels.<br>
<span style="font-weight: bold">imageExtension:</span> Represents the file extension of the image files. It's set to ".png", denoting that the images are formatted in PNG. PNG format was chosen over JPG or TIFF due to its compatibility and superior quality.<br>
<span style="font-weight: bold">multiChannel:</span> A boolean variable indicating whether the images are multi-channel or not.<br>
<span style="font-weight: bold">onlyAnnotated:</span> Another boolean option. When set to true, it indicates that only annotated images will undergo processing. We set it to true to isolate the annotated regions from the background. Without this setting, the scanning of images resulted in a high presence of background, and the tiling process without background cleaning led to a lower frequency of annotated regions.<br>
<img src="static/images/tisue_mask.png" class="interpolation-image" alt="An example tile and its mask." style="display: block; margin-left: auto; margin-right: auto; width: 60%;"/>
<h2 class="subtitle has-text-centered" style="font-family: Arial, sans-serif; font-size: 12px;">An example tile and its mask.</h2>
<br>
After the completion of annotations and the creation of tiles for each slide, all tiles and masks were accumulated within a directory for further processing. However, YOLO's data format differs from that of other models: each image's annotations are stored in a text file, with each object represented as a line in the following format:
<br><br>
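<pre><code>&lt;class-index&gt; x1 y1 x2 y2 ... xn yn</code></pre>
Each line describes one object: the class index (0 for our single "Tumor" class) followed by the vertices of its polygon, with all coordinates normalized to [0, 1] by the tile width and height; this is the standard YOLO segmentation label layout, shown here for reference.
<br><br>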
To accommodate this format, we converted the masks by extracting polygons and saving them in the appropriate format. Notably, the default YOLO format does not support polygons with holes. To address this limitation, we adopted an open-source solution (<a href="https://github.com/ryouchinsa/Rectlabel-support/blob/master/general_json2yolo.py">link</a>) that connects inner holes to the outer contour. This approach enables us to accurately represent holes within the polygons in YOLO format without deforming the actual polygon coordinates.
<br><br><br>
<img src="static/images/mask.png" class="interpolation-image" alt="An example original mask, and visualization of the created YOLO format." style="display: block; margin-left: auto; margin-right: auto; width: 60%;"/>
<h2 class="subtitle has-text-centered" style="font-family: Arial, sans-serif; font-size: 12px;">An example original mask, and visualization of the created YOLO format.</h2>
<br>
<span style="font-weight: bold">Model Training</span> As described in the dataset and introduction part, the aim of this project was the segmentation of tumor-contained areas. Due to its notable speed and efficiency, we chose YOLO as the base model over state-of-the-art instance segmentation models like Fast R-CNN, Mask R-CNN, and Panoptic FPN. YOLO, short for "You Only Look Once," has emerged as a pivotal technology in various domains due to its real-time object detection capabilities. The model works based on the image’s grids. The algorithm of this process works by dividing the input image into a grid of cells. Each cell is responsible for predicting bounding boxes and class probabilities for an object within it. This approach allows YOLO to be computationally efficient. In this context, YOLOv8 represents a single-stage object detection model. The single-stage detector works by processing all grids at once. A key disparity between a two-stage detector and a single-stage detector lies in how they handle processing. Specifically, in the two-stage approach, the processing of regions of interest and classification occurs on different cycles.
<br><br>
<span style="font-weight: bold">Extension</span> After obtaining the model, we explored various methods to integrate it into QuPath. Since QuPath is written in Java, which is not widely used in machine learning, it lacks the convenience of tools and libraries for seamless integration with machine learning models. Additionally, the ultralytics library, which facilitates YOLO model training and predictions in Python, presents challenges due to its complex class structures and design, making implementation of model inference through Java more difficult, especially considering time constraints. Our approach involved using a Python script to handle model inference and output conversion, while restricting the extension to input/output tasks only. Although this approach requires additional setup, including the presence of Python and the script on the computer, it offers significantly more flexibility in terms of model integration. By separating the model inference process from Java, we were able to overcome the limitations posed by the language and leverage the rich ecosystem of machine learning tools available in Python.
<br><br><br>
<img src="static/images/qupath.jpeg" class="interpolation-image" alt="Extension interface. Containing inputs for script directory (which contains script, model and virtual environment), model confidence, model IoU. The outputs are displayed on the text field at the bottom." style="display: block; margin-left: auto; margin-right: auto; width: 60%;"/>
<h2 class="subtitle has-text-centered" style="font-family: Arial, sans-serif; font-size: 12px;">Extension interface.</h2>
<br>
<p style="font-size: 100%">
The overall workflow of the extension is as follows:
<br>
<li><span style="font-weight: bold">(1.)</span> A region of interest (ROI) is selected within QuPath, which can be of any size but must be in the form of a rectangle.</li>
<br>
<li><span style="font-weight: bold">(2.)</span> Upon pressing the "Segment Selected Region" button:</li>
<br>
<li><span style="font-weight: bold">(2.a.)</span> The selected region is tiled to match the size required for model inference. These tiled images are temporarily saved within the script directory.</li>
<br>
<li><span style="font-weight: bold">(2.b.)</span> The script executes, inferring each tile and reconstructing the main mask matching selected ROI in size. Once the main ROI segment mask is generated, the polygons representing the segmented areas are extracted and converted to the GEOJSON format, which QuPath's input/output operations can process. This file is temporarily saved
<br>
<li><span style="font-weight: bold">(3.)</span> After the script completes its execution, the extension reads the GEOJSON file and imports the predictions, which are then displayed on the QuPath screen. Subsequently, the extension cleans up the temporary files and prepares for the next prediction.</li>
<br>
<img src="static/images/workflow.jpeg" class="interpolation-image" alt="Workflow of the extension." style="display: block; margin-left: auto; margin-right: auto; width: 60%;"/>
<h2 class="subtitle has-text-centered" style="font-family: Arial, sans-serif; font-size: 12px;">Workflow of the extension.</h2>
</p>
</div>
</div>
</div>
</div>
</section>
<div class="container is-max-desktop">
<div class="columns is-centered has-text-centered">
<div class="column">
<h2 class="title is-3" style="color: #ff0048;">Results </h2>
<div class="content has-text-justified">
<p style="font-size: 100%">
Our best-performing model was achieved with YOLOv8m-seg. The obtained validation loss graphs indicate that the training was consistent and reached a plateau, suggesting that the model learned as much as it could and that training concluded before overfitting occurred. Despite our efforts, our best-performing model achieved only a modest ~0.75 mAP@0.5 score, which falls below the desired standard for medical applications of AI. This score may be attributed to the limited quantity of data available for training. When inspecting the confusion matrix, it is important to note that there are no True Negatives (TN), since there is only a single class. From the confusion matrix, we can deduce the following metrics:
<br>
Recall: TP / (TP + FN) = 0.735<br>
Precision: TP / (TP + FP) = 0.842<br>
F1 Score: 2 * (Recall * Precision) / (Recall + Precision) = 0.785 <br>
Additionally, since the number of False Negatives (FN) is greater than the number of False Positives (FP), it suggests that the model tends to miss more instances than it misclassifies. This observation may indicate that the model could benefit from either a larger and more diverse training set or improved annotations.
From these graphs, it can be inferred that while the model's performance may not be groundbreaking, it represents a solid starting point, particularly when considering the limited quantity of data available for training. Even though the model appears to capture most of the areas, this is largely due to the nature of the currently annotated data, which includes stained tumor areas. There are situations where tumor areas are not stained but can still be detected by a professional pathologist, as well as instances of stained areas that are not tumors. Our current aim is to enhance the model's ability to differentiate and successfully capture these challenging areas.
</p>
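As a quick sanity check, the reported F1 score follows directly from the recall and precision above:
<pre><code># Recompute the F1 score from the reported recall and precision.
recall, precision = 0.735, 0.842
f1 = 2 * (recall * precision) / (recall + precision)
print(f"F1 = {f1:.3f}")   # 0.785, matching the value reported above
</code></pre>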
</div>
</div>
</div>
</div>
</section>
<section class="hero is-small">
<div class="hero-body">
<div class="container">
<div id="results-carousel" class="carousel results-carousel">
<div class="item has-text-centered">
<!-- Your image here -->
<img src="static/images/loss.jpg" alt="Models train-validation loss plots"/>
<h2 class="subtitle has-text-centered">
Models train-validation loss plots.
</h2>
</div>
<div class="item has-text-centered">
<!-- Your image here -->
<img src="static/images/Accuracy.jpg" alt="Model’s performance metrics"/>
<h2 class="subtitle has-text-centered">
Model’s performance metrics.
</h2>
</div>
<div class="item has-text-centered">
<!-- Your image here -->
<img src="static/images/ROC.png" alt="Precision-Recall Curve and [email protected] value"/>
<h2 class="subtitle has-text-centered">
Precision-Recall Curve and [email protected] value.
</h2>
</div>
<div class="item has-text-centered">
<!-- Your image here -->
<img src="static/images/F1Conficence.png" alt="F1 - Confidence Curve. The best confidence value found as 0.341"/>
<h2 class="subtitle has-text-centered">
F1 - Confidence Curve. The best confidence value found as 0.341
</h2>
</div>
<div class="item has-text-centered">
<!-- Your image here -->
<img src="static/images/ConfusionMatrix.png" alt="Confusion matrix"/>
<h2 class="subtitle has-text-centered">
Confusion matrix.
</h2>
</div>
</div>
</div>
</div>
</section>
<!-- End image carousel -->
<!-- Youtube video -->
<section class="hero is-small is-light">
<div class="hero-body">
<div class="container">
<!-- Paper video. -->
<h2 class="title is-3"style="color: #ff0048;">Video Presentation</h2>
<div class="columns is-centered has-text-centered">
<div class="column is-four-fifths">
<div class="publication-video">
<!-- Youtube embed code here -->
<iframe src="https://www.youtube.com/embed/hG85qM24Qno?si=83Zp4VHNC9T6BZv2" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
</div>
</div>
</div>
</div>
</div>
</section>
<!-- End youtube video -->
<!-- Paper poster -->
<section class="hero is-small is-light">
<div class="hero-body">
<div class="container">
<h2 class="title"style="color: #ff0048;">Poster</h2>
<iframe src="static/pdfs/poster.pdf" width="100%" height="550">
</iframe>
</div>
</div>
</section>
<!--End paper poster -->
<!--BibTex citation -->
<section class="section" id="BibTeX">
<div class="container is-max-desktop content">
<h2 class="title">BibTeX</h2>
<pre><code>BibTex Code Here</code></pre>
</div>
</section>
<!--End BibTex citation -->
<footer class="footer">
<div class="container">
<div class="columns is-centered">
<div class="column is-8">
<div class="content">
<p>
This page was built using the <a href="https://github.com/eliahuhorwitz/Academic-project-page-template" target="_blank">Academic Project Page Template</a> which was adopted from the <a href="https://nerfies.github.io" target="_blank">Nerfies</a> project page.
You are free to borrow the source code of this website; we just ask that you link back to this page in the footer. <br> This website is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/" target="_blank">Creative
Commons Attribution-ShareAlike 4.0 International License</a>.
</p>
</div>
</div>
</div>
</div>
</footer>
<!-- Statcounter tracking code -->
<!-- You can add a tracker to track page visits by creating an account at statcounter.com -->
<!-- End of Statcounter Code -->
</body>
</html>