diff --git a/exercise.ipynb b/exercise.ipynb
index f2a4d0f..ed07045 100644
--- a/exercise.ipynb
+++ b/exercise.ipynb
@@ -163,7 +163,7 @@
"tags": []
},
"source": [
- "We will start with the Upsample module that we will use in our U-Net. The right side of the U-Net contains upsampling between the levels. There are many ways to upsample: in the original U-Net, they use a transposed convolution, but this has since fallen a bit out of fashion so we will use the PyTorch Upsample Module [torch.nn.Upsample](https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html#torch.nn.Upsample) instead."
+ "We will start with the Upsample module that we will use in our U-Net. The right side of the U-Net contains upsampling between the levels. There are many ways to upsample: in the original U-Net, they used a transposed convolution, but this has since fallen a bit out of fashion so we will use the PyTorch Upsample Module [torch.nn.Upsample](https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html#torch.nn.Upsample) instead."
]
},
{
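A quick illustration of the upsampling choice discussed in the cell above; the shapes and layer parameters here are illustrative assumptions, not values from the notebook:

```python
import torch

# torch.nn.Upsample rescales spatially with a fixed rule (no learnable weights).
up = torch.nn.Upsample(scale_factor=2, mode="nearest")
x = torch.rand(1, 64, 32, 32)  # (batch, channels, height, width)
print(up(x).shape)  # torch.Size([1, 64, 64, 64])

# The transposed convolution used in the original U-Net achieves the same
# 2x spatial scaling, but with weights that must be learned:
up_t = torch.nn.ConvTranspose2d(64, 64, kernel_size=2, stride=2)
print(up_t(x).shape)  # torch.Size([1, 64, 64, 64])
```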
@@ -385,7 +385,10 @@
"\n",
" def forward(self, x):\n",
" if not self.check_valid(tuple(x.size()[-2:])):\n",
- " raise RuntimeError(\"Can not downsample shape %s with factor %s\" % (x.size(), self.downsample_factor))\n",
+ " raise RuntimeError(\n",
+ " \"Can not downsample shape %s with factor %s\"\n",
+ " % (x.size(), self.downsample_factor)\n",
+ " )\n",
"\n",
" return self.down(x)"
]
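For readers of the reformatted error above: the guard amounts to a per-dimension divisibility test. A standalone sketch (only the name `check_valid` comes from the notebook; the helper body here is an assumption):

```python
import torch

def check_valid(shape, factor):
    # Pooling with stride `factor` tiles evenly only if every spatial
    # dimension is a multiple of `factor`.
    return all(dim % factor == 0 for dim in shape)

x = torch.rand(1, 1, 17, 16)  # height 17 is not divisible by 2
print(check_valid(tuple(x.size()[-2:]), 2))  # False -> forward would raise
```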
@@ -823,7 +826,7 @@
"source": [
"## Putting the U-Net together\n",
"\n",
- "Now we will make a U-Net class that combines all of these components as shown in the image. This image shows a U-Net of depth 5 with specific input channels, feature maps, upsampling, and final activation. Ours will be configurable with regards to depth and other features.\n",
+ "Now we will make a U-Net class that combines all of these components as shown in the image. This image shows a U-Net of depth 4 with specific input channels, feature maps, upsampling, and final activation. Ours will be configurable with regards to depth and other features.\n",
"\n",
"
"
]
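One practical consequence of a configurable depth, sketched under the assumption of a downsample factor of 2: the input must survive `depth - 1` pooling steps, so each spatial dimension has to be divisible by `2 ** (depth - 1)`.

```python
# For the depth-4 U-Net in the image, with an assumed downsample factor of 2:
depth, factor = 4, 2
required_multiple = factor ** (depth - 1)  # 3 pooling steps -> multiple of 8
print(required_multiple)        # 8
print(256 % required_multiple)  # 0 -> a 256x256 input passes every level's check
```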
@@ -1332,11 +1335,17 @@
" # log to tensorboard\n",
" if tb_logger is not None:\n",
" step = epoch * len(loader) + batch_id\n",
- " tb_logger.add_scalar(tag=\"train_loss\", scalar_value=loss.item(), global_step=step)\n",
+ " tb_logger.add_scalar(\n",
+ " tag=\"train_loss\", scalar_value=loss.item(), global_step=step\n",
+ " )\n",
" # check if we log images in this iteration\n",
" if step % log_image_interval == 0:\n",
- " tb_logger.add_images(tag=\"input\", img_tensor=x.to(\"cpu\"), global_step=step)\n",
- " tb_logger.add_images(tag=\"target\", img_tensor=y.to(\"cpu\"), global_step=step)\n",
+ " tb_logger.add_images(\n",
+ " tag=\"input\", img_tensor=x.to(\"cpu\"), global_step=step\n",
+ " )\n",
+ " tb_logger.add_images(\n",
+ " tag=\"target\", img_tensor=y.to(\"cpu\"), global_step=step\n",
+ " )\n",
" tb_logger.add_images(\n",
" tag=\"prediction\",\n",
" img_tensor=prediction.to(\"cpu\").detach(),\n",
diff --git a/solution.ipynb b/solution.ipynb
index 14df61e..6b6a077 100644
--- a/solution.ipynb
+++ b/solution.ipynb
@@ -163,7 +163,7 @@
"tags": []
},
"source": [
- "We will start with the Upsample module that we will use in our U-Net. The right side of the U-Net contains upsampling between the levels. There are many ways to upsample: in the original U-Net, they use a transposed convolution, but this has since fallen a bit out of fashion so we will use the PyTorch Upsample Module [torch.nn.Upsample](https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html#torch.nn.Upsample) instead."
+ "We will start with the Upsample module that we will use in our U-Net. The right side of the U-Net contains upsampling between the levels. There are many ways to upsample: in the original U-Net, they used a transposed convolution, but this has since fallen a bit out of fashion so we will use the PyTorch Upsample Module [torch.nn.Upsample](https://pytorch.org/docs/stable/generated/torch.nn.Upsample.html#torch.nn.Upsample) instead."
]
},
{
@@ -390,7 +390,10 @@
"\n",
" def forward(self, x):\n",
" if not self.check_valid(tuple(x.size()[-2:])):\n",
- " raise RuntimeError(\"Can not downsample shape %s with factor %s\" % (x.size(), self.downsample_factor))\n",
+ " raise RuntimeError(\n",
+ " \"Can not downsample shape %s with factor %s\"\n",
+ " % (x.size(), self.downsample_factor)\n",
+ " )\n",
"\n",
" return self.down(x)"
]
@@ -551,9 +554,13 @@
"\n",
" # SOLUTION 3.1: Initialize your modules and define layers in conv pass\n",
" self.conv_pass = torch.nn.Sequential(\n",
- " torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding),\n",
+ " torch.nn.Conv2d(\n",
+ " in_channels, out_channels, kernel_size=kernel_size, padding=padding\n",
+ " ),\n",
" torch.nn.ReLU(),\n",
- " torch.nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding),\n",
+ " torch.nn.Conv2d(\n",
+ " out_channels, out_channels, kernel_size=kernel_size, padding=padding\n",
+ " ),\n",
" torch.nn.ReLU(),\n",
" )\n",
"\n",
@@ -837,7 +844,7 @@
"source": [
"## Putting the U-Net together\n",
"\n",
- "Now we will make a U-Net class that combines all of these components as shown in the image. This image shows a U-Net of depth 5 with specific input channels, feature maps, upsampling, and final activation. Ours will be configurable with regards to depth and other features.\n",
+ "Now we will make a U-Net class that combines all of these components as shown in the image. This image shows a U-Net of depth 4 with specific input channels, feature maps, upsampling, and final activation. Ours will be configurable with regards to depth and other features.\n",
"\n",
"
"
]
@@ -938,7 +945,9 @@
" # SOLUTION 6.2A: Initialize list here\n",
" for level in range(self.depth):\n",
" fmaps_in, fmaps_out = self.compute_fmaps_encoder(level)\n",
- " self.left_convs.append(ConvBlock(fmaps_in, fmaps_out, self.kernel_size, self.padding))\n",
+ " self.left_convs.append(\n",
+ " ConvBlock(fmaps_in, fmaps_out, self.kernel_size, self.padding)\n",
+ " )\n",
"\n",
" # right convolutional passes\n",
" self.right_convs = torch.nn.ModuleList()\n",
@@ -961,7 +970,9 @@
" mode=self.upsample_mode,\n",
" )\n",
" self.crop_and_concat = CropAndConcat()\n",
- " self.final_conv = OutputConv(self.compute_fmaps_decoder(0)[1], self.out_channels, self.final_activation)\n",
+ " self.final_conv = OutputConv(\n",
+ " self.compute_fmaps_decoder(0)[1], self.out_channels, self.final_activation\n",
+ " )\n",
"\n",
" def compute_fmaps_encoder(self, level: int) -> tuple[int, int]:\n",
" \"\"\"Compute the number of input and output feature maps for\n",
@@ -1000,7 +1011,9 @@
" \"\"\"\n",
" # SOLUTION 6.1B: Implement this function\n",
" fmaps_out = self.num_fmaps * self.fmap_inc_factor ** (level)\n",
- " concat_fmaps = self.compute_fmaps_encoder(level)[1] # The channels that come from the skip connection\n",
+ " concat_fmaps = self.compute_fmaps_encoder(level)[\n",
+ " 1\n",
+ " ] # The channels that come from the skip connection\n",
" fmaps_in = concat_fmaps + self.num_fmaps * self.fmap_inc_factor ** (level + 1)\n",
"\n",
" return fmaps_in, fmaps_out\n",
@@ -1392,11 +1405,17 @@
" # log to tensorboard\n",
" if tb_logger is not None:\n",
" step = epoch * len(loader) + batch_id\n",
- " tb_logger.add_scalar(tag=\"train_loss\", scalar_value=loss.item(), global_step=step)\n",
+ " tb_logger.add_scalar(\n",
+ " tag=\"train_loss\", scalar_value=loss.item(), global_step=step\n",
+ " )\n",
" # check if we log images in this iteration\n",
" if step % log_image_interval == 0:\n",
- " tb_logger.add_images(tag=\"input\", img_tensor=x.to(\"cpu\"), global_step=step)\n",
- " tb_logger.add_images(tag=\"target\", img_tensor=y.to(\"cpu\"), global_step=step)\n",
+ " tb_logger.add_images(\n",
+ " tag=\"input\", img_tensor=x.to(\"cpu\"), global_step=step\n",
+ " )\n",
+ " tb_logger.add_images(\n",
+ " tag=\"target\", img_tensor=y.to(\"cpu\"), global_step=step\n",
+ " )\n",
" tb_logger.add_images(\n",
" tag=\"prediction\",\n",
" img_tensor=prediction.to(\"cpu\").detach(),\n",
@@ -1460,7 +1479,9 @@
},
"outputs": [],
"source": [
- "model = UNet(depth=4, in_channels=1) # SOLUTION 8.1: Declare your U-Net here and name it below\n",
+ "model = UNet(\n",
+ " depth=4, in_channels=1\n",
+ ") # SOLUTION 8.1: Declare your U-Net here and name it below\n",
"model_name = \"my_fav_unet\" # This name will be used in the tensorboard logs\n",
"logger = SummaryWriter(f\"unet_runs/{model_name}\")"
]