
feat: 🎨 add task tags to clean up solution notebooks
neptunes5thmoon committed Jul 2, 2024
1 parent a129565 commit 634209a
Showing 1 changed file with 16 additions and 11 deletions.
27 changes: 16 additions & 11 deletions solution.py
@@ -97,15 +97,15 @@
# </ol>
# </div>

-# %% tags=[]
+# %% tags=["task"]
# TASK 1.1: initialize an upsample module
up = ... # YOUR CODE HERE

# TASK 1.2: apply your upsample module to `sample_2d_input`

# YOUR CODE HERE

-# %% tags=[]
+# %% tags=["task"]
# TASK 1.3: vary scale factor and mode
# YOUR CODE HERE

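For orientation, a minimal sketch of how Tasks 1.1 and 1.2 could be filled in, assuming the standard torch.nn.Upsample API; the toy sample_2d_input and the chosen scale factor and mode are illustrative assumptions, not the notebook's reference solution:

import torch

sample_2d_input = torch.rand(1, 1, 4, 4)  # assumed (batch, channel, height, width) toy input

# one possible Task 1.1: nearest-neighbour upsampling by a factor of 2
up = torch.nn.Upsample(scale_factor=2, mode="nearest")

# one possible Task 1.2: apply the module to the sample input
print(up(sample_2d_input).shape)  # torch.Size([1, 1, 8, 8])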
@@ -116,6 +116,11 @@
# SOLUTION 1.2: apply your upsample module to `sample_2d_input`
up(sample_2d_input)

# %% tags=["solution"]
# TASK 1.3: vary scale factor and mode
up3 = torch.nn.Upsample(scale_factor=3, mode="nearest")
up3(sample_2d_input)

# %% [markdown] tags=[]
# Here is an additional example on image data.

@@ -140,7 +145,7 @@
# try initializing the module and applying it to the sample input. Try varying the parameters to see how the output changes.
# </p>

-# %% tags=[]
+# %% tags=["task"]
# TASK 2A: Initialize max pooling and apply to sample input
# YOUR CODE HERE

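As a hedged sketch of Task 2A, assuming torch.nn.MaxPool2d and an illustrative input tensor rather than the notebook's own sample:

import torch

sample_2d_input = torch.rand(1, 1, 8, 8)  # assumed toy input

pool = torch.nn.MaxPool2d(kernel_size=2)  # stride defaults to kernel_size
print(pool(sample_2d_input).shape)        # torch.Size([1, 1, 4, 4])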
@@ -161,7 +166,7 @@
# </div>


-# %% tags=[]
+# %% tags=["task"]
class Downsample(torch.nn.Module):
def __init__(self, downsample_factor: int):
"""Initialize a MaxPool2d module with the input downsample fator"""
@@ -270,7 +275,7 @@ def forward(self, x):
# If you get stuck, refer back to the <a href=https://pytorch.org/docs/stable/notes/modules.html>Module</a> documentation for hints and examples of how to define a PyTorch Module.


-# %% tags=[]
+# %% tags=["task"]
class ConvBlock(torch.nn.Module):
def __init__(
self,
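The constructor and forward pass are collapsed here; as a non-authoritative sketch, a U-Net convolution block is commonly two convolutions, each followed by a ReLU, where the kernel size and "same" padding below are assumptions:

import torch


class ConvBlockSketch(torch.nn.Module):
    """Illustrative sketch: two conv + ReLU pairs; the padding choice is an assumption."""

    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3):
        super().__init__()
        padding = kernel_size // 2  # "same" padding for odd kernel sizes
        self.block = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding),
            torch.nn.ReLU(),
            torch.nn.Conv2d(out_channels, out_channels, kernel_size, padding=padding),
            torch.nn.ReLU(),
        )

    def forward(self, x):
        return self.block(x)


# e.g. ConvBlockSketch(1, 16)(torch.rand(1, 1, 32, 32)).shape -> torch.Size([1, 16, 32, 32])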
@@ -406,7 +411,7 @@ def forward(self, x):
# </div>


-# %% tags=[]
+# %% tags=["task"]
class CropAndConcat(torch.nn.Module):
def crop(self, x, y):
"""Center-crop x to match spatial dimensions given by y."""
@@ -464,7 +469,7 @@ def forward(self, encoder_output, upsample_output):
# </div>


-# %% tags=[]
+# %% tags=["task"]
class OutputConv(torch.nn.Module):
def __init__(
self,
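The class body is collapsed; typically a U-Net output head is a 1x1 convolution mapping the feature maps to the desired number of output channels, optionally followed by an activation. A hedged sketch along those lines:

import torch


class OutputConvSketch(torch.nn.Module):
    """Illustrative sketch: 1x1 conv to the output channels plus an optional activation."""

    def __init__(self, in_channels: int, out_channels: int, activation=None):
        super().__init__()
        self.final_conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1)
        self.activation = activation

    def forward(self, x):
        x = self.final_conv(x)
        if self.activation is not None:
            x = self.activation(x)
        return x


# e.g. OutputConvSketch(16, 1, torch.nn.Sigmoid())(torch.rand(1, 16, 32, 32)).shape
# -> torch.Size([1, 1, 32, 32])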
@@ -576,7 +581,7 @@ def forward(self, x):
# </div>


-# %% tags=[]
+# %% tags=["task"]
class UNet(torch.nn.Module):
def __init__(
self,
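Since the constructor is collapsed in this diff, here is a self-contained, fixed-depth sketch of how the pieces above typically compose (depth 2, "same" padding, nearest-neighbour upsampling); all names and defaults are illustrative assumptions, not the exercise's reference U-Net:

import torch


class TinyUNetSketch(torch.nn.Module):
    def __init__(self, in_channels=1, out_channels=1, num_fmaps=16):
        super().__init__()

        def conv_block(c_in, c_out):
            return torch.nn.Sequential(
                torch.nn.Conv2d(c_in, c_out, kernel_size=3, padding=1),
                torch.nn.ReLU(),
                torch.nn.Conv2d(c_out, c_out, kernel_size=3, padding=1),
                torch.nn.ReLU(),
            )

        self.enc0 = conv_block(in_channels, num_fmaps)
        self.enc1 = conv_block(num_fmaps, 2 * num_fmaps)
        self.down = torch.nn.MaxPool2d(2)
        self.bottleneck = conv_block(2 * num_fmaps, 4 * num_fmaps)
        self.up = torch.nn.Upsample(scale_factor=2, mode="nearest")
        self.dec1 = conv_block(6 * num_fmaps, 2 * num_fmaps)  # upsampled + skip channels
        self.dec0 = conv_block(3 * num_fmaps, num_fmaps)
        self.out = torch.nn.Conv2d(num_fmaps, out_channels, kernel_size=1)

    def forward(self, x):
        s0 = self.enc0(x)                   # full-resolution skip
        s1 = self.enc1(self.down(s0))       # half-resolution skip
        b = self.bottleneck(self.down(s1))  # quarter resolution
        d1 = self.dec1(torch.cat([self.up(b), s1], dim=1))
        d0 = self.dec0(torch.cat([self.up(d1), s0], dim=1))
        return self.out(d0)


# quick shape check:
# TinyUNetSketch()(torch.rand(1, 1, 64, 64)).shape -> torch.Size([1, 1, 64, 64])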
@@ -893,7 +898,7 @@ def forward(self, x):
# <p>The <code>plot_receptive_field</code> function visualizes the receptive field of a given U-Net - the square shows how many input pixels contribute to the output at the center pixel. Try it out with different U-Nets to get a sense of how varying the depth, kernel size, and downsample factor affect the receptive field of a U-Net.</p>
# </div>

-# %% tags=[]
+# %% tags=["task"]
from local import plot_receptive_field

new_net = ... # TASK 7: declare your U-Net here
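To make the "square of contributing input pixels" concrete, here is a back-of-the-envelope calculator built on the standard receptive-field recurrence: each convolution adds (kernel_size - 1) * jump, and the jump is multiplied by each downsampling and divided by each upsampling. The assumption of two convolutions per level mirrors a typical U-Net block and may not match this notebook's exact architecture:

def receptive_field_side(depth=4, kernel_size=3, downsample_factor=2, convs_per_block=2):
    r, j = 1, 1
    # encoder: conv block then max pool at every level above the bottleneck
    for _ in range(depth - 1):
        for _ in range(convs_per_block):
            r += (kernel_size - 1) * j
        r += (downsample_factor - 1) * j  # max pool kernel
        j *= downsample_factor
    # bottleneck block
    for _ in range(convs_per_block):
        r += (kernel_size - 1) * j
    # decoder: upsample then conv block back up to full resolution
    for _ in range(depth - 1):
        j //= downsample_factor
        for _ in range(convs_per_block):
            r += (kernel_size - 1) * j
    return r


print(receptive_field_side())  # -> 96 with these defaults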
@@ -943,7 +948,7 @@ def forward(self, x):
loss_function: torch.nn.Module = torch.nn.MSELoss()


# %% tags=["solution"]
# %% tags=[]
def crop(x, target):
"""Center-crop x to match spatial dimensions given by target."""

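For context, a minimal sketch of a single optimization step with the MSE loss defined above; the model, optimizer, and batch here are placeholders rather than the notebook's actual training setup:

import torch

model = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1)    # placeholder network
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
loss_function = torch.nn.MSELoss()
x, y = torch.rand(4, 1, 64, 64), torch.rand(4, 1, 64, 64)  # placeholder batch

optimizer.zero_grad()
prediction = model(x)
# with valid convolutions, y would first be center-cropped to prediction's
# spatial size, which is what the crop helper above is for
loss = loss_function(prediction, y)
loss.backward()
optimizer.step()
print(loss.item())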
@@ -1058,7 +1063,7 @@ def train(
# </ol>
# </div>

-# %% tags=[]
+# %% tags=["task"]
model = ... # TASK 8.1: Declare your U-Net here and name it below
model_name = "my_fav_unet" # This name will be used in the tensorboard logs
logger = SummaryWriter(f"unet_runs/{model_name}")
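For reference, a minimal sketch of how a SummaryWriter like the one created above is typically used during training; the run name, tag, and logged values are illustrative only:

from torch.utils.tensorboard import SummaryWriter

logger = SummaryWriter("unet_runs/example_run")  # assumed run name
for step in range(3):
    placeholder_loss = 1.0 / (step + 1)
    logger.add_scalar("train/loss", placeholder_loss, step)
logger.close()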
