diff --git a/.jenkins/build.sh b/.jenkins/build.sh
index aa3ac9ee2b..e93e1c8212 100755
--- a/.jenkins/build.sh
+++ b/.jenkins/build.sh
@@ -24,10 +24,10 @@ pip install --progress-bar off -r $DIR/../requirements.txt
 
 #Install PyTorch Nightly for test.
 # Nightly - pip install --pre torch torchvision torchaudio -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html
-# Install 2.2 for testing
-pip uninstall -y torch torchvision torchaudio torchtext torchdata
-pip3 install torch==2.2.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu121
-pip3 install torchdata torchtext --index-url https://download.pytorch.org/whl/test/cpu
+# Install 2.2 for testing - uncomment to install nightly binaries (update the version as needed).
+# pip uninstall -y torch torchvision torchaudio torchtext torchdata
+# pip3 install torch==2.2.0 torchvision torchaudio --no-cache-dir --index-url https://download.pytorch.org/whl/test/cu121
+# pip3 install torchdata torchtext --index-url https://download.pytorch.org/whl/test/cpu
 
 # Install two language tokenizers for Translation with TorchText tutorial
 python -m spacy download en_core_web_sm
diff --git a/index.rst b/index.rst
index de3aace628..e5f0d1eaaa 100644
--- a/index.rst
+++ b/index.rst
@@ -3,15 +3,11 @@ Welcome to PyTorch Tutorials
 
 What's new in PyTorch tutorials?
 
-* `Getting Started with Distributed Checkpoint (DCP) `__
-* `torch.export Tutorial `__
-* `Facilitating New Backend Integration by PrivateUse1 `__
-* `(prototype) Accelerating BERT with semi-structured (2:4) sparsity `__
-* `(prototype) PyTorch 2 Export Quantization-Aware Training (QAT) `__
-* `(prototype) PyTorch 2 Export Post Training Quantization with X86 Backend through Inductor `__
-* `(prototype) Inductor C++ Wrapper Tutorial `__
-* `How to save memory by fusing the optimizer step into the backward pass `__
-* `Tips for Loading an nn.Module from a Checkpoint `__
+* `PyTorch Inference Performance Tuning on AWS Graviton Processors `__
+* `Using TORCH_LOGS python API with torch.compile `__
+* `PyTorch 2 Export Quantization with X86 Backend through Inductor `__
+* `Getting Started with DeviceMesh `__
+* `Compiling the optimizer with torch.compile `__
 
 .. raw:: html
diff --git a/recipes_source/torch_logs.py b/recipes_source/torch_logs.py
index 86bacda550..90edf4c503 100644
--- a/recipes_source/torch_logs.py
+++ b/recipes_source/torch_logs.py
@@ -34,53 +34,49 @@
 
 # exit cleanly if we are on a device that doesn't support torch.compile
 if torch.cuda.get_device_capability() < (7, 0):
-    print("Exiting because torch.compile is not supported on this device.")
-    import sys
+    print("Skipping because torch.compile is not supported on this device.")
+else:
+    @torch.compile()
+    def fn(x, y):
+        z = x + y
+        return z + 2
 
-    sys.exit(0)
-
-@torch.compile()
-def fn(x, y):
-    z = x + y
-    return z + 2
-
-
-inputs = (torch.ones(2, 2, device="cuda"), torch.zeros(2, 2, device="cuda"))
+    inputs = (torch.ones(2, 2, device="cuda"), torch.zeros(2, 2, device="cuda"))
 
 # print separator and reset dynamo
 # between each example
-def separator(name):
-    print(f"==================={name}=========================")
-    torch._dynamo.reset()
+    def separator(name):
+        print(f"==================={name}=========================")
+        torch._dynamo.reset()
 
-separator("Dynamo Tracing")
+    separator("Dynamo Tracing")
 
 # View dynamo tracing
 # TORCH_LOGS="+dynamo"
-torch._logging.set_logs(dynamo=logging.DEBUG)
-fn(*inputs)
+    torch._logging.set_logs(dynamo=logging.DEBUG)
+    fn(*inputs)
 
-separator("Traced Graph")
+    separator("Traced Graph")
 # View traced graph
 # TORCH_LOGS="graph"
-torch._logging.set_logs(graph=True)
-fn(*inputs)
+    torch._logging.set_logs(graph=True)
+    fn(*inputs)
 
-separator("Fusion Decisions")
+    separator("Fusion Decisions")
 # View fusion decisions
 # TORCH_LOGS="fusion"
-torch._logging.set_logs(fusion=True)
-fn(*inputs)
+    torch._logging.set_logs(fusion=True)
+    fn(*inputs)
 
-separator("Output Code")
+    separator("Output Code")
 # View output code generated by inductor
 # TORCH_LOGS="output_code"
-torch._logging.set_logs(output_code=True)
-fn(*inputs)
+    torch._logging.set_logs(output_code=True)
+    fn(*inputs)
 
-separator("")
+    separator("")
 
 ######################################################################
 # Conclusion
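
Note on the torch_logs.py hunk above: the recipe now keeps the whole demo inside an else branch instead of calling sys.exit(0), so the page still builds cleanly on machines without a capable GPU. The sketch below is not part of the patch; it is a condensed, standalone restating of that flow. fn and the two log switches simply mirror the recipe, and it assumes a CUDA device with compute capability 7.0 or newer is available when the compiled path runs.

    import logging

    import torch

    # Mirror the recipe's guard: torch.compile (Inductor) needs a GPU with
    # compute capability >= 7.0; otherwise skip instead of exiting.
    if torch.cuda.is_available() and torch.cuda.get_device_capability() >= (7, 0):

        @torch.compile()
        def fn(x, y):
            return x + y + 2

        inputs = (torch.ones(2, 2, device="cuda"), torch.zeros(2, 2, device="cuda"))

        # Same effect as running the script with TORCH_LOGS="+dynamo":
        # emit TorchDynamo debug-level tracing logs.
        torch._logging.set_logs(dynamo=logging.DEBUG)
        fn(*inputs)

        # Reset Dynamo so the next call recompiles, then show the code that
        # Inductor generates (equivalent to TORCH_LOGS="output_code").
        torch._dynamo.reset()
        torch._logging.set_logs(output_code=True)
        fn(*inputs)
    else:
        print("Skipping because torch.compile is not supported on this device.")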