2025-04-25 21:28:00 -07:00
#!/bin/bash
2019-07-12 21:19:42 -04:00
#
# This script runs through the code in each of the python examples.
2022-01-20 23:19:54 +08:00
# The purpose is just as an integration test, not to actually train models in any meaningful way.
# For that reason, most of these set epochs = 1 and --dry-run.
2019-07-12 21:19:42 -04:00
#
2025-04-25 21:28:00 -07:00
# Optionally specify a comma separated list of examples to run. Can be run as:
# * To run all examples:
# ./run_python_examples.sh
# * To run few specific examples:
# ./run_python_examples.sh "dcgan,fast_neural_style"
#
# To test examples on CUDA accelerator, run as:
# USE_CUDA=True ./run_python_examples.sh
#
2025-04-28 12:46:10 -07:00
# To test examples on hardware accelerator (CUDA, MPS, XPU, etc.), run as:
# USE_ACCEL=True ./run_python_examples.sh
# NOTE: USE_ACCEL relies on torch.accelerator API and not all examples are converted
# to use it at the moment. Thus, expect failures using this flag on non-CUDA accelerators
# and consider to run examples one by one.
#
2025-04-25 21:28:00 -07:00
# Script requires uv to be installed. When executed, script will install prerequisites from
# `requirements.txt` for each example. If run within an activated virtual environment (uv venv,
# python -m venv, conda) this might reinstall some of the packages. To change pip installation
# index or to pass additional pip install options, run as:
# PIP_INSTALL_ARGS="--pre -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html" \
# ./run_python_examples.sh
#
# To force script to create virtual environment for each example, run as:
# VIRTUAL_ENV=".venv" ./run_python_examples.sh
# Script will remove environments it creates in a teardown step after execution of each example.
2019-07-12 21:19:42 -04:00
2024-05-03 13:25:49 -07:00
# Resolve the directory this script lives in (relative to the caller's cwd)
# and pull in the shared helpers (error, run, stop, ...) from utils.sh.
BASE_DIR="$(pwd)/$(dirname "$0")"
source "$BASE_DIR/utils.sh"
2024-04-02 12:50:04 -04:00
2025-04-28 12:46:10 -07:00
# TODO: Leave only USE_ACCEL and drop USE_CUDA once all examples are converted
# to the torch.accelerator API. For now, USE_ACCEL is just an alias for USE_CUDA.
if [[ -n "$USE_ACCEL" ]]; then
  USE_CUDA=$USE_ACCEL
fi

# Default to CPU-only runs unless the caller opted in.
USE_CUDA=${USE_CUDA:-False}
2020-07-02 00:43:39 +03:00
# Translate USE_CUDA into the per-example command-line flags. CUDA_FLAG and
# ACCEL_FLAG are deliberately left unquoted at call sites so an empty value
# expands to no argument at all.
case $USE_CUDA in
  "True")
    echo "using cuda"
    CUDA=1
    CUDA_FLAG="--cuda"
    ACCEL_FLAG="--accel"
    ;;
  "False")
    echo "not using cuda"
    CUDA=0
    CUDA_FLAG=""
    ACCEL_FLAG=""
    ;;
  *)
    # Anything else (including empty) is a configuration error; say so
    # instead of failing silently.
    echo "USE_CUDA must be \"True\" or \"False\", got: \"$USE_CUDA\"" >&2
    exit 1
    ;;
esac
2019-07-12 21:19:42 -04:00
# Smoke-test the DCGAN example on a generated fake dataset.
function dcgan() {
  uv run main.py --dataset fake $ACCEL_FLAG --dry-run || error "dcgan failed"
}
# Run fast neural style transfer in eval mode, downloading the pretrained
# models on first use.
function fast_neural_style() {
  if [[ ! -d "saved_models" ]]; then
    echo "downloading saved models for fast neural style"
    uv run download_saved_models.py
  fi
  # Bail out early if the download did not produce the models directory.
  test -d "saved_models" || { error "saved models not found"; return; }
  echo "running fast neural style model"
  uv run neural_style/neural_style.py eval --content-image images/content-images/amber.jpg --model saved_models/candy.pth --output-image images/output-images/amber-candy.jpg $ACCEL_FLAG || error "neural_style.py failed"
}
# Smoke-test the imagenet example on a tiny synthetic dataset: a single
# downloaded image duplicated into train/ and val/ class folders.
function imagenet() {
  if [[ ! -d "sample/val" || ! -d "sample/train" ]]; then
    mkdir -p sample/val/n
    mkdir -p sample/train/n
    curl -O "https://upload.wikimedia.org/wikipedia/commons/5/5a/Socks-clinton.jpg" || { error "couldn't download sample image for imagenet"; return; }
    mv Socks-clinton.jpg sample/train/n
    cp sample/train/n/* sample/val/n/
  fi
  uv run main.py --epochs 1 sample/ || error "imagenet example failed"
  # Exercise the single-GPU code path as well.
  uv run main.py --epochs 1 --gpu 0 sample/ || error "imagenet example failed"
}
2024-04-02 12:50:04 -04:00
# Smoke-test the language translation example; needs the spacy en/de models.
function language_translation() {
  uv run -m spacy download en || error "couldn't download en package from spacy"
  uv run -m spacy download de || error "couldn't download de package from spacy"
  uv run main.py -e 1 --enc_layers 1 --dec_layers 1 --backend cpu --logging_dir output/ --dry_run || error "language translation example failed"
}
2019-07-12 21:19:42 -04:00
# Smoke-test the basic MNIST classifier.
function mnist() {
  uv run main.py --epochs 1 --dry-run || error "mnist example failed"
}
2023-02-27 05:17:16 +05:30
# Smoke-test the forward-forward MNIST example (CPU only via --no_accel).
function mnist_forward_forward() {
  uv run main.py --epochs 1 --no_accel || error "mnist forward forward failed"
}
2019-07-12 21:19:42 -04:00
# Smoke-test the Hogwild (multi-process) MNIST example.
function mnist_hogwild() {
  uv run main.py --epochs 1 --dry-run $CUDA_FLAG || error "mnist hogwild failed"
}
2022-10-27 14:31:20 -07:00
# Smoke-test the RNN-based MNIST example.
function mnist_rnn() {
  uv run main.py --epochs 1 --dry-run || error "mnist rnn example failed"
}
2019-07-12 21:19:42 -04:00
# Smoke-test the polynomial regression example.
function regression() {
  uv run main.py --epochs 1 $CUDA_FLAG || error "regression failed"
}
2022-05-07 19:10:31 -07:00
# Smoke-test the siamese network example.
function siamese_network() {
  uv run main.py --epochs 1 --dry-run || error "siamese network example failed"
}
2019-07-12 21:19:42 -04:00
# Smoke-test both RL examples: REINFORCE and actor-critic.
function reinforcement_learning() {
  uv run reinforce.py || error "reinforcement learning reinforce failed"
  uv run actor_critic.py || error "reinforcement learning actor_critic failed"
}
# Smoke-test the SNLI example; requires the spacy 'en' model.
function snli() {
  echo "installing 'en' model if not installed"
  uv run -m spacy download en || { error "couldn't download 'en' model needed for snli"; return; }
  echo "training..."
  uv run train.py --epochs 1 --dev_every 1 --no-bidirectional --dry-run || error "couldn't train snli"
}
2022-04-15 10:55:35 -07:00
# Run each standalone torch.fx demo script.
function fx() {
  # uv run custom_tracer.py || error "fx custom tracer has failed" UnboundLocalError: local variable 'tabulate' referenced before assignment
  uv run invert.py || error "fx invert has failed"
  uv run module_tracer.py || error "fx module tracer has failed"
  uv run primitive_library.py || error "fx primitive library has failed"
  uv run profiling_tracer.py || error "fx profiling tracer has failed"
  uv run replace_op.py || error "fx replace op has failed"
  uv run subgraph_rewriter_basic_use.py || error "fx subgraph has failed"
  uv run wrap_output_dynamically.py || error "vmap output dynamically has failed"
}
2019-07-12 21:19:42 -04:00
# Train the super-resolution model for one epoch, then upscale a test image.
function super_resolution() {
  uv run main.py --upscale_factor 3 --batchSize 4 --testBatchSize 100 --nEpochs 1 --lr 0.001 $ACCEL_FLAG || error "super resolution failed"
  uv run super_resolve.py --input_image dataset/BSDS300/images/test/16077.jpg --model model_epoch_1.pth --output_filename out.png $ACCEL_FLAG || error "super resolution upscaling failed"
}
2020-06-09 22:08:19 -07:00
# Generate training data, then smoke-test the time sequence prediction example.
function time_sequence_prediction() {
  uv run generate_sine_wave.py || { error "generate sine wave failed"; return; }
  uv run train.py --steps 2 || error "time sequence prediction training failed"
}
# Smoke-test the variational autoencoder example.
function vae() {
  uv run main.py --epochs 1 || error "vae failed"
}
2023-04-26 21:24:13 +05:30
# Smoke-test the vision transformer example.
function vision_transformer() {
  uv run main.py --epochs 1 --dry-run || error "vision transformer example failed"
}
2019-07-12 21:19:42 -04:00
# Smoke-test the word language model with its default architecture, then
# train and generate with every supported model type.
function word_language_model() {
  uv run main.py --epochs 1 --dry-run $ACCEL_FLAG || error "word_language_model failed"
  uv run generate.py $ACCEL_FLAG || error "word_language_model generate failed"
  for model in "RNN_TANH" "RNN_RELU" "LSTM" "GRU" "Transformer"; do
    uv run main.py --model $model --epochs 1 --dry-run $ACCEL_FLAG || error "word_language_model failed"
    uv run generate.py $ACCEL_FLAG || error "word_language_model generate failed"
  done
}
2023-06-12 12:22:18 -06:00
# Smoke-test the graph convolutional network example.
function gcn() {
  uv run main.py --epochs 1 --dry-run || error "graph convolutional network failed"
}
2023-08-08 11:49:10 -04:00
# Smoke-test the graph attention network example.
function gat() {
  uv run main.py --epochs 1 --dry-run || error "graph attention network failed"
}
2025-04-25 21:28:00 -07:00
# Save the stop() defined in utils.sh under the name base_stop, then override
# stop() to clean up artifacts produced by the examples before delegating back.
eval "base_$(declare -f stop)"
function stop() {
  cd "$BASE_DIR" || return
  rm -rf dcgan/fake_samples_epoch_000.png \
    dcgan/netD_epoch_0.pth \
    dcgan/netG_epoch_0.pth \
    dcgan/real_samples.png \
    fast_neural_style/saved_models.zip \
    fast_neural_style/saved_models/ \
    imagenet/checkpoint.pth.tar \
    imagenet/lsun/ \
    imagenet/model_best.pth.tar \
    imagenet/sample/ \
    language_translation/output/ \
    snli/.data/ \
    snli/.vector_cache/ \
    snli/results/ \
    time_sequence_prediction/predict*.pdf \
    time_sequence_prediction/traindata.pt \
    word_language_model/model.pt \
    gcn/cora/ \
    gat/cora/ || error "couldn't clean up some files"

  # The fast neural style run overwrites a checked-in image; restore it.
  git checkout fast_neural_style/images/output-images/amber-candy.jpg || error "couldn't clean up fast neural style image"

  base_stop "$1"
}
# Run every example in sequence via the run() helper from utils.sh.
function run_all() {
  # cpp moved to `run_cpp_examples.sh`
  run dcgan
  # distributed moved to `run_distributed_examples.sh`
  run fast_neural_style
  run imagenet
  # language_translation
  run mnist
  run mnist_forward_forward
  run mnist_hogwild
  run mnist_rnn
  run regression
  run reinforcement_learning
  run siamese_network
  # run super_resolution - flaky
  run time_sequence_prediction
  run vae
  # vision_transformer - example broken see https://github.com/pytorch/examples/issues/1184 and https://github.com/pytorch/examples/pull/1258 for more details
  run word_language_model
  run fx
  run gcn
  run gat
}
# By default run all examples; otherwise run only the comma-separated names
# supplied in $EXAMPLES.
if [[ -z "$EXAMPLES" ]]; then
  run_all
else
  # Parameter expansion turns the comma-separated list into words; the
  # unquoted expansion below relies on word-splitting intentionally.
  for i in ${EXAMPLES//,/ }; do
    echo "Starting $i"
    run $i
    echo "Finished $i, status $?"
  done
fi
# Report accumulated failures (collected in $ERRORS by error()) and exit
# non-zero if any example failed.
if [[ -z "$ERRORS" ]]; then
  echo "Completed successfully with status $?"
else
  echo "Some python examples failed:"
  # Use a fixed format string: $ERRORS may contain '%' or backslashes and
  # must not be interpreted as a printf format.
  printf '%s\n' "$ERRORS"
  # Exit with error (0-255) in case of failure in one of the tests.
  exit 1
fi