# NOTE: lines removed here were a GitHub page-scrape artifact ("SIGN IN SIGN UP",
# repository blurb and language stats) accidentally captured with this file;
# they are not part of the source.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#pylint: disable=no-member, too-many-locals, too-many-branches, no-self-use, broad-except, lost-exception, too-many-nested-blocks, too-few-public-methods, invalid-name
"""
This file tests and ensures that all tutorials notebooks run
without warning or exception.
env variable MXNET_TUTORIAL_TEST_KERNEL controls which kernel to use
when running the notebook. e.g:
`export MXNET_TUTORIAL_TEST_KERNEL=python2`
env variable MXNET_TUTORIAL_TEST_NO_CACHE controls whether to clean the
temporary directory in which the notebook was run and re-download any
resource file. The default behaviour is to not clean the directory. Set to '1'
to force clean the directory. e.g:
`export MXNET_TUTORIAL_TEST_NO_CACHE=1`
NB: in the real CI, the tests will re-download everything since they start from
a clean workspace.
"""
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'utils'))
from notebook_test import run_notebook
# NOTE: this test suite is outdated and needs to be completely redone.
TUTORIAL_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'docs', '_build', 'html', 'tutorials')
# Jupyter kernel used to execute the notebooks (e.g. 'python2'); None lets
# run_notebook fall back to its default kernel.
KERNEL = os.getenv('MXNET_TUTORIAL_TEST_KERNEL', None)
# os.getenv returns a *string*, so any non-empty value (even '0') would be
# truthy.  Compare against '1' explicitly, matching the contract documented
# in the module docstring: "Set to '1' to force clean the directory".
NO_CACHE = os.getenv('MXNET_TUTORIAL_TEST_NO_CACHE', '0') == '1'
def _test_tutorial_nb(tutorial):
    """Execute a single tutorial notebook and report whether it ran cleanly.

    Parameters
    ----------
    tutorial : str
        Name of the tutorial to be tested, relative to TUTORIAL_DIR
        (e.g. 'basic/ndarray').

    Returns
    -------
    bool
        True if the notebook produced no warnings or errors.
    """
    # Delegate to the shared notebook runner; kernel and cache behaviour
    # come from the module-level environment-driven settings.
    outcome = run_notebook(tutorial,
                           TUTORIAL_DIR,
                           kernel=KERNEL,
                           no_cache=NO_CACHE)
    return outcome
# ---------------------------------------------------------------------------
# Basic API tutorials
# ---------------------------------------------------------------------------
def test_basic_ndarray():
    assert _test_tutorial_nb('basic/ndarray')


def test_basic_ndarray_indexing():
    assert _test_tutorial_nb('basic/ndarray_indexing')


def test_basic_symbol():
    assert _test_tutorial_nb('basic/symbol')


def test_basic_module():
    assert _test_tutorial_nb('basic/module')


def test_basic_data():
    assert _test_tutorial_nb('basic/data')


def test_basic_reshape_transpose():
    assert _test_tutorial_nb('basic/reshape_transpose')


# ---------------------------------------------------------------------------
# Gluon tutorials
# ---------------------------------------------------------------------------
def test_gluon_customop():
    assert _test_tutorial_nb('gluon/customop')


def test_gluon_custom_layer():
    assert _test_tutorial_nb('gluon/custom_layer')


def test_gluon_transforms():
    assert _test_tutorial_nb('gluon/transforms')


def test_gluon_data_augmentation():
    assert _test_tutorial_nb('gluon/data_augmentation')


def test_gluon_datasets():
    assert _test_tutorial_nb('gluon/datasets')


def test_gluon_naming():
    assert _test_tutorial_nb('gluon/naming')


def test_gluon_ndarray():
    assert _test_tutorial_nb('gluon/ndarray')


def test_gluon_mnist():
    assert _test_tutorial_nb('gluon/mnist')


def test_gluon_autograd():
    assert _test_tutorial_nb('gluon/autograd')


def test_gluon_gluon():
    assert _test_tutorial_nb('gluon/gluon')


def test_gluon_multi_gpu():
    assert _test_tutorial_nb('gluon/multi_gpu')


def test_gluon_save_load_params():
    assert _test_tutorial_nb('gluon/save_load_params')


def test_gluon_hybrid():
    assert _test_tutorial_nb('gluon/hybrid')


# Disabled pending https://github.com/apache/mxnet/issues/16181
"""
def test_gluon_performance():
assert _test_tutorial_nb('gluon/performance')
"""
def test_gluon_pretrained_models():
    assert _test_tutorial_nb('gluon/pretrained_models')


def test_gluon_learning_rate_finder():
    assert _test_tutorial_nb('gluon/learning_rate_finder')


def test_gluon_learning_rate_schedules():
    assert _test_tutorial_nb('gluon/learning_rate_schedules')


def test_gluon_learning_rate_schedules_advanced():
    assert _test_tutorial_nb('gluon/learning_rate_schedules_advanced')


def test_gluon_info_gan():
    assert _test_tutorial_nb('gluon/info_gan')


def test_gluon_fit_api_fashion_mnist():
    assert _test_tutorial_nb('gluon/fit_api_tutorial')


# ---------------------------------------------------------------------------
# NLP and ONNX tutorials
# ---------------------------------------------------------------------------
def test_nlp_cnn():
    assert _test_tutorial_nb('nlp/cnn')


def test_onnx_super_resolution():
    assert _test_tutorial_nb('onnx/super_resolution')


def test_onnx_export_mxnet_to_onnx():
    assert _test_tutorial_nb('onnx/export_mxnet_to_onnx')


def test_onnx_fine_tuning_gluon():
    assert _test_tutorial_nb('onnx/fine_tuning_gluon')


def test_onnx_inference_on_onnx_model():
    assert _test_tutorial_nb('onnx/inference_on_onnx_model')


# ---------------------------------------------------------------------------
# Python API tutorials (some live under gluon/ despite the test name)
# ---------------------------------------------------------------------------
def test_python_linear_regression():
    assert _test_tutorial_nb('python/linear-regression')


def test_python_logistic_regression():
    assert _test_tutorial_nb('gluon/logistic_regression_explained')


def test_python_numpy_gotchas():
    assert _test_tutorial_nb('gluon/gotchas_numpy_in_mxnet')
def test_gluon_end_to_end():
    assert _test_tutorial_nb('gluon/gluon_from_experiment_to_deployment')


def test_python_mnist():
    assert _test_tutorial_nb('python/mnist')


def test_python_predict_image():
    assert _test_tutorial_nb('python/predict_image')


def test_python_data_augmentation():
    assert _test_tutorial_nb('python/data_augmentation')


def test_python_data_augmentation_with_masks():
    assert _test_tutorial_nb('python/data_augmentation_with_masks')


def test_python_kvstore():
    assert _test_tutorial_nb('python/kvstore')


def test_module_to_gluon():
    assert _test_tutorial_nb('python/module_to_gluon')


def test_python_types_of_data_augmentation():
    assert _test_tutorial_nb('python/types_of_data_augmentation')


# Disabled pending https://github.com/apache/mxnet/issues/16181
"""
def test_python_profiler():
assert _test_tutorial_nb('python/profiler')
"""
# ---------------------------------------------------------------------------
# Sparse, speech, GAN, and vision tutorials
# ---------------------------------------------------------------------------
def test_sparse_row_sparse():
    assert _test_tutorial_nb('sparse/row_sparse')


def test_sparse_csr():
    assert _test_tutorial_nb('sparse/csr')


def test_sparse_train():
    assert _test_tutorial_nb('sparse/train')


def test_sparse_train_gluon():
    assert _test_tutorial_nb('sparse/train_gluon')


def test_speech_recognition_ctc():
    assert _test_tutorial_nb('speech_recognition/ctc')


def test_unsupervised_learning_gan():
    assert _test_tutorial_nb('unsupervised_learning/gan')


def test_vision_large_scale_classification():
    assert _test_tutorial_nb('vision/large_scale_classification')
# NOTE: a git commit message pasted here by the extraction process
# ("[MXNET-414] Tutorial on visualizing CNN decisions using Grad-CAM (#10900)",
# dated 2018-05-24) has been removed; it was not part of this source file.
def test_vision_cnn_visualization():
    assert _test_tutorial_nb('vision/cnn_visualization')


def test_control_flow():
    assert _test_tutorial_nb('control_flow/ControlFlowTutorial')
# NOTE: a git commit message pasted here by the extraction process
# ("MXNet AMP (automatic mixed precision) (#14173)", dated 2019-05-20)
# has been removed; it was not part of this source file.
def test_amp():
    assert _test_tutorial_nb('amp/amp_tutorial')


# Disabled pending https://github.com/apache/mxnet/issues/16181
"""
def test_dnnl_quantization():
assert _test_tutorial_nb('dnnl/dnnl_quantization')
"""