# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import sys
import os
import numpy as np
import mxnet as mx
from mxnet . test_utils import *
curr_path = os . path . dirname ( os . path . abspath ( os . path . expanduser ( __file__ ) ) )
sys . path . insert ( 0 , os . path . join ( curr_path , ' ../unittest ' ) )
from common import setup_module , with_seed , teardown
from mxnet . gluon import utils
import tarfile
def _get_model():
    """Download and unpack the pretrained Inception-7 (Inception-v3) model.

    No-op when model/Inception-7-symbol.json already exists; otherwise fetches
    inception-v3.tar.gz into the working directory and extracts it there,
    which produces the model/ directory with symbol and params files.
    """
    if not os.path.exists('model/Inception-7-symbol.json'):
        download('http://data.mxnet.io/models/imagenet/inception-v3.tar.gz')
        with tarfile.open(name="inception-v3.tar.gz", mode="r:gz") as tf:
            # The archive arrives over plain HTTP; reject members whose
            # resolved path would escape the working directory so a crafted
            # tarball cannot overwrite arbitrary files (path traversal).
            base = os.path.realpath('.')
            for member in tf.getmembers():
                target = os.path.realpath(os.path.join('.', member.name))
                if not (target == base or target.startswith(base + os.sep)):
                    raise RuntimeError('unsafe path in archive: %s' % member.name)
            tf.extractall()
def _dump_images(shape):
    """Build the reference image blob and save it as a .npy file.

    Reads every file in data/test_images/ (sorted, so the order is
    deterministic), crops the largest centered square from each, resizes it to
    `shape`, then stacks everything into an NCHW float32 array with 128
    subtracted and writes it to data/test_images_<h>_<w>.npy.
    """
    import skimage.io
    import skimage.transform

    def _center_square_resize(arr):
        # Crop the largest centered square, then resize to the target shape.
        side = min(arr.shape[:2])
        top = (arr.shape[0] - side) // 2
        left = (arr.shape[1] - side) // 2
        square = arr[top:top + side, left:left + side]
        return skimage.transform.resize(square, shape)

    names = sorted(os.listdir('data/test_images/'))
    frames = [_center_square_resize(skimage.io.imread('data/test_images/' + name))
              for name in names]
    # HWC per image -> NCHW batch; shift pixels to a roughly zero-centered range.
    imgs = np.asarray(frames, dtype=np.float32).transpose((0, 3, 1, 2)) - 128
    np.save('data/test_images_%d_%d.npy' % shape, imgs)
def _get_data(shape):
    """Download the cached test images and ground-truth dump into data/.

    Both downloads are verified against pinned SHA-1 digests, so corrupted or
    stale files are re-fetched rather than silently reused.
    """
    img_path = "data/test_images_%d_%d.npy" % shape
    utils.download("http://data.mxnet.io/data/test_images_%d_%d.npy" % shape,
                   path=img_path,
                   sha1_hash="355e15800642286e7fe607d87c38aeeab085b0cc")
    utils.download("http://data.mxnet.io/data/inception-v3-dump.npz",
                   path='data/inception-v3-dump.npz',
                   sha1_hash="91807dfdbd336eb3b265dd62c2408882462752b9")
@with_seed()
def test_consistency(dump=False):
    """Check GPU-vs-CPU forward consistency of the Inception-v3 model.

    Loads the pretrained Inception-7 checkpoint, runs it on the cached test
    images on both mx.gpu(0) and mx.cpu(0), and compares the outputs against
    each other and against the recorded ground truth.

    Parameters
    ----------
    dump : bool
        When True, regenerate the test-image blob from data/test_images/ and
        overwrite data/inception-v3-dump.npz with freshly computed outputs
        instead of comparing against the stored ground truth.
    """
    shape = (299, 299)
    _get_model()
    _get_data(shape)
    if dump:
        _dump_images(shape)
        gt = None  # no ground truth yet; check_consistency returns one to save
    else:
        gt = {n: mx.nd.array(a) for n, a in np.load('data/inception-v3-dump.npz').items()}
    data = np.load('data/test_images_%d_%d.npy' % shape)
    sym, arg_params, aux_params = mx.model.load_checkpoint('model/Inception-7', 1)
    arg_params['data'] = data
    # Labels are irrelevant for a forward check (grad_req='null' below), but the
    # bound symbol still expects the softmax_label input to exist.
    arg_params['softmax_label'] = np.random.randint(low=1, high=1000, size=(data.shape[0],))
    ctx_list = [{'ctx': mx.gpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}},
                {'ctx': mx.cpu(0), 'data': data.shape, 'type_dict': {'data': data.dtype}}]
    # NOTE(review): this call was split in two by stray commit-log text pasted
    # into the source; the argument list is reconstructed from the surviving
    # fragments (sym/ctx_list/params head + rtol/atol/... tail).
    gt = check_consistency(sym, ctx_list, arg_params=arg_params, aux_params=aux_params,
                           rtol=1e-3, atol=1e-3, grad_req='null', raise_on_err=False,
                           ground_truth=gt)
    if dump:
        np.savez('data/inception-v3-dump.npz', **{n: a.asnumpy() for n, a in gt.items()})
# Allow running this file directly, outside the pytest harness.
if __name__ == '__main__':
    test_consistency(dump=False)