diff --git a/examples/basic_tutorials/load_pytorch_parameters_to_tensorlayerx.py b/examples/basic_tutorials/load_pytorch_parameters_to_tensorlayerx.py
index 5815be6..c656897 100644
--- a/examples/basic_tutorials/load_pytorch_parameters_to_tensorlayerx.py
+++ b/examples/basic_tutorials/load_pytorch_parameters_to_tensorlayerx.py
@@ -88,7 +88,7 @@ def def_torch_weight_reshape(weight):
 # Step1: save pytorch model parameters to a.pth
 # On the first run, uncomment lines 90 and 91.
 # b = B()
-# torch.save(a.state_dict(), 'a.pth')
+# torch.save(b.state_dict(), 'a.pth')
 a = A()
 
 # Step2: Converts pytorch a.pth to the model parameter format of tensorlayerx
diff --git a/tensorlayerx/backend/ops/paddle_nn.py b/tensorlayerx/backend/ops/paddle_nn.py
index 3109405..e8d7c41 100644
--- a/tensorlayerx/backend/ops/paddle_nn.py
+++ b/tensorlayerx/backend/ops/paddle_nn.py
@@ -496,10 +496,10 @@ class Conv2D(object):
 
     def __init__(self, strides, padding, data_format='NHWC', dilations=None, out_channel=None, k_size=None):
         self.data_format, self.padding = preprocess_2d_format(data_format, padding)
-        if self.data_format is 'NHWC':
+        if self.data_format == 'NHWC':
             self._stride = (strides[1], strides[2])
             self._dilation = (dilations[1], dilations[2])
-        elif self.data_format is 'NCHW':
+        elif self.data_format == 'NCHW':
             self._stride = (strides[2], strides[3])
             self._dilation = (dilations[2], dilations[3])
 
@@ -537,10 +537,10 @@ def conv2d(input, filters, strides, padding, data_format='NCHW', dilations=None)
         A Tensor. Has the same type as input.
     """
     data_format, padding = preprocess_2d_format(data_format, padding)
-    if data_format is 'NHWC':
+    if data_format == 'NHWC':
         _stride = (strides[1], strides[2])
         _dilation = (dilations[1], dilations[2])
-    elif data_format is 'NCHW':
+    elif data_format == 'NCHW':
         _stride = (strides[2], strides[3])
         _dilation = (dilations[2], dilations[3])
     outputs = F.conv2d(
@@ -553,10 +553,10 @@ class Conv3D(object):
 
     def __init__(self, strides, padding, data_format='NDHWC', dilations=None, out_channel=None, k_size=None):
         self.data_format, self.padding = preprocess_3d_format(data_format, padding)
-        if self.data_format is 'NDHWC':
+        if self.data_format == 'NDHWC':
             self._strides = (strides[1], strides[2], strides[3])
             self._dilations = (dilations[1], dilations[2], dilations[3])
-        elif self.data_format is 'NCDHW':
+        elif self.data_format == 'NCDHW':
             self._strides = (strides[2], strides[3], strides[4])
             self._dilations = (dilations[2], dilations[3], dilations[4])
 
@@ -603,10 +603,10 @@ def conv3d(input, filters, strides, padding, data_format='NDHWC', dilations=None
         A Tensor. Has the same type as input.
     """
     data_format, padding = preprocess_3d_format(data_format, padding)
-    if data_format is 'NDHWC':
+    if data_format == 'NDHWC':
         _strides = (strides[1], strides[2], strides[3])
         _dilations = (dilations[1], dilations[2], dilations[3])
-    elif data_format is 'NCDHW':
+    elif data_format == 'NCDHW':
         _strides = (strides[2], strides[3], strides[4])
         _dilations = (dilations[2], dilations[3], dilations[4])
     outputs = F.conv3d(
@@ -1195,10 +1195,10 @@ def __init__(self, strides, padding, data_format, dilations, out_channel, k_size
         self.k_size = k_size
         self.groups = groups
         self.data_format, self.padding = preprocess_2d_format(data_format, padding)
-        if self.data_format is 'NHWC':
+        if self.data_format == 'NHWC':
             self.strides = (strides[1], strides[2])
             self.dilations = (dilations[1], dilations[2])
-        elif self.data_format is 'NCHW':
+        elif self.data_format == 'NCHW':
             self.strides = (strides[2], strides[3])
             self.dilations = (dilations[2], dilations[3])
 
@@ -1241,10 +1241,10 @@ def __init__(self, strides, padding, data_format, dilations, out_channel, k_size
         self.in_channel = int(in_channel)
         self.depth_multiplier = depth_multiplier
         self.data_format, self.padding = preprocess_2d_format(data_format, padding)
-        if self.data_format is 'NHWC':
+        if self.data_format == 'NHWC':
             self.strides = (strides[1], strides[2])
             self.dilations = (dilations[1], dilations[2])
-        elif self.data_format is 'NCHW':
+        elif self.data_format == 'NCHW':
             self.strides = (strides[2], strides[3])
             self.dilations = (dilations[2], dilations[3])
 
diff --git a/tensorlayerx/files/dataset_loaders/mnist_dataset.py b/tensorlayerx/files/dataset_loaders/mnist_dataset.py
index d077146..d058305 100644
--- a/tensorlayerx/files/dataset_loaders/mnist_dataset.py
+++ b/tensorlayerx/files/dataset_loaders/mnist_dataset.py
@@ -31,6 +31,6 @@ def load_mnist_dataset(shape=(-1, 784), path='data'):
     """
     logging.info("If can't download this dataset automatically, "
                  "please download it from the official website manually."
-                 "mnist Dataset ."
+                 "mnist Dataset ."
                  "Please place dataset under 'data/mnist/' by default.")
-    return _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/')
+    return _load_mnist_dataset(shape, path, name='mnist', url='https://ossci-datasets.s3.amazonaws.com/mnist/')
diff --git a/tensorlayerx/files/dataset_loaders/mnist_utils.py b/tensorlayerx/files/dataset_loaders/mnist_utils.py
index 1c9ece5..64aa2e9 100644
--- a/tensorlayerx/files/dataset_loaders/mnist_utils.py
+++ b/tensorlayerx/files/dataset_loaders/mnist_utils.py
@@ -12,7 +12,7 @@
 __all__ = ["_load_mnist_dataset"]
 
 
-def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'):
+def _load_mnist_dataset(shape, path, name='mnist', url='https://ossci-datasets.s3.amazonaws.com/mnist/'):
     """A generic function to load mnist-like dataset.
 
     Parameters:
@@ -24,7 +24,7 @@ def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/ex
     name : str
         The dataset name you want to use(the default is 'mnist').
     url : str
-        The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').
+        The url of dataset(the default is 'https://ossci-datasets.s3.amazonaws.com/mnist/').
 
     """
     path = os.path.join(path, name)
diff --git a/tensorlayerx/files/utils.py b/tensorlayerx/files/utils.py
index c1d00d7..f94e943 100644
--- a/tensorlayerx/files/utils.py
+++ b/tensorlayerx/files/utils.py
@@ -280,7 +280,7 @@ def load_mnist_dataset(shape=(-1, 784), path='data'):
     >>> X_train, y_train, X_val, y_val, X_test, y_test = tlx.files.load_mnist_dataset(shape=(-1,784), path='datasets')
     >>> X_train, y_train, X_val, y_val, X_test, y_test = tlx.files.load_mnist_dataset(shape=(-1, 28, 28, 1))
     """
-    return _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/')
+    return _load_mnist_dataset(shape, path, name='mnist', url='https://ossci-datasets.s3.amazonaws.com/mnist/')
 
 
 def load_fashion_mnist_dataset(shape=(-1, 784), path='data'):
@@ -310,7 +310,7 @@
     )
 
 
-def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/exdb/mnist/'):
+def _load_mnist_dataset(shape, path, name='mnist', url='https://ossci-datasets.s3.amazonaws.com/mnist/'):
     """A generic function to load mnist-like dataset.
 
     Parameters:
@@ -322,7 +322,7 @@ def _load_mnist_dataset(shape, path, name='mnist', url='http://yann.lecun.com/ex
     name : str
         The dataset name you want to use(the default is 'mnist').
     url : str
-        The url of dataset(the default is 'http://yann.lecun.com/exdb/mnist/').
+        The url of dataset(the default is 'https://ossci-datasets.s3.amazonaws.com/mnist/').
 
     """
     path = os.path.join(path, name)
@@ -2375,7 +2375,7 @@ def maybe_download_and_extract(filename, working_directory, url_source, extract=
     --------
     >>> down_file = tlx.files.maybe_download_and_extract(filename='train-images-idx3-ubyte.gz',
     ...                                                  working_directory='data/',
-    ...                                                  url_source='http://yann.lecun.com/exdb/mnist/')
+    ...                                                  url_source='https://ossci-datasets.s3.amazonaws.com/mnist/')
     >>> tlx.files.maybe_download_and_extract(filename='ADEChallengeData2016.zip',
     ...                                      working_directory='data/',
     ...                                      url_source='http://sceneparsing.csail.mit.edu/data/',