From f92e4fa709420dce148813edcbc5d65a5eb3fcc2 Mon Sep 17 00:00:00 2001
From: slowy07
Date: Thu, 19 Aug 2021 13:12:29 +0700
Subject: [PATCH] fix: grammar typos

---
 .../app/human_pose_estimation/common.py    |  4 ++--
 tensorlayer/db.py                          |  8 ++++----
 .../dataset_loaders/flickr_1M_dataset.py   |  2 +-
 .../dataset_loaders/flickr_25k_dataset.py  |  2 +-
 .../files/dataset_loaders/ptb_dataset.py   |  2 +-
 tensorlayer/files/utils.py                 | 20 +++++++++----------
 tensorlayer/layers/embedding.py            |  4 ++--
 tensorlayer/layers/normalization.py        |  2 +-
 tensorlayer/layers/recurrent.py            |  2 +-
 tensorlayer/layers/utils.py                |  6 +++---
 tensorlayer/models/core.py                 |  4 ++--
 tensorlayer/prepro.py                      |  8 ++++----
 12 files changed, 32 insertions(+), 32 deletions(-)

diff --git a/tensorlayer/app/human_pose_estimation/common.py b/tensorlayer/app/human_pose_estimation/common.py
index 0e01a7794..97ff4f125 100644
--- a/tensorlayer/app/human_pose_estimation/common.py
+++ b/tensorlayer/app/human_pose_estimation/common.py
@@ -251,7 +251,7 @@ def denormalize3D(self, data, which='scale'):
         if which == 'scale':
             data = data.reshape((-1, 17, 3)).copy()
 
-            # denormalize (x,y,z) coordiantes for results
+            # denormalize (x,y,z) coordinates for results
             for idx, item in enumerate(self.gt_testset):
                 camera_name = item['camera_param']['name']
                 if camera_name == '54138969' or camera_name == '60457274':
@@ -275,7 +275,7 @@ def denormalize2D(self, data, which='scale'):
         if which == 'scale':
             data = data.reshape((-1, 17, 2)).copy()
 
-            # denormalize (x,y,z) coordiantes for results
+            # denormalize (x,y,z) coordinates for results
             for idx, item in enumerate(self.gt_testset):
                 camera_name = item['camera_param']['name']
                 if camera_name == '54138969' or camera_name == '60457274':
diff --git a/tensorlayer/db.py b/tensorlayer/db.py
index 129e251e5..e95be3f2d 100644
--- a/tensorlayer/db.py
+++ b/tensorlayer/db.py
@@ -118,7 +118,7 @@ def save_model(self, network=None, model_name='model', **kwargs):
         model_name : str
             The name/key of model.
         kwargs : other events
-            Other events, such as name, accuracy, loss, step number and etc (optinal).
+            Other events, such as name, accuracy, loss, step number and etc (optional).
 
         Examples
         ---------
@@ -178,7 +178,7 @@ def find_top_model(self, sort=None, model_name='model', **kwargs):
         model_name : str or None
             The name/key of model.
         kwargs : other events
-            Other events, such as name, accuracy, loss, step number and etc (optinal).
+            Other events, such as name, accuracy, loss, step number and etc (optional).
 
         Examples
         ---------
@@ -265,7 +265,7 @@ def save_dataset(self, dataset=None, dataset_name=None, **kwargs):
         dataset_name : str
            The name of dataset.
         kwargs : other events
-            Other events, such as description, author and etc (optinal).
+            Other events, such as description, author and etc (optional).
 
         Examples
         ----------
@@ -309,7 +309,7 @@ def find_top_dataset(self, dataset_name=None, sort=None, **kwargs):
         sort : List of tuple
            PyMongo sort comment, search "PyMongo find one sorting" and `collection level operations `__ for more details.
         kwargs : other events
-            Other events, such as description, author and etc (optinal).
+            Other events, such as description, author and etc (optional).
 
         Examples
         ---------
diff --git a/tensorlayer/files/dataset_loaders/flickr_1M_dataset.py b/tensorlayer/files/dataset_loaders/flickr_1M_dataset.py
index f2e582ae5..880a5df65 100644
--- a/tensorlayer/files/dataset_loaders/flickr_1M_dataset.py
+++ b/tensorlayer/files/dataset_loaders/flickr_1M_dataset.py
@@ -32,7 +32,7 @@ def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printab
     n_threads : int
         The number of thread to read image.
     printable : boolean
-        Whether to print infomation when reading images, default is ``False``.
+        Whether to print information when reading images, default is ``False``.
 
     Examples
     ----------
diff --git a/tensorlayer/files/dataset_loaders/flickr_25k_dataset.py b/tensorlayer/files/dataset_loaders/flickr_25k_dataset.py
index 8049a0653..9ff559d97 100644
--- a/tensorlayer/files/dataset_loaders/flickr_25k_dataset.py
+++ b/tensorlayer/files/dataset_loaders/flickr_25k_dataset.py
@@ -30,7 +30,7 @@ def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False
     n_threads : int
         The number of thread to read image.
     printable : boolean
-        Whether to print infomation when reading images, default is ``False``.
+        Whether to print information when reading images, default is ``False``.
 
     Examples
     -----------
diff --git a/tensorlayer/files/dataset_loaders/ptb_dataset.py b/tensorlayer/files/dataset_loaders/ptb_dataset.py
index 30746fd87..670c8cf17 100644
--- a/tensorlayer/files/dataset_loaders/ptb_dataset.py
+++ b/tensorlayer/files/dataset_loaders/ptb_dataset.py
@@ -47,7 +47,7 @@ def load_ptb_dataset(path='data'):
     path = os.path.join(path, 'ptb')
     logging.info("Load or Download Penn TreeBank (PTB) dataset > {}".format(path))
 
-    #Maybe dowload and uncompress tar, or load exsisting files
+    # Maybe download and uncompress tar, or load existing files
     filename = 'simple-examples.tgz'
     url = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'
     maybe_download_and_extract(filename, path, url, extract=True)
diff --git a/tensorlayer/files/utils.py b/tensorlayer/files/utils.py
index 80db87a62..c5b50f810 100644
--- a/tensorlayer/files/utils.py
+++ b/tensorlayer/files/utils.py
@@ -297,7 +297,7 @@ def static_graph2net(model_config):
 
 
 def load_hdf5_graph(filepath='model.hdf5', load_weights=False):
-    """Restore TL model archtecture from a a pickle file. Support loading model weights.
+    """Restore TL model architecture from a pickle file. Support loading model weights.
 
     Parameters
     -----------
@@ -353,7 +353,7 @@ def load_hdf5_graph(filepath='model.hdf5', load_weights=False):
 
 
 # def load_pkl_graph(name='model.pkl'):
-#     """Restore TL model archtecture from a a pickle file. No parameters be restored.
+#     """Restore TL model architecture from a pickle file. No parameters will be restored.
 #
 #     Parameters
 #     -----------
@@ -778,7 +778,7 @@ def load_ptb_dataset(path='data'):
     path = os.path.join(path, 'ptb')
     logging.info("Load or Download Penn TreeBank (PTB) dataset > {}".format(path))
 
-    # Maybe dowload and uncompress tar, or load exsisting files
+    # Maybe download and uncompress tar, or load existing files
     filename = 'simple-examples.tgz'
     url = 'http://www.fit.vutbr.cz/~imikolov/rnnlm/'
     maybe_download_and_extract(filename, path, url, extract=True)
@@ -1063,7 +1063,7 @@ def load_flickr25k_dataset(tag='sky', path="data", n_threads=50, printable=False
     n_threads : int
         The number of thread to read image.
     printable : boolean
-        Whether to print infomation when reading images, default is ``False``.
+        Whether to print information when reading images, default is ``False``.
 
     Examples
     -----------
@@ -1135,7 +1135,7 @@ def load_flickr1M_dataset(tag='sky', size=10, path="data", n_threads=50, printab
     n_threads : int
         The number of thread to read image.
     printable : boolean
-        Whether to print infomation when reading images, default is ``False``.
+        Whether to print information when reading images, default is ``False``.
 
     Examples
     ----------
@@ -2104,7 +2104,7 @@ def load_and_assign_npz_dict(name='model.npz', network=None, skip=False):
                 logging.warning("Weights named '%s' not found in network. Skip it." % key)
             else:
                 raise RuntimeError(
-                    "Weights named '%s' not found in network. Hint: set argument skip=Ture "
+                    "Weights named '%s' not found in network. Hint: set argument skip=True "
                     "if you want to skip redundant or mismatch weights." % key
                 )
         else:
@@ -2332,7 +2332,7 @@ def load_file_list(path=None, regx='\.jpg', printable=True, keep_prefix=False):
     regx : str
         The regx of file name.
     printable : boolean
-        Whether to print the files infomation.
+        Whether to print the files information.
     keep_prefix : boolean
         Whether to keep path in the file name.
 
@@ -2404,7 +2404,7 @@ def exists_or_mkdir(path, verbose=True):
 
 
 def maybe_download_and_extract(filename, working_directory, url_source, extract=False, expected_bytes=None):
-    """Checks if file exists in working_directory otherwise tries to dowload the file,
+    """Checks if file exists in working_directory otherwise tries to download the file,
     and optionally also tries to extract the file if format is ".zip" or ".tar"
 
     Parameters
@@ -2412,11 +2412,11 @@ def maybe_download_and_extract(filename, working_directory, url_source, extract=
     filename : str
         The name of the (to be) dowloaded file.
     working_directory : str
-        A folder path to search for the file in and dowload the file to
+        A folder path to search for the file in and download the file to
     url : str
         The URL to download the file from
     extract : boolean
-        If True, tries to uncompress the dowloaded file is ".tar.gz/.tar.bz2" or ".zip" file, default is False.
+        If True, tries to uncompress the downloaded file if it is a ".tar.gz/.tar.bz2" or ".zip" file, default is False.
     expected_bytes : int or None
         If set tries to verify that the downloaded file is of the specified size, otherwise raises an Exception,
         defaults is None which corresponds to no check being performed.
diff --git a/tensorlayer/layers/embedding.py b/tensorlayer/layers/embedding.py
index 9d0d882d1..a971493d9 100644
--- a/tensorlayer/layers/embedding.py
+++ b/tensorlayer/layers/embedding.py
@@ -28,9 +28,9 @@ class OneHot(Layer):
     depth : None or int
         If the input indices is rank N, the output will have rank N+1. The new axis is created at dimension `axis` (default: the new axis is appended at the end).
     on_value : None or number
-        The value to represnt `ON`. If None, it will default to the value 1.
+        The value to represent `ON`. If None, it will default to the value 1.
     off_value : None or number
-        The value to represnt `OFF`. If None, it will default to the value 0.
+        The value to represent `OFF`. If None, it will default to the value 0.
     axis : None or int
         The axis.
     dtype : None or TensorFlow dtype
diff --git a/tensorlayer/layers/normalization.py b/tensorlayer/layers/normalization.py
index 161d6e018..adf6c27ff 100644
--- a/tensorlayer/layers/normalization.py
+++ b/tensorlayer/layers/normalization.py
@@ -98,7 +98,7 @@ def _bias_scale(x, b, data_format):
 
 
 def _bias_add(x, b, data_format):
-    """Alternative implementation of tf.nn.bias_add which is compatiable with tensorRT."""
+    """Alternative implementation of tf.nn.bias_add which is compatible with tensorRT."""
     if data_format == 'NHWC':
         return tf.add(x, b)
     elif data_format == 'NCHW':
diff --git a/tensorlayer/layers/recurrent.py b/tensorlayer/layers/recurrent.py
index 565d27e4c..c61fae59a 100644
--- a/tensorlayer/layers/recurrent.py
+++ b/tensorlayer/layers/recurrent.py
@@ -813,7 +813,7 @@ class BasicConvLSTMCell(ConvRNNCell):
         The bias added to forget gates (see above).
     input_size : int
         Deprecated and unused.
-    state_is_tuple : boolen
+    state_is_tuple : boolean
         If True, accepted and returned states are 2-tuples of the `c_state` and `m_state`.
         If False, they are concatenated along the column axis. The latter behavior will soon be deprecated.
     act : activation function
diff --git a/tensorlayer/layers/utils.py b/tensorlayer/layers/utils.py
index e5dd154b1..696d67eef 100644
--- a/tensorlayer/layers/utils.py
+++ b/tensorlayer/layers/utils.py
@@ -112,7 +112,7 @@ def get_layers_with_name(net, name="", verbose=False):
     >>> layers = tl.layers.get_layers_with_name(net, "CNN", True)
 
     """
-    logging.info(" [*] geting layers with %s" % name)
+    logging.info(" [*] getting layers with %s" % name)
 
     layers = []
     i = 0
@@ -157,7 +157,7 @@ def get_variables_with_name(name=None, train_only=True, verbose=False):
     name : str
         Get the variables that contain this name.
     train_only : boolean
-        If Ture, only get the trainable variables.
+        If True, only get the trainable variables.
     verbose : boolean
         If True, print the information of all variables.
 
@@ -175,7 +175,7 @@ def get_variables_with_name(name=None, train_only=True, verbose=False):
     if name is None:
         raise Exception("please input a name")
 
-    logging.info(" [*] geting variables with %s" % name)
+    logging.info(" [*] getting variables with %s" % name)
 
     # tvar = tf.trainable_variables() if train_only else tf.all_variables()
     if train_only:
diff --git a/tensorlayer/models/core.py b/tensorlayer/models/core.py
index 514db708f..2d258dd97 100644
--- a/tensorlayer/models/core.py
+++ b/tensorlayer/models/core.py
@@ -41,9 +41,9 @@ class Model(object):
     __init__(self, inputs=None, outputs=None, name=None)
         Initializing the Model.
     inputs()
-        Get input tensors to this network (only avaiable for static model).
+        Get input tensors to this network (only available for static model).
     outputs()
-        Get output tensors to this network (only avaiable for static model).
+        Get output tensors to this network (only available for static model).
     __call__(inputs, is_train=None, **kwargs)
         Forward input tensors through this network.
     all_layers()
diff --git a/tensorlayer/prepro.py b/tensorlayer/prepro.py
index 43a396a3f..28e982382 100644
--- a/tensorlayer/prepro.py
+++ b/tensorlayer/prepro.py
@@ -572,7 +572,7 @@ def affine_transform_cv2(x, transform_matrix, flags=None, border_mode='constant'
     elif border_mode is 'replicate':
         border_mode = cv2.BORDER_REPLICATE
     else:
-        raise Exception("unsupport border_mode, check cv.BORDER_ for more details.")
+        raise Exception("unsupported border_mode, check cv.BORDER_ for more details.")
 
     return cv2.warpAffine(x, transform_matrix[0:2,:], \
         (cols,rows), flags=flags, borderMode=border_mode)
@@ -2613,7 +2613,7 @@ def parse_darknet_ann_str_to_list(annotations):
     Parameters
     -----------
     annotations : str
-        The annotations in darkent format "class, x, y, w, h ...." seperated by "\\n".
+        The annotations in darknet format "class, x, y, w, h ...." separated by "\\n".
 
     Returns
     -------
@@ -3762,7 +3762,7 @@ def resize_image(image, annos, mask, target_width, target_height):
 
         new_keypoints = []
         for keypoints in people:
-            # case orginal points are not usable
+            # case original points are not usable
             if keypoints[1] >= crop_range_y and keypoints[1] <= crop_range_y + _target_height - 1:
                 pts = (int(keypoints[0]), int(keypoints[1] - crop_range_y))
             else:
@@ -3783,7 +3783,7 @@ def resize_image(image, annos, mask, target_width, target_height):
 
         new_keypoints = []
        for keypoints in people:
-            # case orginal points are not usable
+            # case original points are not usable
             if keypoints[0] >= crop_range_x and keypoints[0] <= crop_range_x + _target_width - 1:
                 pts = (int(keypoints[0] - crop_range_x), int(keypoints[1]))
             else: