From ff36c109cb89e241f1cb99f54bf27ff86d77490c Mon Sep 17 00:00:00 2001 From: Nikolaos Date: Tue, 8 Feb 2022 13:59:35 +0200 Subject: [PATCH 01/69] Updated to CUDA11.0 --- Dockerfile-cuda | 25 ------------------- bin/install.sh | 9 +++++-- src/opendr/control/single_demo_grasp/Makefile | 2 +- .../object_detection_2d/dependencies.ini | 3 +-- 4 files changed, 9 insertions(+), 30 deletions(-) diff --git a/Dockerfile-cuda b/Dockerfile-cuda index 6c70f3853a..986511646f 100644 --- a/Dockerfile-cuda +++ b/Dockerfile-cuda @@ -5,31 +5,6 @@ RUN apt-get update && \ apt-get --yes install git sudo apt-utils RUN DEBIAN_FRONTEND="noninteractive" apt-get -y install tzdata -# Install CUDA 10.2 -RUN apt-get --yes install gcc-8 g++-8 wget && \ - update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 100 --slave /usr/bin/g++ g++ /usr/bin/g++-8 && \ - update-alternatives --auto gcc && \ - apt-get --yes install zlib1g-dev libbz2-dev libreadline-dev libssl-dev libsqlite3-dev libffi-dev && \ - wget https://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_440.33.01_linux.run && \ - wget http://people.cs.uchicago.edu/~kauffman/nvidia/cudnn/cudnn-10.2-linux-x64-v8.2.0.53.tgz - -RUN sudo apt-get --yes install libxml2 && \ - sh cuda_10.2.89_440.33.01_linux.run --silent --toolkit --override --librarypath=/usr/local/cuda-10.2 && \ - ln -s /usr/local/cuda-10.2/ /usr/local/cuda && \ - tar -xzvf cudnn-10.2-linux-x64-v8.2.0.53.tgz && \ - cp cuda/include/cudnn.h /usr/local/cuda/include && \ - cp cuda/lib64/libcudnn* /usr/local/cuda/lib64 && \ - chmod a+r /usr/local/cuda/include/cudnn.h /usr/local/cuda/lib64/libcudnn* && \ - bash -c 'echo "/usr/local/cuda-10.2/lib64" > /etc/ld.so.conf.d/nvidia.conf' && \ - ln -sf /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_adv_train.so.8.2.0 /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_adv_train.so.8 && \ - ln -sf /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8.2.0 /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_ops_infer.so.8 && \ - ln -sf /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8.0.0 /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_cnn_train.so.8 && \ - ln -sf /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8.2.0 /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_adv_infer.so.8 && \ - ln -sf /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_ops_train.so.8.2.0 /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_ops_train.so.8 && \ - ln -sf /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8.2.0 /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn_cnn_infer.so.8 && \ - ln -sf /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn.so.8.2.0 /usr/local/cuda-10.2/targets/x86_64-linux/lib/libcudnn.so.8 && \ - ldconfig - # Add Tini ENV TINI_VERSION v0.19.0 ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini diff --git a/bin/install.sh b/bin/install.sh index e819c7a100..89cd8fa089 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -38,7 +38,12 @@ sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main # Build OpenDR make install_compilation_dependencies make install_runtime_dependencies -make libopendr -deactivate +# Install GPU dependencies separately +if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then + pip install mxnet-cu110 +fi + +make libopendr +deactivate \ No newline at end of file diff --git a/src/opendr/control/single_demo_grasp/Makefile 
b/src/opendr/control/single_demo_grasp/Makefile index 755bbe20c0..f8e9b18617 100644 --- a/src/opendr/control/single_demo_grasp/Makefile +++ b/src/opendr/control/single_demo_grasp/Makefile @@ -17,7 +17,7 @@ MAKECMDGOALS = release endif ifeq ($(OPENDR_DEVICE),gpu) -DETECTRON_WHEEL=https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.7/index.html +DETECTRON_WHEEL=https://dl.fbaipublicfiles.com/detectron2/wheels/cu110/torch1.7/index.html else DETECTRON_WHEEL=https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.7/index.html endif diff --git a/src/opendr/perception/object_detection_2d/dependencies.ini b/src/opendr/perception/object_detection_2d/dependencies.ini index 54850d7303..793212cb13 100644 --- a/src/opendr/perception/object_detection_2d/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/dependencies.ini @@ -1,8 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=mxnet-cu102==1.8.0 - mxnet==1.8.0 +python=mxnet gluoncv==0.11.0b20210908 tqdm==4.54.0 git+https://github.com/cidl-auth/cocoapi#subdirectory=PythonAPI From 48c8a9750386b6a935c78560a27620d59b911385 Mon Sep 17 00:00:00 2001 From: Nikolaos Date: Tue, 8 Feb 2022 14:40:47 +0200 Subject: [PATCH 02/69] Switch to devel image --- Dockerfile-cuda | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/Dockerfile-cuda b/Dockerfile-cuda index 986511646f..92a832ed86 100644 --- a/Dockerfile-cuda +++ b/Dockerfile-cuda @@ -1,4 +1,4 @@ -FROM nvidia/cuda:11.0-base +FROM nvidia/cuda:11.0-devel # Install dependencies RUN apt-get update && \ @@ -12,13 +12,11 @@ RUN chmod +x /tini ENTRYPOINT ["/tini", "--"] # Avoid switching back to gcc9 when install build-essential -RUN sudo apt-get --yes install build-essential && \ - update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 100 --slave /usr/bin/g++ g++ /usr/bin/g++-8 && \ - update-alternatives --auto gcc +RUN sudo apt-get --yes install build-essential # Clone the repo and install the toolkit ENV OPENDR_DEVICE gpu -RUN git clone --depth 1 --recurse-submodules -j8 https://github.com/opendr-eu/opendr +RUN git clone --depth 1 --recurse-submodules -j8 https://github.com/opendr-eu/opendr -b cuda_upgrade WORKDIR "/opendr" RUN ./bin/install.sh From a0f0f58f9b3584bbbd4b4d898c0b706cc29d02de Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Mon, 14 Feb 2022 10:43:50 +0200 Subject: [PATCH 03/69] Update install.sh --- bin/install.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/bin/install.sh b/bin/install.sh index b33c62be34..ac3fb9e378 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -38,8 +38,8 @@ sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main # If working on GPU install GPU dependencies beforehand if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then - echo "[INFO] Installing mxnet-cu102==1.8.0. You can override this later if you are using a different CUDA version." - pip3 install mxnet-cu102==1.8.0 + echo "[INFO] Installing mxnet-cu110==1.8.0. You can override this later if you are using a different CUDA version." 
+ pip3 install mxnet-cu110==1.8.0 fi # Build OpenDR @@ -53,4 +53,4 @@ fi make libopendr -deactivate \ No newline at end of file +deactivate From 388c2c130d59dc096a2aa6aa16174d0cf1998432 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Mon, 14 Feb 2022 10:44:34 +0200 Subject: [PATCH 04/69] Update install.sh --- bin/install.sh | 4 ---- 1 file changed, 4 deletions(-) diff --git a/bin/install.sh b/bin/install.sh index ac3fb9e378..8f42e5d8f4 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -46,10 +46,6 @@ fi make install_compilation_dependencies make install_runtime_dependencies -# Install GPU dependencies separately -if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then - pip install mxnet-cu110 -fi make libopendr From e15145a9e012556a08f7c559a89f56677d05554d Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Mon, 14 Feb 2022 10:44:56 +0200 Subject: [PATCH 05/69] Update dependencies.ini --- src/opendr/perception/object_detection_2d/dependencies.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opendr/perception/object_detection_2d/dependencies.ini b/src/opendr/perception/object_detection_2d/dependencies.ini index 28678639ee..d70223da97 100644 --- a/src/opendr/perception/object_detection_2d/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/dependencies.ini @@ -2,7 +2,7 @@ # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=mxnet +python=mxnet==1.8.0 gluoncv==0.11.0b20210908 tqdm pycocotools>=2.0.4 From abd60b21b69dedc13cbd44da71a0558db3a9f738 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Thu, 24 Feb 2022 18:27:16 +0200 Subject: [PATCH 06/69] Updated pytorch to CUDA11.0 --- dependencies/dependencies.ini | 2 +- src/opendr/control/mobile_manipulation/dependencies.ini | 2 +- src/opendr/control/single_demo_grasp/dependencies.ini | 4 ++-- src/opendr/engine/dependencies.ini | 2 +- src/opendr/perception/activity_recognition/dependencies.ini | 4 ++-- src/opendr/perception/compressive_learning/dependencies.ini | 4 ++-- src/opendr/perception/face_recognition/dependencies.ini | 4 ++-- .../dependencies.ini | 4 ++-- .../perception/heart_anomaly_detection/dependencies.ini | 4 ++-- .../perception/multimodal_human_centric/dependencies.ini | 4 ++-- .../perception/object_detection_2d/detr/dependencies.ini | 4 ++-- .../perception/object_detection_2d/gem/dependencies.ini | 4 ++-- .../voxel_object_detection_3d/dependencies.ini | 4 ++-- .../perception/object_tracking_2d/fair_mot/dependencies.ini | 4 ++-- .../panoptic_segmentation/efficient_ps/dependencies.ini | 4 ++-- src/opendr/perception/pose_estimation/dependencies.ini | 4 ++-- src/opendr/perception/semantic_segmentation/dependencies.ini | 4 ++-- .../skeleton_based_action_recognition/dependencies.ini | 4 ++-- .../speech_recognition/edgespeechnets/dependencies.ini | 2 +- .../speech_recognition/matchboxnet/dependencies.ini | 2 +- .../speech_recognition/quadraticselfonn/dependencies.ini | 2 +- src/opendr/simulation/human_model_generation/dependencies.ini | 4 ++-- 22 files changed, 38 insertions(+), 38 deletions(-) diff --git a/dependencies/dependencies.ini b/dependencies/dependencies.ini index 8c8b096d9b..d2ca2164e4 100644 --- a/dependencies/dependencies.ini +++ b/dependencies/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format 
-python=torch==1.7.1 +python=torch==1.7.1+cu110 wheel [device] diff --git a/src/opendr/control/mobile_manipulation/dependencies.ini b/src/opendr/control/mobile_manipulation/dependencies.ini index cea4af6864..a4c0b7ed0d 100644 --- a/src/opendr/control/mobile_manipulation/dependencies.ini +++ b/src/opendr/control/mobile_manipulation/dependencies.ini @@ -10,7 +10,7 @@ python=vcstool [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.7.1+cu110 tensorboard numpy pyyaml diff --git a/src/opendr/control/single_demo_grasp/dependencies.ini b/src/opendr/control/single_demo_grasp/dependencies.ini index f28de9de57..8743505119 100644 --- a/src/opendr/control/single_demo_grasp/dependencies.ini +++ b/src/opendr/control/single_demo_grasp/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 matplotlib>=2.2.2 imgaug==0.4.0 pillow>=8.3.2 diff --git a/src/opendr/engine/dependencies.ini b/src/opendr/engine/dependencies.ini index 9ff41be90c..c2ab2a07cc 100644 --- a/src/opendr/engine/dependencies.ini +++ b/src/opendr/engine/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.7.1+cu110 wheel Cython opencv-python==4.5.1.48 diff --git a/src/opendr/perception/activity_recognition/dependencies.ini b/src/opendr/perception/activity_recognition/dependencies.ini index a4b8e669e5..10f29490c3 100644 --- a/src/opendr/perception/activity_recognition/dependencies.ini +++ b/src/opendr/perception/activity_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tqdm onnx==1.8.0 onnxruntime==1.3.0 diff --git a/src/opendr/perception/compressive_learning/dependencies.ini b/src/opendr/perception/compressive_learning/dependencies.ini index 1332987d73..48d2d0cc9b 100644 --- a/src/opendr/perception/compressive_learning/dependencies.ini +++ b/src/opendr/perception/compressive_learning/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tensorboard>=2.4.1 tqdm diff --git a/src/opendr/perception/face_recognition/dependencies.ini b/src/opendr/perception/face_recognition/dependencies.ini index 7e582af726..0b6b7fd100 100644 --- a/src/opendr/perception/face_recognition/dependencies.ini +++ b/src/opendr/perception/face_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 bcolz>=1.2.1 onnx==1.8.0 onnxruntime==1.3.0 diff --git 
a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini index fa7f5cc649..c50d3ba32d 100644 --- a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini +++ b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tensorboardX>=2.0 matplotlib>=2.2.2 tqdm diff --git a/src/opendr/perception/heart_anomaly_detection/dependencies.ini b/src/opendr/perception/heart_anomaly_detection/dependencies.ini index a3ee7589d8..aa7f0783b9 100644 --- a/src/opendr/perception/heart_anomaly_detection/dependencies.ini +++ b/src/opendr/perception/heart_anomaly_detection/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tensorboard>=2.4.1 tqdm scikit-learn>=0.22 diff --git a/src/opendr/perception/multimodal_human_centric/dependencies.ini b/src/opendr/perception/multimodal_human_centric/dependencies.ini index 43a850268c..b0c78902a7 100644 --- a/src/opendr/perception/multimodal_human_centric/dependencies.ini +++ b/src/opendr/perception/multimodal_human_centric/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tensorboard>=2.4.1 tqdm imageio>=2.6.0 diff --git a/src/opendr/perception/object_detection_2d/detr/dependencies.ini b/src/opendr/perception/object_detection_2d/detr/dependencies.ini index afb5c1c44d..7418755881 100644 --- a/src/opendr/perception/object_detection_2d/detr/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/detr/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 pycocotools>=2.0.4 git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi scipy diff --git a/src/opendr/perception/object_detection_2d/gem/dependencies.ini b/src/opendr/perception/object_detection_2d/gem/dependencies.ini index 0c4c5e18ed..3f891c58c5 100644 --- a/src/opendr/perception/object_detection_2d/gem/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/gem/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 pillow>=8.3.2 opencv-python==4.5.1.48 pycocotools>=2.0.4 diff --git a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini 
b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini index 6051426b06..f5d372c227 100644 --- a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini +++ b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini index 3ff7f44eb3..926b48708f 100644 --- a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini +++ b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini b/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini index 9f895fc2c4..c33030f839 100644 --- a/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini +++ b/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini @@ -1,7 +1,7 @@ [runtime] python= - torch==1.7.1 - torchvision==0.8.2 + torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tqdm mmcv==0.5.9 future diff --git a/src/opendr/perception/pose_estimation/dependencies.ini b/src/opendr/perception/pose_estimation/dependencies.ini index 1b876e438a..8109addb88 100644 --- a/src/opendr/perception/pose_estimation/dependencies.ini +++ b/src/opendr/perception/pose_estimation/dependencies.ini @@ -2,8 +2,8 @@ # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format python-dependencies=cython -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/semantic_segmentation/dependencies.ini b/src/opendr/perception/semantic_segmentation/dependencies.ini index 76255d1fb3..d8f2972d67 100644 --- a/src/opendr/perception/semantic_segmentation/dependencies.ini +++ b/src/opendr/perception/semantic_segmentation/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tqdm imgaug>=0.4.0 pillow>=8.3.2 diff --git a/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini b/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini index c1a37265de..fa8df870e8 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini +++ b/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file 
format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 tensorboardX>=2.0 matplotlib>=2.2.2 tqdm diff --git a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini index 76fdfafd59..9465a962dc 100644 --- a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini +++ b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.7.1+cu110 librosa==0.8.0 numpy>=1.19 numba==0.48.0 diff --git a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini index 7399c98bdf..e0971de67b 100644 --- a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini +++ b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.7.1+cu110 librosa==0.8.0 numpy>=1.19 numba==0.48.0 diff --git a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini index 76fdfafd59..9465a962dc 100644 --- a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini +++ b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.7.1+cu110 librosa==0.8.0 numpy>=1.19 numba==0.48.0 diff --git a/src/opendr/simulation/human_model_generation/dependencies.ini b/src/opendr/simulation/human_model_generation/dependencies.ini index 458d91a5a0..eac9e9f797 100644 --- a/src/opendr/simulation/human_model_generation/dependencies.ini +++ b/src/opendr/simulation/human_model_generation/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.7.1+cu110 + torchvision==0.8.2+cu110 pyglet==1.5.16 opencv-python==4.5.1.48 pillow>=8.3.2 From 5e9b5f7b9c885179e6334d5de58877553a472437 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Thu, 24 Feb 2022 18:52:13 +0200 Subject: [PATCH 07/69] Updated pytorch to CUDA11.0 --- bin/install.sh | 2 ++ dependencies/dependencies.ini | 2 +- src/opendr/control/mobile_manipulation/dependencies.ini | 2 +- src/opendr/control/single_demo_grasp/dependencies.ini | 4 ++-- src/opendr/engine/dependencies.ini | 2 +- src/opendr/perception/activity_recognition/dependencies.ini | 4 ++-- src/opendr/perception/compressive_learning/dependencies.ini | 4 ++-- src/opendr/perception/face_recognition/dependencies.ini | 4 ++-- .../dependencies.ini | 4 ++-- .../perception/heart_anomaly_detection/dependencies.ini | 4 ++-- .../perception/multimodal_human_centric/dependencies.ini | 4 ++-- 
.../perception/object_detection_2d/detr/dependencies.ini | 4 ++-- .../perception/object_detection_2d/gem/dependencies.ini | 4 ++-- .../voxel_object_detection_3d/dependencies.ini | 4 ++-- .../perception/object_tracking_2d/fair_mot/dependencies.ini | 4 ++-- .../panoptic_segmentation/efficient_ps/dependencies.ini | 4 ++-- src/opendr/perception/pose_estimation/dependencies.ini | 4 ++-- src/opendr/perception/semantic_segmentation/dependencies.ini | 4 ++-- .../skeleton_based_action_recognition/dependencies.ini | 4 ++-- .../speech_recognition/edgespeechnets/dependencies.ini | 2 +- .../speech_recognition/matchboxnet/dependencies.ini | 2 +- .../speech_recognition/quadraticselfonn/dependencies.ini | 2 +- src/opendr/simulation/human_model_generation/dependencies.ini | 4 ++-- 23 files changed, 40 insertions(+), 38 deletions(-) diff --git a/bin/install.sh b/bin/install.sh index 8f42e5d8f4..5ad5d54534 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -40,6 +40,8 @@ sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then echo "[INFO] Installing mxnet-cu110==1.8.0. You can override this later if you are using a different CUDA version." pip3 install mxnet-cu110==1.8.0 + echo "[INFO] Installing torch==1.7.1+cu110. You can override this later if you are using a different CUDA version." + pip3 install torch==1.7.1+cu110 torchvision==0.8.2+cu110 -f https://download.pytorch.org/whl/torch_stable.html fi # Build OpenDR diff --git a/dependencies/dependencies.ini b/dependencies/dependencies.ini index d2ca2164e4..8c8b096d9b 100644 --- a/dependencies/dependencies.ini +++ b/dependencies/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 +python=torch==1.7.1 wheel [device] diff --git a/src/opendr/control/mobile_manipulation/dependencies.ini b/src/opendr/control/mobile_manipulation/dependencies.ini index a4c0b7ed0d..cea4af6864 100644 --- a/src/opendr/control/mobile_manipulation/dependencies.ini +++ b/src/opendr/control/mobile_manipulation/dependencies.ini @@ -10,7 +10,7 @@ python=vcstool [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 +python=torch==1.7.1 tensorboard numpy pyyaml diff --git a/src/opendr/control/single_demo_grasp/dependencies.ini b/src/opendr/control/single_demo_grasp/dependencies.ini index 8743505119..f28de9de57 100644 --- a/src/opendr/control/single_demo_grasp/dependencies.ini +++ b/src/opendr/control/single_demo_grasp/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 matplotlib>=2.2.2 imgaug==0.4.0 pillow>=8.3.2 diff --git a/src/opendr/engine/dependencies.ini b/src/opendr/engine/dependencies.ini index c2ab2a07cc..9ff41be90c 100644 --- a/src/opendr/engine/dependencies.ini +++ b/src/opendr/engine/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 +python=torch==1.7.1 wheel Cython 
opencv-python==4.5.1.48 diff --git a/src/opendr/perception/activity_recognition/dependencies.ini b/src/opendr/perception/activity_recognition/dependencies.ini index 10f29490c3..a4b8e669e5 100644 --- a/src/opendr/perception/activity_recognition/dependencies.ini +++ b/src/opendr/perception/activity_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tqdm onnx==1.8.0 onnxruntime==1.3.0 diff --git a/src/opendr/perception/compressive_learning/dependencies.ini b/src/opendr/perception/compressive_learning/dependencies.ini index 48d2d0cc9b..1332987d73 100644 --- a/src/opendr/perception/compressive_learning/dependencies.ini +++ b/src/opendr/perception/compressive_learning/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tensorboard>=2.4.1 tqdm diff --git a/src/opendr/perception/face_recognition/dependencies.ini b/src/opendr/perception/face_recognition/dependencies.ini index 0b6b7fd100..7e582af726 100644 --- a/src/opendr/perception/face_recognition/dependencies.ini +++ b/src/opendr/perception/face_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 bcolz>=1.2.1 onnx==1.8.0 onnxruntime==1.3.0 diff --git a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini index c50d3ba32d..fa7f5cc649 100644 --- a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini +++ b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tensorboardX>=2.0 matplotlib>=2.2.2 tqdm diff --git a/src/opendr/perception/heart_anomaly_detection/dependencies.ini b/src/opendr/perception/heart_anomaly_detection/dependencies.ini index aa7f0783b9..a3ee7589d8 100644 --- a/src/opendr/perception/heart_anomaly_detection/dependencies.ini +++ b/src/opendr/perception/heart_anomaly_detection/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tensorboard>=2.4.1 tqdm scikit-learn>=0.22 diff --git a/src/opendr/perception/multimodal_human_centric/dependencies.ini b/src/opendr/perception/multimodal_human_centric/dependencies.ini index b0c78902a7..43a850268c 100644 --- 
a/src/opendr/perception/multimodal_human_centric/dependencies.ini +++ b/src/opendr/perception/multimodal_human_centric/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tensorboard>=2.4.1 tqdm imageio>=2.6.0 diff --git a/src/opendr/perception/object_detection_2d/detr/dependencies.ini b/src/opendr/perception/object_detection_2d/detr/dependencies.ini index 7418755881..afb5c1c44d 100644 --- a/src/opendr/perception/object_detection_2d/detr/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/detr/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 pycocotools>=2.0.4 git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi scipy diff --git a/src/opendr/perception/object_detection_2d/gem/dependencies.ini b/src/opendr/perception/object_detection_2d/gem/dependencies.ini index 3f891c58c5..0c4c5e18ed 100644 --- a/src/opendr/perception/object_detection_2d/gem/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/gem/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 pillow>=8.3.2 opencv-python==4.5.1.48 pycocotools>=2.0.4 diff --git a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini index f5d372c227..6051426b06 100644 --- a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini +++ b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini index 926b48708f..3ff7f44eb3 100644 --- a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini +++ b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini b/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini index c33030f839..9f895fc2c4 100644 --- a/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini +++ 
b/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini @@ -1,7 +1,7 @@ [runtime] python= - torch==1.7.1+cu110 - torchvision==0.8.2+cu110 + torch==1.7.1 + torchvision==0.8.2 tqdm mmcv==0.5.9 future diff --git a/src/opendr/perception/pose_estimation/dependencies.ini b/src/opendr/perception/pose_estimation/dependencies.ini index 8109addb88..1b876e438a 100644 --- a/src/opendr/perception/pose_estimation/dependencies.ini +++ b/src/opendr/perception/pose_estimation/dependencies.ini @@ -2,8 +2,8 @@ # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format python-dependencies=cython -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/semantic_segmentation/dependencies.ini b/src/opendr/perception/semantic_segmentation/dependencies.ini index d8f2972d67..76255d1fb3 100644 --- a/src/opendr/perception/semantic_segmentation/dependencies.ini +++ b/src/opendr/perception/semantic_segmentation/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tqdm imgaug>=0.4.0 pillow>=8.3.2 diff --git a/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini b/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini index fa8df870e8..c1a37265de 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini +++ b/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 tensorboardX>=2.0 matplotlib>=2.2.2 tqdm diff --git a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini index 9465a962dc..76fdfafd59 100644 --- a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini +++ b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 +python=torch==1.7.1 librosa==0.8.0 numpy>=1.19 numba==0.48.0 diff --git a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini index e0971de67b..7399c98bdf 100644 --- a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini +++ b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 +python=torch==1.7.1 librosa==0.8.0 numpy>=1.19 numba==0.48.0 diff --git a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini 
b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini index 9465a962dc..76fdfafd59 100644 --- a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini +++ b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 +python=torch==1.7.1 librosa==0.8.0 numpy>=1.19 numba==0.48.0 diff --git a/src/opendr/simulation/human_model_generation/dependencies.ini b/src/opendr/simulation/human_model_generation/dependencies.ini index eac9e9f797..458d91a5a0 100644 --- a/src/opendr/simulation/human_model_generation/dependencies.ini +++ b/src/opendr/simulation/human_model_generation/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1+cu110 - torchvision==0.8.2+cu110 +python=torch==1.7.1 + torchvision==0.8.2 pyglet==1.5.16 opencv-python==4.5.1.48 pillow>=8.3.2 From b8b3db72c1b3b9b15841b86557fa2dba4af95a1f Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Thu, 24 Feb 2022 19:03:10 +0200 Subject: [PATCH 08/69] Upgraded to CUDA11.1 --- Dockerfile-cuda | 2 +- bin/install.sh | 6 +++--- dependencies/dependencies.ini | 2 +- src/opendr/control/mobile_manipulation/dependencies.ini | 2 +- src/opendr/control/single_demo_grasp/Makefile | 4 ++-- src/opendr/control/single_demo_grasp/dependencies.ini | 4 ++-- src/opendr/engine/dependencies.ini | 2 +- src/opendr/perception/activity_recognition/dependencies.ini | 4 ++-- src/opendr/perception/compressive_learning/dependencies.ini | 4 ++-- src/opendr/perception/face_recognition/dependencies.ini | 4 ++-- .../dependencies.ini | 4 ++-- .../perception/heart_anomaly_detection/dependencies.ini | 4 ++-- .../perception/multimodal_human_centric/dependencies.ini | 4 ++-- .../perception/object_detection_2d/detr/dependencies.ini | 4 ++-- .../perception/object_detection_2d/gem/dependencies.ini | 4 ++-- .../voxel_object_detection_3d/dependencies.ini | 4 ++-- .../perception/object_tracking_2d/fair_mot/dependencies.ini | 4 ++-- .../panoptic_segmentation/efficient_ps/dependencies.ini | 4 ++-- src/opendr/perception/pose_estimation/dependencies.ini | 4 ++-- .../perception/semantic_segmentation/dependencies.ini | 4 ++-- .../skeleton_based_action_recognition/dependencies.ini | 4 ++-- .../speech_recognition/edgespeechnets/dependencies.ini | 2 +- .../speech_recognition/matchboxnet/dependencies.ini | 2 +- .../speech_recognition/quadraticselfonn/dependencies.ini | 2 +- .../simulation/human_model_generation/dependencies.ini | 4 ++-- 25 files changed, 44 insertions(+), 44 deletions(-) diff --git a/Dockerfile-cuda b/Dockerfile-cuda index 92a832ed86..5e460f5b4f 100644 --- a/Dockerfile-cuda +++ b/Dockerfile-cuda @@ -1,4 +1,4 @@ -FROM nvidia/cuda:11.0-devel +FROM nvidia/cuda:11.1-devel # Install dependencies RUN apt-get update && \ diff --git a/bin/install.sh b/bin/install.sh index 5ad5d54534..cefd3d2cff 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -38,10 +38,10 @@ sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main # If working on GPU install GPU dependencies beforehand if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then - echo "[INFO] Installing mxnet-cu110==1.8.0. You can override this later if you are using a different CUDA version." 
- pip3 install mxnet-cu110==1.8.0 + echo "[INFO] Installing mxnet-cu111==1.8.0. You can override this later if you are using a different CUDA version." + pip3 install mxnet-cu111==1.8.0 echo "[INFO] Installing torch==1.7.1+cu110. You can override this later if you are using a different CUDA version." - pip3 install torch==1.7.1+cu110 torchvision==0.8.2+cu110 -f https://download.pytorch.org/whl/torch_stable.html + pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html fi # Build OpenDR diff --git a/dependencies/dependencies.ini b/dependencies/dependencies.ini index 8c8b096d9b..f5cb44d7b2 100644 --- a/dependencies/dependencies.ini +++ b/dependencies/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.8.1 wheel [device] diff --git a/src/opendr/control/mobile_manipulation/dependencies.ini b/src/opendr/control/mobile_manipulation/dependencies.ini index cea4af6864..b4823382ea 100644 --- a/src/opendr/control/mobile_manipulation/dependencies.ini +++ b/src/opendr/control/mobile_manipulation/dependencies.ini @@ -10,7 +10,7 @@ python=vcstool [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.8.1 tensorboard numpy pyyaml diff --git a/src/opendr/control/single_demo_grasp/Makefile b/src/opendr/control/single_demo_grasp/Makefile index f8e9b18617..27d96f6af8 100644 --- a/src/opendr/control/single_demo_grasp/Makefile +++ b/src/opendr/control/single_demo_grasp/Makefile @@ -17,9 +17,9 @@ MAKECMDGOALS = release endif ifeq ($(OPENDR_DEVICE),gpu) -DETECTRON_WHEEL=https://dl.fbaipublicfiles.com/detectron2/wheels/cu110/torch1.7/index.html +DETECTRON_WHEEL=https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html else -DETECTRON_WHEEL=https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.7/index.html +DETECTRON_WHEEL=https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html endif .PHONY: release install_compilation_dependencies install_runtime_dependencies diff --git a/src/opendr/control/single_demo_grasp/dependencies.ini b/src/opendr/control/single_demo_grasp/dependencies.ini index f28de9de57..96ca6a75cf 100644 --- a/src/opendr/control/single_demo_grasp/dependencies.ini +++ b/src/opendr/control/single_demo_grasp/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 matplotlib>=2.2.2 imgaug==0.4.0 pillow>=8.3.2 diff --git a/src/opendr/engine/dependencies.ini b/src/opendr/engine/dependencies.ini index 9ff41be90c..214bcf53f6 100644 --- a/src/opendr/engine/dependencies.ini +++ b/src/opendr/engine/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.8.1 wheel Cython opencv-python==4.5.1.48 diff --git a/src/opendr/perception/activity_recognition/dependencies.ini b/src/opendr/perception/activity_recognition/dependencies.ini index a4b8e669e5..9c4d142926 100644 --- 
a/src/opendr/perception/activity_recognition/dependencies.ini +++ b/src/opendr/perception/activity_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tqdm onnx==1.8.0 onnxruntime==1.3.0 diff --git a/src/opendr/perception/compressive_learning/dependencies.ini b/src/opendr/perception/compressive_learning/dependencies.ini index 1332987d73..2e4704ac86 100644 --- a/src/opendr/perception/compressive_learning/dependencies.ini +++ b/src/opendr/perception/compressive_learning/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tensorboard>=2.4.1 tqdm diff --git a/src/opendr/perception/face_recognition/dependencies.ini b/src/opendr/perception/face_recognition/dependencies.ini index 7e582af726..94f4fefe7a 100644 --- a/src/opendr/perception/face_recognition/dependencies.ini +++ b/src/opendr/perception/face_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 bcolz>=1.2.1 onnx==1.8.0 onnxruntime==1.3.0 diff --git a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini index fa7f5cc649..1096f367dd 100644 --- a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini +++ b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tensorboardX>=2.0 matplotlib>=2.2.2 tqdm diff --git a/src/opendr/perception/heart_anomaly_detection/dependencies.ini b/src/opendr/perception/heart_anomaly_detection/dependencies.ini index a3ee7589d8..59f53bb169 100644 --- a/src/opendr/perception/heart_anomaly_detection/dependencies.ini +++ b/src/opendr/perception/heart_anomaly_detection/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tensorboard>=2.4.1 tqdm scikit-learn>=0.22 diff --git a/src/opendr/perception/multimodal_human_centric/dependencies.ini b/src/opendr/perception/multimodal_human_centric/dependencies.ini index 43a850268c..86c3d14d82 100644 --- a/src/opendr/perception/multimodal_human_centric/dependencies.ini +++ b/src/opendr/perception/multimodal_human_centric/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format 
-python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tensorboard>=2.4.1 tqdm imageio>=2.6.0 diff --git a/src/opendr/perception/object_detection_2d/detr/dependencies.ini b/src/opendr/perception/object_detection_2d/detr/dependencies.ini index afb5c1c44d..75203bff81 100644 --- a/src/opendr/perception/object_detection_2d/detr/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/detr/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 pycocotools>=2.0.4 git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi scipy diff --git a/src/opendr/perception/object_detection_2d/gem/dependencies.ini b/src/opendr/perception/object_detection_2d/gem/dependencies.ini index 0c4c5e18ed..ebd9de4c8f 100644 --- a/src/opendr/perception/object_detection_2d/gem/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/gem/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 pillow>=8.3.2 opencv-python==4.5.1.48 pycocotools>=2.0.4 diff --git a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini index 6051426b06..378ca56ec2 100644 --- a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini +++ b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini index 3ff7f44eb3..f0c43b5127 100644 --- a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini +++ b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini b/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini index 9f895fc2c4..576e1267fb 100644 --- a/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini +++ b/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini @@ -1,7 +1,7 @@ [runtime] python= - torch==1.7.1 - torchvision==0.8.2 + torch==1.8.1 + torchvision==0.9.1 tqdm mmcv==0.5.9 future diff --git a/src/opendr/perception/pose_estimation/dependencies.ini b/src/opendr/perception/pose_estimation/dependencies.ini index 1b876e438a..aea6ef17bc 100644 --- 
a/src/opendr/perception/pose_estimation/dependencies.ini +++ b/src/opendr/perception/pose_estimation/dependencies.ini @@ -2,8 +2,8 @@ # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format python-dependencies=cython -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/semantic_segmentation/dependencies.ini b/src/opendr/perception/semantic_segmentation/dependencies.ini index 76255d1fb3..d0a21e1d58 100644 --- a/src/opendr/perception/semantic_segmentation/dependencies.ini +++ b/src/opendr/perception/semantic_segmentation/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tqdm imgaug>=0.4.0 pillow>=8.3.2 diff --git a/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini b/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini index c1a37265de..6b836c6741 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini +++ b/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 tensorboardX>=2.0 matplotlib>=2.2.2 tqdm diff --git a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini index 76fdfafd59..62cf13056e 100644 --- a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini +++ b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.8.1 librosa==0.8.0 numpy>=1.19 numba==0.48.0 diff --git a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini index 7399c98bdf..8c766fd322 100644 --- a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini +++ b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.8.1 librosa==0.8.0 numpy>=1.19 numba==0.48.0 diff --git a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini index 76fdfafd59..62cf13056e 100644 --- a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini +++ b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 +python=torch==1.8.1 
librosa==0.8.0 numpy>=1.19 numba==0.48.0 diff --git a/src/opendr/simulation/human_model_generation/dependencies.ini b/src/opendr/simulation/human_model_generation/dependencies.ini index 458d91a5a0..2683edb57e 100644 --- a/src/opendr/simulation/human_model_generation/dependencies.ini +++ b/src/opendr/simulation/human_model_generation/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.7.1 - torchvision==0.8.2 +python=torch==1.8.1 + torchvision==0.9.1 pyglet==1.5.16 opencv-python==4.5.1.48 pillow>=8.3.2 From 9c821b9bf2465123a16968dfcf2473e0e75dbdf8 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Thu, 24 Feb 2022 23:00:35 +0200 Subject: [PATCH 09/69] DCNv2 update --- .../perception/object_tracking_2d/fair_mot/dependencies.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini index f0c43b5127..c400dd515a 100644 --- a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini +++ b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini @@ -9,7 +9,7 @@ python=torch==1.8.1 tqdm onnx==1.8.0 onnxruntime==1.3.0 - git+https://github.com/MatthewHowe/DCNv2 + git+https://github.com/jinfagang/DCNv2_latest yacs==0.1.8 progress>=1.5 lap>=0.4.0 From cfb4aaa073ebcd17eee6b3eef1a1bf3998037ad5 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Thu, 24 Feb 2022 23:23:18 +0200 Subject: [PATCH 10/69] DCNv2 update --- .../perception/object_tracking_2d/fair_mot/dependencies.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini index c400dd515a..2f68816532 100644 --- a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini +++ b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini @@ -9,7 +9,7 @@ python=torch==1.8.1 tqdm onnx==1.8.0 onnxruntime==1.3.0 - git+https://github.com/jinfagang/DCNv2_latest + git+https://github.com/tteepe/DCNv2 yacs==0.1.8 progress>=1.5 lap>=0.4.0 From c580281f06b5f299b6da8acc2195e5a00ad11d8c Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Thu, 24 Feb 2022 23:52:28 +0200 Subject: [PATCH 11/69] DCNv2 update --- .../perception/object_tracking_2d/fair_mot/dependencies.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini index 2f68816532..da5c2d5ef9 100644 --- a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini +++ b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini @@ -9,7 +9,7 @@ python=torch==1.8.1 tqdm onnx==1.8.0 onnxruntime==1.3.0 - git+https://github.com/tteepe/DCNv2 + git+https://github.com/cidl-auth/DCNv2 yacs==0.1.8 progress>=1.5 lap>=0.4.0 From 0e76ad65739ad4878a4715eb6ff9ffcb899c4de6 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 25 Feb 2022 00:07:45 +0200 Subject: [PATCH 12/69] Detectron update --- src/opendr/control/single_demo_grasp/dependencies.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opendr/control/single_demo_grasp/dependencies.ini 
b/src/opendr/control/single_demo_grasp/dependencies.ini index 96ca6a75cf..59cf0f7c73 100644 --- a/src/opendr/control/single_demo_grasp/dependencies.ini +++ b/src/opendr/control/single_demo_grasp/dependencies.ini @@ -9,4 +9,4 @@ python=torch==1.8.1 opendr=opendr-toolkit-engine -post-install=python3 -m pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.7/index.html \ No newline at end of file +post-install=python3 -m pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html \ No newline at end of file From 4504adacd5ae455df2b77a0c35788dc651f173cf Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 25 Feb 2022 00:12:37 +0200 Subject: [PATCH 13/69] Update installation.md --- docs/reference/installation.md | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/docs/reference/installation.md b/docs/reference/installation.md index c14a929601..857e2aaae3 100644 --- a/docs/reference/installation.md +++ b/docs/reference/installation.md @@ -55,18 +55,8 @@ make unittest make ctests ``` -If you plan to use GPU-enabled functionalities, then you are advised to install [CUDA 10.2](https://developer.nvidia.com/cuda-10.2-download-archive). -To do so, you can follow these steps: -```bash -sudo apt install gcc-8 g++-8 gcc-9 g++-9 -sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-8 8 --slave /usr/bin/g++ g++ /usr/bin/g++-8 -sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 9 --slave /usr/bin/g++ g++ /usr/bin/g++-9 -echo "Please switch to GCC 8" -sudo update-alternatives --config gcc -``` -Then, you can install CUDA, along CuDNN. -You can also refer to this [dockerfile](https://github.com/opendr-eu/opendr/blob/master/Dockerfile-cuda) for installation instructions. -Note that NVIDIA 30xx GPUs may not be fully supported, due to CUDA limitations. +If you plan to use GPU-enabled functionalities, then you are advised to install [CUDA 11.1](https://developer.nvidia.com/cuda-11.1.0-download-archive), along with [CuDNN](https://developer.nvidia.com/cudnn). + # Installing using *pip* @@ -95,14 +85,16 @@ This is not needed for newer CPUs. The same OpenDR package is used for both CPU and GPU systems. However, you need to have the appropriate GPU-enabled dependencies installed to use a GPU with OpenDR. If you plan to use GPU, then you should first install [mxnet-cuda](https://mxnet.apache.org/versions/1.4.1/install/index.html?platform=Linux&language=Python&processor=CPU) and [detectron2](https://detectron2.readthedocs.io/en/latest/tutorials/install.html). 
-For example, if you stick with the default PyTorch version (1.7) and use CUDA10.2, then you can simply follow: +For example, if you stick with the default PyTorch version (1.8) and use CUDA11.1, then you can simply follow: ```bash sudo apt install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev libeigen3-dev python3 -m venv venv source venv/bin/activate pip install wheel +pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html +python3 -m pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.7/index.html -pip install mxnet-cu102==1.8.0 +pip3 install mxnet-cu111==1.8.0 pip install opendr-toolkit-engine pip install opendr-toolkit ``` From 07143cb7ab57d597fedfe4ea297f57ae58ae6266 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 25 Feb 2022 10:39:05 +0200 Subject: [PATCH 14/69] More strict stable-baselines3 ver --- src/opendr/control/mobile_manipulation/dependencies.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opendr/control/mobile_manipulation/dependencies.ini b/src/opendr/control/mobile_manipulation/dependencies.ini index b4823382ea..9be684a69e 100644 --- a/src/opendr/control/mobile_manipulation/dependencies.ini +++ b/src/opendr/control/mobile_manipulation/dependencies.ini @@ -18,7 +18,7 @@ python=torch==1.8.1 pyparsing pillow>=8.3.2 scipy - stable-baselines3>=0.10.0 + stable-baselines3==0.10.0 gym>=0.19.0 cloudpickle>=1.5.0 defusedxml From 73787aec8f1700a1615dacb993f70c175cb8547f Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 25 Feb 2022 10:45:26 +0200 Subject: [PATCH 15/69] Updated dep version --- src/opendr/control/mobile_manipulation/dependencies.ini | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/opendr/control/mobile_manipulation/dependencies.ini b/src/opendr/control/mobile_manipulation/dependencies.ini index 9be684a69e..7c5aa6638f 100644 --- a/src/opendr/control/mobile_manipulation/dependencies.ini +++ b/src/opendr/control/mobile_manipulation/dependencies.ini @@ -18,8 +18,8 @@ python=torch==1.8.1 pyparsing pillow>=8.3.2 scipy - stable-baselines3==0.10.0 - gym>=0.19.0 + stable-baselines3==1.1.0 + gym==0.21.0 cloudpickle>=1.5.0 defusedxml netifaces From f89ecce9beba09e4acc6eb74cbc4db376b9f3987 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 25 Feb 2022 14:22:20 +0200 Subject: [PATCH 16/69] Update CHANGELOG.md --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50adff26f5..735d460db0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ Released on XX, XXth, 2022. - Simplified the installation process for pip by including the appropriate post-installation scripts ([#201](https://github.com/opendr-eu/opendr/pull/201)). - Improved the structure of the toolkit by moving `io` from `utils` to `engine.helper` ([#201](https://github.com/opendr-eu/opendr/pull/201)). - Added support for `post-install` scripts and `opendr` dependencies in `.ini` files ([#201](https://github.com/opendr-eu/opendr/pull/201)). 
+ - Updated toolkit to support CUDA 11.1 - Bug Fixes: - Updated wheel building pipeline to include missing files and removed unnecessary dependencies ([#200](https://github.com/opendr-eu/opendr/pull/200)). - Dependency Updates: @@ -18,4 +19,4 @@ Released on XX, XXth, 2022. ## Version 1.0 -Released on December 31th, 2021. \ No newline at end of file +Released on December 31th, 2021. From 3bf4a790d9e3cf3a1e399abf74f2220b69a9987d Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Wed, 2 Mar 2022 06:48:37 +0200 Subject: [PATCH 17/69] Update tests_suite.yml --- .github/workflows/tests_suite.yml | 126 +++++++++--------------------- 1 file changed, 35 insertions(+), 91 deletions(-) diff --git a/.github/workflows/tests_suite.yml b/.github/workflows/tests_suite.yml index c8998fb03d..a2baa3c791 100644 --- a/.github/workflows/tests_suite.yml +++ b/.github/workflows/tests_suite.yml @@ -36,7 +36,7 @@ jobs: - uses: actions/checkout@v2 if: matrix.os == 'ubuntu-20.04' || github.event.pull_request.draft == false with: - submodules: true + submodules: true - name: Set up Python 3.8 if: matrix.os == 'ubuntu-20.04' || github.event.pull_request.draft == false uses: actions/setup-python@v2 @@ -84,7 +84,7 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true + submodules: true - name: Set up Python 3.8 uses: actions/setup-python@v2 with: @@ -113,7 +113,7 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true + submodules: true - name: Set up Python 3.8 uses: actions/setup-python@v2 with: @@ -128,6 +128,7 @@ jobs: - name: Upload wheel as artifact uses: actions/upload-artifact@v2 with: + name: wheel-artifact path: dist/*.tar.gz build-docker: @@ -137,7 +138,7 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true + submodules: true - name: Build image run: | docker build --tag opendr/opendr-toolkit:cpu_test --file Dockerfile . @@ -145,6 +146,7 @@ jobs: - name: Upload image artifact uses: actions/upload-artifact@v2 with: + name: docker-artifact path: cpu_test.zip test-wheel: @@ -177,7 +179,7 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true + submodules: true - name: Set up Python 3.8 uses: actions/setup-python@v2 with: @@ -192,94 +194,12 @@ jobs: - name: Test Wheel run: | export DISABLE_BCOLZ_AVX2=true - sudo apt -y install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev libeigen3-dev + sudo apt -y install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev python3-dev python3 -m venv venv source venv/bin/activate - pip install wheel - while read f; do - package=$(sed "s/_/-/g" <<< $f) - arr=(${package//// }) - # Get the tool name (if there is a base path before) - if [ ! 
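The changelog entry above records the move to CUDA 11, and the CI step just before this point builds the CPU Docker image. As a purely illustrative companion (the `cuda_test` tag and the use of `--gpus all` via the NVIDIA Container Toolkit are assumptions, not part of these patches), the GPU image from `Dockerfile-cuda` could be built and smoke-tested locally like this:

```bash
# Illustrative only: build the CUDA image from Dockerfile-cuda and start it with
# GPU access. Assumes Docker plus the NVIDIA Container Toolkit; the tag is arbitrary.
docker build --tag opendr/opendr-toolkit:cuda_test --file Dockerfile-cuda .
docker run --gpus all --name toolkit_gpu -i opendr/opendr-toolkit:cuda_test \
  bash -c "source bin/activate.sh && python3 -c 'import torch; print(torch.cuda.is_available())'"
```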
-z ${arr[1]} ]; then - package=${arr[1]} - fi - echo "Installing $package package" - if [ "$package" == "opendr" ]; then - pip install ./artifact/artifact/opendr-toolkit-*.tar.gz - else - pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz - fi - done < packages.txt - python -m unittest discover -s tests/sources/tools/${{ matrix.package }} - test-wheel-separate: - needs: build-wheel - if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }} - strategy: - matrix: - os: [ubuntu-20.04] - package: - - engine - - utils - - perception/activity_recognition - - perception/compressive_learning - - perception/face_recognition - - perception/heart_anomaly_detection - - perception/multimodal_human_centric - - perception/object_tracking_2d - - perception/pose_estimation - - perception/speech_recognition - - perception/skeleton_based_action_recognition - - perception/semantic_segmentation - - perception/object_detection_2d - - perception/facial_expression_recognition - # - perception/object_detection_3d - # - control/mobile_manipulation - # - simulation/human_model_generation - # - control/single_demo_grasp - # - perception/object_tracking_3d - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - with: - submodules: true - - name: Set up Python 3.8 - uses: actions/setup-python@v2 - with: - python-version: 3.8 - - name: Download artifact - uses: actions/download-artifact@v2 - with: - path: artifact - - name: Get branch name - id: branch-name - uses: tj-actions/branch-names@v5.1 - - name: Test Wheel - run: | - export DISABLE_BCOLZ_AVX2=true - sudo apt -y install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev libeigen3-dev - python3 -m venv venv - source venv/bin/activate - pip install wheel - # Install engine and requirements for other packages - pip install ./artifact/artifact/opendr-toolkit-engine-*.tar.gz - # The following two are dependecies for some other packages and pip cannot automatically install them if they are not on a repo - pip install ./artifact/artifact/opendr-toolkit-compressive-learning-*.tar.gz - pip install ./artifact/artifact/opendr-toolkit-object-detection-2d-*.tar.gz - - # Install specific package for testing - package=$(sed "s/_/-/g" <<< ${{ matrix.package }}) - arr=(${package//// }) - if [ ! 
-z ${arr[1]} ]; then - package=${arr[1]} - fi - echo "Installing $package package" - # Utils contains hyperparameter tuning - if [ "$package" == "utils" ]; then - pip install ./artifact/artifact/opendr-toolkit-hyperparameter-tuner-*.tar.gz - - else - pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz - fi + wget https://raw.githubusercontent.com/opendr-eu/opendr/${{ steps.branch-name.outputs.current_branch }}/dependencies/pip_requirements.txt + cat pip_requirements.txt | xargs -n 1 -L 1 pip install + pip install ./artifact/artifact/*.tar.gz python -m unittest discover -s tests/sources/tools/${{ matrix.package }} test-docker: needs: build-docker @@ -319,3 +239,27 @@ jobs: docker run --name toolkit -i opendr/opendr-toolkit:cpu_test bash docker start toolkit docker exec -i toolkit bash -c "source bin/activate.sh && source tests/sources/tools/control/mobile_manipulation/run_ros.sh && python -m unittest discover -s tests/sources/tools/${{ matrix.package }}" + delete-docker-artifacts: + needs: [build-docker, test-docker] + if: ${{ always() }} + strategy: + matrix: + os: [ubuntu-20.04] + runs-on: ${{ matrix.os }} + steps: + - name: Delete docker artifacts + uses: geekyeggo/delete-artifact@v1 + with: + name: docker-artifact + delete-wheel-artifacts: + needs: [build-wheel, test-wheel] + if: ${{ always() }} + strategy: + matrix: + os: [ubuntu-20.04] + runs-on: ${{ matrix.os }} + steps: + - name: Delete wheel artifacts + uses: geekyeggo/delete-artifact@v1 + with: + name: wheel-artifact From a4ca32f73d8f6a28d05fb4f4fa516c05763d807e Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Wed, 2 Mar 2022 06:49:07 +0200 Subject: [PATCH 18/69] Update tests_suite_develop.yml --- .github/workflows/tests_suite_develop.yml | 136 ++++++---------------- 1 file changed, 38 insertions(+), 98 deletions(-) diff --git a/.github/workflows/tests_suite_develop.yml b/.github/workflows/tests_suite_develop.yml index f5ab5b0398..a2baa3c791 100644 --- a/.github/workflows/tests_suite_develop.yml +++ b/.github/workflows/tests_suite_develop.yml @@ -1,6 +1,8 @@ -name: Test Suite (develop) +name: Test Suite (master) on: + pull_request: + types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled] schedule: - cron: '0 23 * * *' @@ -34,8 +36,7 @@ jobs: - uses: actions/checkout@v2 if: matrix.os == 'ubuntu-20.04' || github.event.pull_request.draft == false with: - submodules: true - ref: develop + submodules: true - name: Set up Python 3.8 if: matrix.os == 'ubuntu-20.04' || github.event.pull_request.draft == false uses: actions/setup-python@v2 @@ -83,8 +84,7 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true - ref: develop + submodules: true - name: Set up Python 3.8 uses: actions/setup-python@v2 with: @@ -113,8 +113,7 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true - ref: develop + submodules: true - name: Set up Python 3.8 uses: actions/setup-python@v2 with: @@ -129,6 +128,7 @@ jobs: - name: Upload wheel as artifact uses: actions/upload-artifact@v2 with: + name: wheel-artifact path: dist/*.tar.gz build-docker: @@ -138,8 +138,7 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true - ref: develop + submodules: true - name: Build image run: | docker build --tag opendr/opendr-toolkit:cpu_test --file Dockerfile . 
@@ -147,6 +146,7 @@ jobs: - name: Upload image artifact uses: actions/upload-artifact@v2 with: + name: docker-artifact path: cpu_test.zip test-wheel: @@ -179,8 +179,7 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true - ref: develop + submodules: true - name: Set up Python 3.8 uses: actions/setup-python@v2 with: @@ -195,95 +194,12 @@ jobs: - name: Test Wheel run: | export DISABLE_BCOLZ_AVX2=true - sudo apt -y install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev libeigen3-dev + sudo apt -y install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev python3-dev python3 -m venv venv source venv/bin/activate - pip install wheel - while read f; do - package=$(sed "s/_/-/g" <<< $f) - arr=(${package//// }) - # Get the tool name (if there is a base path before) - if [ ! -z ${arr[1]} ]; then - package=${arr[1]} - fi - echo "Installing $package package" - if [ "$package" == "opendr" ]; then - pip install ./artifact/artifact/opendr-toolkit-*.tar.gz - else - pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz - fi - done < packages.txt - python -m unittest discover -s tests/sources/tools/${{ matrix.package }} - test-wheel-separate: - needs: build-wheel - if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }} - strategy: - matrix: - os: [ubuntu-20.04] - package: - - engine - - utils - - perception/activity_recognition - - perception/compressive_learning - - perception/face_recognition - - perception/heart_anomaly_detection - - perception/multimodal_human_centric - - perception/object_tracking_2d - - perception/pose_estimation - - perception/speech_recognition - - perception/skeleton_based_action_recognition - - perception/semantic_segmentation - - perception/object_detection_2d - - perception/facial_expression_recognition - # - perception/object_detection_3d - # - control/mobile_manipulation - # - simulation/human_model_generation - # - control/single_demo_grasp - # - perception/object_tracking_3d - runs-on: ubuntu-20.04 - steps: - - uses: actions/checkout@v2 - with: - submodules: true - ref: develop - - name: Set up Python 3.8 - uses: actions/setup-python@v2 - with: - python-version: 3.8 - - name: Download artifact - uses: actions/download-artifact@v2 - with: - path: artifact - - name: Get branch name - id: branch-name - uses: tj-actions/branch-names@v5.1 - - name: Test Wheel - run: | - export DISABLE_BCOLZ_AVX2=true - sudo apt -y install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev libeigen3-dev - python3 -m venv venv - source venv/bin/activate - pip install wheel - # Install engine and requirements for other packages - pip install ./artifact/artifact/opendr-toolkit-engine-*.tar.gz - # The following two are dependecies for some other packages and pip cannot automatically install them if they are not on a repo - pip install ./artifact/artifact/opendr-toolkit-compressive-learning-*.tar.gz - pip install ./artifact/artifact/opendr-toolkit-object-detection-2d-*.tar.gz - - # Install specific package for testing - package=$(sed "s/_/-/g" <<< ${{ matrix.package }}) - arr=(${package//// }) - if [ ! 
-z ${arr[1]} ]; then - package=${arr[1]} - fi - echo "Installing $package package" - # Utils contains hyperparameter tuning - if [ "$package" == "utils" ]; then - pip install ./artifact/artifact/opendr-toolkit-hyperparameter-tuner-*.tar.gz - - else - pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz - fi + wget https://raw.githubusercontent.com/opendr-eu/opendr/${{ steps.branch-name.outputs.current_branch }}/dependencies/pip_requirements.txt + cat pip_requirements.txt | xargs -n 1 -L 1 pip install + pip install ./artifact/artifact/*.tar.gz python -m unittest discover -s tests/sources/tools/${{ matrix.package }} test-docker: needs: build-docker @@ -323,3 +239,27 @@ jobs: docker run --name toolkit -i opendr/opendr-toolkit:cpu_test bash docker start toolkit docker exec -i toolkit bash -c "source bin/activate.sh && source tests/sources/tools/control/mobile_manipulation/run_ros.sh && python -m unittest discover -s tests/sources/tools/${{ matrix.package }}" + delete-docker-artifacts: + needs: [build-docker, test-docker] + if: ${{ always() }} + strategy: + matrix: + os: [ubuntu-20.04] + runs-on: ${{ matrix.os }} + steps: + - name: Delete docker artifacts + uses: geekyeggo/delete-artifact@v1 + with: + name: docker-artifact + delete-wheel-artifacts: + needs: [build-wheel, test-wheel] + if: ${{ always() }} + strategy: + matrix: + os: [ubuntu-20.04] + runs-on: ${{ matrix.os }} + steps: + - name: Delete wheel artifacts + uses: geekyeggo/delete-artifact@v1 + with: + name: wheel-artifact From d4adb8bdfc30b697856a656cde5c62158fecba4b Mon Sep 17 00:00:00 2001 From: ad-daniel Date: Thu, 3 Mar 2022 14:48:54 +0100 Subject: [PATCH 19/69] Restore develop workflow --- .github/workflows/tests_suite_develop.yml | 136 ++++++++++++++++------ 1 file changed, 98 insertions(+), 38 deletions(-) diff --git a/.github/workflows/tests_suite_develop.yml b/.github/workflows/tests_suite_develop.yml index a2baa3c791..f5ab5b0398 100644 --- a/.github/workflows/tests_suite_develop.yml +++ b/.github/workflows/tests_suite_develop.yml @@ -1,8 +1,6 @@ -name: Test Suite (master) +name: Test Suite (develop) on: - pull_request: - types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled] schedule: - cron: '0 23 * * *' @@ -36,7 +34,8 @@ jobs: - uses: actions/checkout@v2 if: matrix.os == 'ubuntu-20.04' || github.event.pull_request.draft == false with: - submodules: true + submodules: true + ref: develop - name: Set up Python 3.8 if: matrix.os == 'ubuntu-20.04' || github.event.pull_request.draft == false uses: actions/setup-python@v2 @@ -84,7 +83,8 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true + submodules: true + ref: develop - name: Set up Python 3.8 uses: actions/setup-python@v2 with: @@ -113,7 +113,8 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true + submodules: true + ref: develop - name: Set up Python 3.8 uses: actions/setup-python@v2 with: @@ -128,7 +129,6 @@ jobs: - name: Upload wheel as artifact uses: actions/upload-artifact@v2 with: - name: wheel-artifact path: dist/*.tar.gz build-docker: @@ -138,7 +138,8 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true + submodules: true + ref: develop - name: Build image run: | docker build --tag opendr/opendr-toolkit:cpu_test --file Dockerfile . 
@@ -146,7 +147,6 @@ jobs: - name: Upload image artifact uses: actions/upload-artifact@v2 with: - name: docker-artifact path: cpu_test.zip test-wheel: @@ -179,7 +179,8 @@ jobs: steps: - uses: actions/checkout@v2 with: - submodules: true + submodules: true + ref: develop - name: Set up Python 3.8 uses: actions/setup-python@v2 with: @@ -194,12 +195,95 @@ jobs: - name: Test Wheel run: | export DISABLE_BCOLZ_AVX2=true - sudo apt -y install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev python3-dev + sudo apt -y install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev libeigen3-dev python3 -m venv venv source venv/bin/activate - wget https://raw.githubusercontent.com/opendr-eu/opendr/${{ steps.branch-name.outputs.current_branch }}/dependencies/pip_requirements.txt - cat pip_requirements.txt | xargs -n 1 -L 1 pip install - pip install ./artifact/artifact/*.tar.gz + pip install wheel + while read f; do + package=$(sed "s/_/-/g" <<< $f) + arr=(${package//// }) + # Get the tool name (if there is a base path before) + if [ ! -z ${arr[1]} ]; then + package=${arr[1]} + fi + echo "Installing $package package" + if [ "$package" == "opendr" ]; then + pip install ./artifact/artifact/opendr-toolkit-*.tar.gz + else + pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz + fi + done < packages.txt + python -m unittest discover -s tests/sources/tools/${{ matrix.package }} + test-wheel-separate: + needs: build-wheel + if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }} + strategy: + matrix: + os: [ubuntu-20.04] + package: + - engine + - utils + - perception/activity_recognition + - perception/compressive_learning + - perception/face_recognition + - perception/heart_anomaly_detection + - perception/multimodal_human_centric + - perception/object_tracking_2d + - perception/pose_estimation + - perception/speech_recognition + - perception/skeleton_based_action_recognition + - perception/semantic_segmentation + - perception/object_detection_2d + - perception/facial_expression_recognition + # - perception/object_detection_3d + # - control/mobile_manipulation + # - simulation/human_model_generation + # - control/single_demo_grasp + # - perception/object_tracking_3d + runs-on: ubuntu-20.04 + steps: + - uses: actions/checkout@v2 + with: + submodules: true + ref: develop + - name: Set up Python 3.8 + uses: actions/setup-python@v2 + with: + python-version: 3.8 + - name: Download artifact + uses: actions/download-artifact@v2 + with: + path: artifact + - name: Get branch name + id: branch-name + uses: tj-actions/branch-names@v5.1 + - name: Test Wheel + run: | + export DISABLE_BCOLZ_AVX2=true + sudo apt -y install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev libeigen3-dev + python3 -m venv venv + source venv/bin/activate + pip install wheel + # Install engine and requirements for other packages + pip install ./artifact/artifact/opendr-toolkit-engine-*.tar.gz + # The following two are dependecies for some other packages and pip cannot automatically install them if they are not on a repo + pip install ./artifact/artifact/opendr-toolkit-compressive-learning-*.tar.gz + pip install ./artifact/artifact/opendr-toolkit-object-detection-2d-*.tar.gz + + # Install specific package for testing + package=$(sed "s/_/-/g" <<< ${{ matrix.package }}) + 
arr=(${package//// }) + if [ ! -z ${arr[1]} ]; then + package=${arr[1]} + fi + echo "Installing $package package" + # Utils contains hyperparameter tuning + if [ "$package" == "utils" ]; then + pip install ./artifact/artifact/opendr-toolkit-hyperparameter-tuner-*.tar.gz + + else + pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz + fi python -m unittest discover -s tests/sources/tools/${{ matrix.package }} test-docker: needs: build-docker @@ -239,27 +323,3 @@ jobs: docker run --name toolkit -i opendr/opendr-toolkit:cpu_test bash docker start toolkit docker exec -i toolkit bash -c "source bin/activate.sh && source tests/sources/tools/control/mobile_manipulation/run_ros.sh && python -m unittest discover -s tests/sources/tools/${{ matrix.package }}" - delete-docker-artifacts: - needs: [build-docker, test-docker] - if: ${{ always() }} - strategy: - matrix: - os: [ubuntu-20.04] - runs-on: ${{ matrix.os }} - steps: - - name: Delete docker artifacts - uses: geekyeggo/delete-artifact@v1 - with: - name: docker-artifact - delete-wheel-artifacts: - needs: [build-wheel, test-wheel] - if: ${{ always() }} - strategy: - matrix: - os: [ubuntu-20.04] - runs-on: ${{ matrix.os }} - steps: - - name: Delete wheel artifacts - uses: geekyeggo/delete-artifact@v1 - with: - name: wheel-artifact From faac23f5238f93f244e5b609613866c6e84fddb0 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 4 Mar 2022 01:10:24 +0200 Subject: [PATCH 20/69] Small consistent fix --- docs/reference/installation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/installation.md b/docs/reference/installation.md index 857e2aaae3..1a69890e10 100644 --- a/docs/reference/installation.md +++ b/docs/reference/installation.md @@ -91,10 +91,10 @@ sudo apt install python3.8-venv libfreetype6-dev git build-essential cmake pytho python3 -m venv venv source venv/bin/activate pip install wheel -pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html +pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html python3 -m pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.7/index.html -pip3 install mxnet-cu111==1.8.0 +pip install mxnet-cu111==1.8.0 pip install opendr-toolkit-engine pip install opendr-toolkit ``` From cb0fa11a31313037de14b62628250bdc2c35a223 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 4 Mar 2022 01:24:47 +0200 Subject: [PATCH 21/69] Update installation.md --- docs/reference/installation.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/reference/installation.md b/docs/reference/installation.md index 1a69890e10..0f0541d979 100644 --- a/docs/reference/installation.md +++ b/docs/reference/installation.md @@ -92,8 +92,7 @@ python3 -m venv venv source venv/bin/activate pip install wheel pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html -python3 -m pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html -pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.7/index.html +pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html pip install 
mxnet-cu111==1.8.0 pip install opendr-toolkit-engine pip install opendr-toolkit From aaf3677f785daded86d199bca1b19036fcbd51f2 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 4 Mar 2022 01:25:09 +0200 Subject: [PATCH 22/69] Update install.sh --- bin/install.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/install.sh b/bin/install.sh index 86584d81c9..fdb2112bcd 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -39,7 +39,7 @@ sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then echo "[INFO] Installing mxnet-cu111==1.8.0. You can override this later if you are using a different CUDA version." pip3 install mxnet-cu111==1.8.0 - echo "[INFO] Installing torch==1.7.1+cu110. You can override this later if you are using a different CUDA version." + echo "[INFO] Installing torch==1.8.1+cu110. You can override this later if you are using a different CUDA version." pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html fi From 2256873c27a0ea5eac6aa77d8d697107b3c3083b Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Tue, 8 Mar 2022 09:08:06 +0200 Subject: [PATCH 23/69] Update to CUDA11.2 --- Dockerfile-cuda | 2 +- bin/install.sh | 6 +++--- docs/reference/installation.md | 6 +++--- src/opendr/control/single_demo_grasp/Makefile | 8 +------- src/opendr/control/single_demo_grasp/dependencies.ini | 2 +- 5 files changed, 9 insertions(+), 15 deletions(-) diff --git a/Dockerfile-cuda b/Dockerfile-cuda index 5e460f5b4f..1a0d080fda 100644 --- a/Dockerfile-cuda +++ b/Dockerfile-cuda @@ -1,4 +1,4 @@ -FROM nvidia/cuda:11.1-devel +FROM nvidia/cuda:11.2.0-devel-ubuntu20.04 # Install dependencies RUN apt-get update && \ diff --git a/bin/install.sh b/bin/install.sh index fdb2112bcd..feb24e81ac 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -37,9 +37,9 @@ sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main # If working on GPU install GPU dependencies as needed if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then - echo "[INFO] Installing mxnet-cu111==1.8.0. You can override this later if you are using a different CUDA version." - pip3 install mxnet-cu111==1.8.0 - echo "[INFO] Installing torch==1.8.1+cu110. You can override this later if you are using a different CUDA version." + echo "[INFO] Installing mxnet-cu112==1.8.0post0. You can override this later if you are using a different CUDA version." + pip3 install mxnet-cu112==1.8.0post0 + echo "[INFO] Installing torch==1.8.1+cu111. You can override this later if you are using a different CUDA version." pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html fi diff --git a/docs/reference/installation.md b/docs/reference/installation.md index 0f0541d979..1f6d662d52 100644 --- a/docs/reference/installation.md +++ b/docs/reference/installation.md @@ -85,15 +85,15 @@ This is not needed for newer CPUs. The same OpenDR package is used for both CPU and GPU systems. However, you need to have the appropriate GPU-enabled dependencies installed to use a GPU with OpenDR. If you plan to use GPU, then you should first install [mxnet-cuda](https://mxnet.apache.org/versions/1.4.1/install/index.html?platform=Linux&language=Python&processor=CPU) and [detectron2](https://detectron2.readthedocs.io/en/latest/tutorials/install.html). 
-For example, if you stick with the default PyTorch version (1.8) and use CUDA11.1, then you can simply follow: +For example, if you stick with the default PyTorch version (1.8) and use CUDA11.2, then you can simply follow: ```bash sudo apt install python3.8-venv libfreetype6-dev git build-essential cmake python3-dev wget libopenblas-dev libsndfile1 libboost-dev libeigen3-dev python3 -m venv venv source venv/bin/activate pip install wheel pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html -pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html -pip install mxnet-cu111==1.8.0 +pip install 'git+https://github.com/facebookresearch/detectron2.git' +pip install mxnet-cu112==1.8.0post0 pip install opendr-toolkit-engine pip install opendr-toolkit ``` diff --git a/src/opendr/control/single_demo_grasp/Makefile b/src/opendr/control/single_demo_grasp/Makefile index 27d96f6af8..0b50a1bf76 100644 --- a/src/opendr/control/single_demo_grasp/Makefile +++ b/src/opendr/control/single_demo_grasp/Makefile @@ -16,12 +16,6 @@ ifeq ($(MAKECMDGOALS),) MAKECMDGOALS = release endif -ifeq ($(OPENDR_DEVICE),gpu) -DETECTRON_WHEEL=https://dl.fbaipublicfiles.com/detectron2/wheels/cu111/torch1.8/index.html -else -DETECTRON_WHEEL=https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html -endif - .PHONY: release install_compilation_dependencies install_runtime_dependencies release: install_compilation_dependencies @@ -31,7 +25,7 @@ install_runtime_dependencies: install_compilation_dependencies: @+echo "#"; echo "# * Install Compilation Dependencies for single demonstration grasping *"; echo "#" - @+python3 -m pip install detectron2==0.5 -f \$(DETECTRON_WHEEL) + @+python3 -m pip install 'git+https://github.com/facebookresearch/detectron2.git' @./install_single_demo_grasp.sh help: diff --git a/src/opendr/control/single_demo_grasp/dependencies.ini b/src/opendr/control/single_demo_grasp/dependencies.ini index 59cf0f7c73..9a814d7569 100644 --- a/src/opendr/control/single_demo_grasp/dependencies.ini +++ b/src/opendr/control/single_demo_grasp/dependencies.ini @@ -9,4 +9,4 @@ python=torch==1.8.1 opendr=opendr-toolkit-engine -post-install=python3 -m pip install detectron2==0.5 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cpu/torch1.8/index.html \ No newline at end of file +post-install=python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' \ No newline at end of file From 18860aab6348b22edf815fe05b4e4d0637e62bd9 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Tue, 8 Mar 2022 09:11:38 +0200 Subject: [PATCH 24/69] Minor typo fix --- docs/reference/object-detection-2d-yolov3.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/object-detection-2d-yolov3.md b/docs/reference/object-detection-2d-yolov3.md index 38cbb298a8..f9092819a8 100644 --- a/docs/reference/object-detection-2d-yolov3.md +++ b/docs/reference/object-detection-2d-yolov3.md @@ -5,7 +5,7 @@ The *yolov3* module contains the *YOLOv3DetectorLearner* class, which inherits f ### Class YOLOv3DetectorLearner Bases: `engine.learners.Learner` -The *YOLOv3DetectorLearner* class is a wrapper of the SSD detector[[1]](#yolo-1) +The *YOLOv3DetectorLearner* class is a wrapper of the YOLO detector[[1]](#yolo-1) [GluonCV implementation](https://github.com/dmlc/gluon-cv/blob/master/gluoncv/model_zoo/yolo/yolo3.py). 
It can be used to perform object detection on images (inference) as well as train new object detection models. From e762070c19d2cfd80df867b02728573a69c94ccf Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Tue, 8 Mar 2022 09:31:08 +0200 Subject: [PATCH 25/69] Updated detectron install --- src/opendr/control/single_demo_grasp/dependencies.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opendr/control/single_demo_grasp/dependencies.ini b/src/opendr/control/single_demo_grasp/dependencies.ini index 9a814d7569..1791c967cd 100644 --- a/src/opendr/control/single_demo_grasp/dependencies.ini +++ b/src/opendr/control/single_demo_grasp/dependencies.ini @@ -9,4 +9,4 @@ python=torch==1.8.1 opendr=opendr-toolkit-engine -post-install=python -m pip install 'git+https://github.com/facebookresearch/detectron2.git' \ No newline at end of file +post-install=python3 -m pip install 'git+https://github.com/facebookresearch/detectron2.git' \ No newline at end of file From 31c3664d604312d6179090424f343b716b6b60d3 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Tue, 8 Mar 2022 10:30:07 +0200 Subject: [PATCH 26/69] Included compilation depedencies for detectron --- src/opendr/control/mobile_manipulation/dependencies.ini | 2 -- src/opendr/control/single_demo_grasp/dependencies.ini | 4 ++++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/opendr/control/mobile_manipulation/dependencies.ini b/src/opendr/control/mobile_manipulation/dependencies.ini index 7c5aa6638f..bdea48e92d 100644 --- a/src/opendr/control/mobile_manipulation/dependencies.ini +++ b/src/opendr/control/mobile_manipulation/dependencies.ini @@ -19,9 +19,7 @@ python=torch==1.8.1 pillow>=8.3.2 scipy stable-baselines3==1.1.0 - gym==0.21.0 cloudpickle>=1.5.0 - defusedxml netifaces opendr=opendr-toolkit-engine diff --git a/src/opendr/control/single_demo_grasp/dependencies.ini b/src/opendr/control/single_demo_grasp/dependencies.ini index 1791c967cd..8895b3dac7 100644 --- a/src/opendr/control/single_demo_grasp/dependencies.ini +++ b/src/opendr/control/single_demo_grasp/dependencies.ini @@ -1,3 +1,7 @@ +[compilation] +python=torch==1.8.1 + torchvision==0.9.1 + [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format From 67775eccb98254e6f50f40b7cf709a7521b44f1f Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Tue, 8 Mar 2022 12:59:44 +0200 Subject: [PATCH 27/69] Updated CPU/GPU support for pip --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index b3102f0580..c10bde3504 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ OpenDR aims to develop, train, deploy and evaluate deep learning models that imp OpenDR can be installed in the following ways: 1. By *cloning* this repository (CPU/GPU support) -2. Using *pip* (CPU only) +2. Using *pip* (CPU/GPU support only) 3. Using *docker* (CPU/GPU support) You can find detailed installation instruction in the [documentation](docs/reference/installation.md). 
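The patches above settle on PyTorch 1.8.1 with CUDA 11.x wheels, `mxnet-cu112`, and a detectron2 install taken directly from the upstream git repository. A minimal, non-authoritative sanity check of such an environment (assuming those packages are already installed in the active virtual environment) is:

```bash
# Report the installed GPU stack; each command only prints version information.
# Adjust expectations to whatever CUDA build you actually installed.
python3 -c "import torch; print('torch', torch.__version__, 'CUDA build', torch.version.cuda, 'GPU available:', torch.cuda.is_available())"
python3 -c "import mxnet as mx; print('mxnet', mx.__version__, 'GPUs visible:', mx.context.num_gpus())"
python3 -c "import detectron2; print('detectron2', detectron2.__version__)"
```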
From b6b21b4432e440085c8de709ddcdb78bd6860d58 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 11 Mar 2022 10:34:42 +0200 Subject: [PATCH 28/69] Updated numba version --- src/opendr/perception/object_detection_2d/dependencies.ini | 2 +- .../voxel_object_detection_3d/dependencies.ini | 2 +- .../speech_recognition/edgespeechnets/dependencies.ini | 2 +- .../perception/speech_recognition/matchboxnet/dependencies.ini | 2 +- .../speech_recognition/quadraticselfonn/dependencies.ini | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/opendr/perception/object_detection_2d/dependencies.ini b/src/opendr/perception/object_detection_2d/dependencies.ini index d70223da97..c6beccc16e 100644 --- a/src/opendr/perception/object_detection_2d/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/dependencies.ini @@ -7,7 +7,7 @@ python=mxnet==1.8.0 tqdm pycocotools>=2.0.4 easydict - numba==0.48.0 + numba==0.53.0 linux=libopenblas-dev diff --git a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini index 946b5c23a0..c045e430e9 100644 --- a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini +++ b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini @@ -12,7 +12,7 @@ python=torch==1.8.1 protobuf==3.11.3 pybind11==2.6.2 llvmlite>=0.31.0 - numba>=0.48.0 + numba>=0.53.0 pyyaml>=5.3 scikit-image>=0.16.2 easydict>=1.9 diff --git a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini index 62cf13056e..bcf5a3a98d 100644 --- a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini +++ b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini @@ -4,7 +4,7 @@ python=torch==1.8.1 librosa==0.8.0 numpy>=1.19 - numba==0.48.0 + numba==0.53.0 linux=libsndfile1 opendr=opendr-toolkit-engine diff --git a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini index 8c766fd322..f2571d9b82 100644 --- a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini +++ b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini @@ -4,7 +4,7 @@ python=torch==1.8.1 librosa==0.8.0 numpy>=1.19 - numba==0.48.0 + numba==0.53.0 linux=libsndfile1 opendr=opendr-toolkit-engine diff --git a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini index 62cf13056e..bcf5a3a98d 100644 --- a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini +++ b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini @@ -4,7 +4,7 @@ python=torch==1.8.1 librosa==0.8.0 numpy>=1.19 - numba==0.48.0 + numba==0.53.0 linux=libsndfile1 opendr=opendr-toolkit-engine From 6f5a7efa1dc42724af88ff98b07c060e81d26860 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 11 Mar 2022 10:43:57 +0200 Subject: [PATCH 29/69] Removed OPENDR_DEVICE --- bin/activate.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/bin/activate.sh b/bin/activate.sh index ca4b30fe5a..0c01405dca 100755 --- a/bin/activate.sh +++ b/bin/activate.sh @@ -4,9 +4,4 @@ export PYTHONPATH=$OPENDR_HOME/src:$PYTHONPATH export PYTHON=python3 export LD_LIBRARY_PATH=$OPENDR_HOME/lib:$LD_LIBRARY_PATH -if [[ -z 
"${OPENDR_DEVICE}" ]]; then - echo "[INFO] Set available device to CPU. You can manually change this by running 'export OPENDR_DEVICE=gpu'." - export OPENDR_DEVICE=cpu -fi - source venv/bin/activate From 7ce9520aa20101f2dbeeff3f55b52905db3cd3c0 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 11 Mar 2022 13:21:29 +0200 Subject: [PATCH 30/69] Added support for changing inference device during testing --- .../mobile_manipulation/test_mobile_manipulation.py | 3 ++- .../single_demo_grasp/test_single_demo_grasp.py | 10 +++++----- .../activity_recognition/cox3d/test_cox3d_learner.py | 5 ++++- .../activity_recognition/x3d/test_x3d_learner.py | 6 +++++- .../face_recognition/test_face_recognition.py | 7 ++++--- .../test_pstbln.py | 5 +++-- .../object_detection_2d/centernet/test_centernet.py | 5 +++-- .../perception/object_detection_2d/detr/test_detr.py | 5 ++--- .../perception/object_detection_2d/gem/test_gem.py | 6 ++++-- .../object_detection_2d/retinaface/test_retinaface.py | 4 +++- .../perception/object_detection_2d/ssd/test_ssd.py | 4 +++- .../object_detection_2d/yolov3/test_yolo3.py | 4 +++- .../test_object_detection_3d.py | 4 ++-- .../deep_sort/test_object_tracking_2d_deep_sort.py | 4 ++-- .../fair_mot/test_object_tracking_2d_fair_mot.py | 4 ++-- .../test_lightweight_open_pose.py | 5 +++-- .../skeleton_based_action_recognition/test_pstgcn.py | 5 +++-- .../skeleton_based_action_recognition/test_stbln.py | 5 +++-- .../skeleton_based_action_recognition/test_stgcn.py | 5 +++-- .../skeleton_based_action_recognition/test_tagcn.py | 5 +++-- .../edgespeechnets/test_edgespeechnets.py | 6 ++++-- .../speech_recognition/matchboxnet/test_matchboxnet.py | 6 ++++-- .../quadraticselfonn/test_quadraticselfonn.py | 6 ++++-- 23 files changed, 74 insertions(+), 45 deletions(-) diff --git a/tests/sources/tools/control/mobile_manipulation/test_mobile_manipulation.py b/tests/sources/tools/control/mobile_manipulation/test_mobile_manipulation.py index df93c9d7a9..fbb59c7c53 100644 --- a/tests/sources/tools/control/mobile_manipulation/test_mobile_manipulation.py +++ b/tests/sources/tools/control/mobile_manipulation/test_mobile_manipulation.py @@ -21,6 +21,7 @@ from opendr.control.mobile_manipulation import MobileRLLearner from pathlib import Path +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' TEST_ITERS = 3 TEMP_SAVE_DIR = Path(__file__).parent / "mobile_manipulation_tmp" EVAL_ENV_CONFIG = { @@ -56,7 +57,7 @@ def setUpClass(cls): "**********************************") cls.env = create_env(EVAL_ENV_CONFIG, task='rndstartrndgoal', node_handle="train_env", wrap_in_dummy_vec=True, flatten_obs=True) - cls.learner = MobileRLLearner(cls.env, device="cpu", iters=TEST_ITERS, temp_path=str(TEMP_SAVE_DIR)) + cls.learner = MobileRLLearner(cls.env, device=device, iters=TEST_ITERS, temp_path=str(TEMP_SAVE_DIR)) @classmethod def tearDownClass(cls): diff --git a/tests/sources/tools/control/single_demo_grasp/test_single_demo_grasp.py b/tests/sources/tools/control/single_demo_grasp/test_single_demo_grasp.py index 73108854f9..8802e06576 100644 --- a/tests/sources/tools/control/single_demo_grasp/test_single_demo_grasp.py +++ b/tests/sources/tools/control/single_demo_grasp/test_single_demo_grasp.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
- -import os import shutil import unittest import torch @@ -27,7 +25,9 @@ # OpenDR dependencies from opendr.control.single_demo_grasp import SingleDemoGraspLearner from opendr.engine.data import Image +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' # variable definitions here dir_temp = os.path.join(".", "tests", "sources", "tools", "control", "single_demo_grasp", "sdg_temp") @@ -36,7 +36,7 @@ def load_old_weights(): cfg = get_cfg() cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")) cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml") - cfg.MODEL.DEVICE = 'cpu' + cfg.MODEL.DEVICE = device model = build_model(cfg) return list(model.parameters())[0].clone() @@ -46,7 +46,7 @@ def load_weights_from_file(path_to_model): cfg = get_cfg() cfg.merge_from_file(model_zoo.get_config_file("COCO-Keypoints/keypoint_rcnn_R_50_FPN_3x.yaml")) cfg.MODEL.WEIGHTS = path_to_model # check if it's necessary - cfg.MODEL.DEVICE = 'cpu' + cfg.MODEL.DEVICE = device model = build_model(cfg) DetectionCheckpointer(model).load(path_to_model) @@ -75,7 +75,7 @@ def setUpClass(cls): print("\n\n**********************************\nTEST SingleDemoGrasp Learner\n" "**********************************") cls.learner = SingleDemoGraspLearner(object_name='pendulum', data_directory=dir_temp, lr=0.0008, batch_size=1, - num_workers=2, num_classes=1, iters=10, threshold=0.8, device='cpu', + num_workers=2, num_classes=1, iters=10, threshold=0.8, device=device, img_per_step=2) # Download all required files for testing diff --git a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py index 8d1598de46..f5476e90e8 100644 --- a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py +++ b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py @@ -22,6 +22,9 @@ from opendr.engine.data import Image from pathlib import Path from logging import getLogger +import os + +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' logger = getLogger(__name__) @@ -38,7 +41,7 @@ def setUpClass(cls): # Download model weights CoX3DLearner.download(path=Path(cls.temp_dir) / "weights", model_names={_BACKBONE}) cls.learner = CoX3DLearner( - device="cpu", temp_path=str(cls.temp_dir), iters=1, batch_size=2, backbone=_BACKBONE, num_workers=0, + device=device, temp_path=str(cls.temp_dir), iters=1, batch_size=2, backbone=_BACKBONE, num_workers=0, ) # Download mini dataset diff --git a/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py b/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py index 960be05b00..aaaa9e800f 100644 --- a/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py +++ b/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py @@ -22,6 +22,10 @@ from pathlib import Path from logging import getLogger +import os + +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + logger = getLogger(__name__) _BACKBONE = "xs" @@ -37,7 +41,7 @@ def setUpClass(cls): # Download model weights X3DLearner.download(path=Path(cls.temp_dir) / "weights", model_names={_BACKBONE}) cls.learner = X3DLearner( - device="cpu", temp_path=str(cls.temp_dir), iters=1, batch_size=2, backbone=_BACKBONE, num_workers=0, + device=device, temp_path=str(cls.temp_dir), iters=1, 
batch_size=2, backbone=_BACKBONE, num_workers=0, ) # Download mini dataset diff --git a/tests/sources/tools/perception/face_recognition/test_face_recognition.py b/tests/sources/tools/perception/face_recognition/test_face_recognition.py index 7f02c6031c..5d7704d28f 100644 --- a/tests/sources/tools/perception/face_recognition/test_face_recognition.py +++ b/tests/sources/tools/perception/face_recognition/test_face_recognition.py @@ -13,12 +13,13 @@ # limitations under the License. import numpy as np -import os import shutil import unittest from opendr.perception.face_recognition import FaceRecognitionLearner from opendr.engine.datasets import ExternalDataset +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -39,7 +40,7 @@ class TestFaceRecognitionLearner(unittest.TestCase): @classmethod def setUpClass(cls): cls.temp_dir = './face_recognition_temp' - cls.recognizer = FaceRecognitionLearner(backbone='mobilefacenet', mode='backbone_only', device="cpu", + cls.recognizer = FaceRecognitionLearner(backbone='mobilefacenet', mode='backbone_only', device=device, temp_path=cls.temp_dir, batch_size=10, checkpoint_after_iter=0) # Download all required files for testing cls.recognizer.download(cls.temp_dir, mode='pretrained') @@ -51,7 +52,7 @@ def tearDownClass(cls): rmdir(cls.temp_dir) def test_fit(self): - recognizer = FaceRecognitionLearner(backbone='mobilefacenet', mode='full', device="cpu", + recognizer = FaceRecognitionLearner(backbone='mobilefacenet', mode='full', device=device, temp_path=self.temp_dir, iters=2, batch_size=2, checkpoint_after_iter=0) dataset_path = os.path.join(self.temp_dir, 'test_data/images') diff --git a/tests/sources/tools/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/test_pstbln.py b/tests/sources/tools/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/test_pstbln.py index 4720edde23..7a272f7776 100644 --- a/tests/sources/tools/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/test_pstbln.py +++ b/tests/sources/tools/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/test_pstbln.py @@ -14,12 +14,13 @@ import unittest import shutil -import os import torch import numpy as np from opendr.perception.facial_expression_recognition import ProgressiveSpatioTemporalBLNLearner from opendr.engine.datasets import ExternalDataset +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -50,7 +51,7 @@ def setUpClass(cls): cls.temp_dir = PATH_ cls.logging_path = LOG_PATH_ cls.pstbln_facial_expression_classifier = ProgressiveSpatioTemporalBLNLearner( - device="cpu", temp_path=cls.temp_dir, + device=device, temp_path=cls.temp_dir, batch_size=5, epochs=1, checkpoint_after_iter=1, val_batch_size=5, dataset_name='CASIA', num_class=6, num_point=309, num_person=1, diff --git a/tests/sources/tools/perception/object_detection_2d/centernet/test_centernet.py b/tests/sources/tools/perception/object_detection_2d/centernet/test_centernet.py index cbbc446cab..cfe99ffb9f 100644 --- a/tests/sources/tools/perception/object_detection_2d/centernet/test_centernet.py +++ b/tests/sources/tools/perception/object_detection_2d/centernet/test_centernet.py @@ -16,11 +16,12 @@ import cv2 import gc import shutil -import os import numpy as np from opendr.perception.object_detection_2d import CenterNetDetectorLearner from opendr.perception.object_detection_2d import 
WiderPersonDataset +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -45,7 +46,7 @@ def setUpClass(cls): cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "object_detection_2d", "centernet", "centernet_temp") - cls.detector = CenterNetDetectorLearner(device="cpu", temp_path=cls.temp_dir, batch_size=1, epochs=1, + cls.detector = CenterNetDetectorLearner(device=device, temp_path=cls.temp_dir, batch_size=1, epochs=1, checkpoint_after_iter=0, lr=1e-4, img_size=320, num_workers=0) # Download all required files for testing cls.detector.download(mode="pretrained") diff --git a/tests/sources/tools/perception/object_detection_2d/detr/test_detr.py b/tests/sources/tools/perception/object_detection_2d/detr/test_detr.py index 16b5dbcaa3..90cde0b408 100644 --- a/tests/sources/tools/perception/object_detection_2d/detr/test_detr.py +++ b/tests/sources/tools/perception/object_detection_2d/detr/test_detr.py @@ -15,16 +15,15 @@ import sys import unittest import shutil -import os import torch import warnings from torch.jit import TracerWarning from opendr.engine.datasets import ExternalDataset from opendr.perception.object_detection_2d import DetrLearner from PIL import Image +import os - -DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu" +DEVICE = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' print("Using device:", DEVICE) print("Using device:", DEVICE, file=sys.stderr) diff --git a/tests/sources/tools/perception/object_detection_2d/gem/test_gem.py b/tests/sources/tools/perception/object_detection_2d/gem/test_gem.py index 331aa215b4..9790836971 100644 --- a/tests/sources/tools/perception/object_detection_2d/gem/test_gem.py +++ b/tests/sources/tools/perception/object_detection_2d/gem/test_gem.py @@ -15,15 +15,17 @@ import sys import unittest import shutil -import os import torch import warnings from opendr.engine.datasets import ExternalDataset from opendr.perception.object_detection_2d import GemLearner +import os + +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' from PIL import Image -DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu" +DEVICE = device print("Using device:", DEVICE) print("Using device:", DEVICE, file=sys.stderr) diff --git a/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py b/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py index f8f52c9390..6a5fed214c 100644 --- a/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py +++ b/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py @@ -20,7 +20,9 @@ import numpy as np from opendr.perception.object_detection_2d import RetinaFaceLearner from opendr.perception.object_detection_2d import WiderFaceDataset +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -45,7 +47,7 @@ def setUpClass(cls): cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "object_detection_2d", "retinaface", "retinaface_temp") - cls.detector = RetinaFaceLearner(device="cpu", temp_path=cls.temp_dir, batch_size=1, epochs=1, + cls.detector = RetinaFaceLearner(device=device, temp_path=cls.temp_dir, batch_size=1, epochs=1, checkpoint_after_iter=0, lr=1e-4) # Download all required files for testing cls.detector.download(mode="pretrained") diff --git a/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py 
b/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py index 3f3f03e1f5..71ea6dfa0f 100644 --- a/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py +++ b/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py @@ -20,7 +20,9 @@ import numpy as np from opendr.perception.object_detection_2d import SingleShotDetectorLearner from opendr.perception.object_detection_2d import WiderPersonDataset +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -45,7 +47,7 @@ def setUpClass(cls): cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "object_detection_2d", "ssd", "ssd_temp") - cls.detector = SingleShotDetectorLearner(device="cpu", temp_path=cls.temp_dir, batch_size=1, epochs=1, + cls.detector = SingleShotDetectorLearner(device=device, temp_path=cls.temp_dir, batch_size=1, epochs=1, checkpoint_after_iter=0, lr=1e-4, num_workers=0) # Download all required files for testing cls.detector.download(mode="pretrained") diff --git a/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py b/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py index 71b2466263..9d551edbfc 100644 --- a/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py +++ b/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py @@ -20,7 +20,9 @@ import numpy as np from opendr.perception.object_detection_2d import YOLOv3DetectorLearner from opendr.perception.object_detection_2d import WiderPersonDataset +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -45,7 +47,7 @@ def setUpClass(cls): cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "object_detection_2d", "yolov3", "yolov3_temp") - cls.detector = YOLOv3DetectorLearner(device="cpu", temp_path=cls.temp_dir, batch_size=1, epochs=1, + cls.detector = YOLOv3DetectorLearner(device=device, temp_path=cls.temp_dir, batch_size=1, epochs=1, checkpoint_after_iter=0, lr=1e-4, num_workers=0, img_size=320) # Download all required files for testing cls.detector.download(mode="pretrained") diff --git a/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py b/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py index 2372294671..4c628ed52a 100644 --- a/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py +++ b/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py @@ -20,9 +20,9 @@ from opendr.engine.datasets import PointCloudsDatasetIterator from opendr.perception.object_detection_3d import VoxelObjectDetection3DLearner from opendr.perception.object_detection_3d import KittiDataset, LabeledPointCloudsDatasetIterator +import os - -DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu" +DEVICE = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' print("Using device:", DEVICE) print("Using device:", DEVICE, file=sys.stderr) diff --git a/tests/sources/tools/perception/object_tracking_2d/deep_sort/test_object_tracking_2d_deep_sort.py b/tests/sources/tools/perception/object_tracking_2d/deep_sort/test_object_tracking_2d_deep_sort.py index 0c4e35256f..f703f87ff9 100644 --- a/tests/sources/tools/perception/object_tracking_2d/deep_sort/test_object_tracking_2d_deep_sort.py +++ 
b/tests/sources/tools/perception/object_tracking_2d/deep_sort/test_object_tracking_2d_deep_sort.py @@ -15,7 +15,6 @@ import sys import unittest import shutil -import os import torch from opendr.perception.object_tracking_2d import ObjectTracking2DDeepSortLearner from opendr.perception.object_tracking_2d import ( @@ -26,8 +25,9 @@ MotDataset, RawMotWithDetectionsDatasetIterator, ) +import os -DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu" +DEVICE = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' print("Using device:", DEVICE) print("Using device:", DEVICE, file=sys.stderr) diff --git a/tests/sources/tools/perception/object_tracking_2d/fair_mot/test_object_tracking_2d_fair_mot.py b/tests/sources/tools/perception/object_tracking_2d/fair_mot/test_object_tracking_2d_fair_mot.py index b75717a668..7fbea8a277 100644 --- a/tests/sources/tools/perception/object_tracking_2d/fair_mot/test_object_tracking_2d_fair_mot.py +++ b/tests/sources/tools/perception/object_tracking_2d/fair_mot/test_object_tracking_2d_fair_mot.py @@ -15,7 +15,6 @@ import sys import unittest import shutil -import os import torch from opendr.perception.object_tracking_2d import ( MotDataset, @@ -23,8 +22,9 @@ RawMotDatasetIterator, ) from opendr.perception.object_tracking_2d import ObjectTracking2DFairMotLearner +import os -DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu" +DEVICE = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' print("Using device:", DEVICE) print("Using device:", DEVICE, file=sys.stderr) diff --git a/tests/sources/tools/perception/pose_estimation/lightweight_open_pose/test_lightweight_open_pose.py b/tests/sources/tools/perception/pose_estimation/lightweight_open_pose/test_lightweight_open_pose.py index 58c00eb751..3b0a063dd9 100644 --- a/tests/sources/tools/perception/pose_estimation/lightweight_open_pose/test_lightweight_open_pose.py +++ b/tests/sources/tools/perception/pose_estimation/lightweight_open_pose/test_lightweight_open_pose.py @@ -14,13 +14,14 @@ import unittest import shutil -import os import torch from opendr.perception.pose_estimation import LightweightOpenPoseLearner from opendr.engine.datasets import ExternalDataset from opendr.engine.data import Image import warnings +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -45,7 +46,7 @@ def setUpClass(cls): cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "pose_estimation", "lightweight_open_pose", "lw_open_pose_temp") - cls.pose_estimator = LightweightOpenPoseLearner(device="cpu", temp_path=cls.temp_dir, batch_size=1, epochs=1, + cls.pose_estimator = LightweightOpenPoseLearner(device=device, temp_path=cls.temp_dir, batch_size=1, epochs=1, checkpoint_after_iter=0, num_workers=1) # Download all required files for testing cls.pose_estimator.download(mode="pretrained") diff --git a/tests/sources/tools/perception/skeleton_based_action_recognition/test_pstgcn.py b/tests/sources/tools/perception/skeleton_based_action_recognition/test_pstgcn.py index 1ec279c2ae..3d76d93a63 100644 --- a/tests/sources/tools/perception/skeleton_based_action_recognition/test_pstgcn.py +++ b/tests/sources/tools/perception/skeleton_based_action_recognition/test_pstgcn.py @@ -14,12 +14,13 @@ import unittest import shutil -import os import torch import numpy as np from opendr.perception.skeleton_based_action_recognition import ProgressiveSpatioTemporalGCNLearner from opendr.engine.datasets import ExternalDataset +import os +device = 
os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -48,7 +49,7 @@ def setUpClass(cls): cls.temp_dir = PATH_ cls.logging_path = LOG_PATH_ cls.pstgcn_action_classifier = ProgressiveSpatioTemporalGCNLearner( - device="cpu", temp_path=cls.temp_dir, + device=device, temp_path=cls.temp_dir, batch_size=2, epochs=1, checkpoint_after_iter=1, val_batch_size=2, dataset_name='nturgbd_cv', diff --git a/tests/sources/tools/perception/skeleton_based_action_recognition/test_stbln.py b/tests/sources/tools/perception/skeleton_based_action_recognition/test_stbln.py index f95d01f570..adc353971b 100644 --- a/tests/sources/tools/perception/skeleton_based_action_recognition/test_stbln.py +++ b/tests/sources/tools/perception/skeleton_based_action_recognition/test_stbln.py @@ -14,12 +14,13 @@ import unittest import shutil -import os import torch import numpy as np from opendr.perception.skeleton_based_action_recognition import SpatioTemporalGCNLearner from opendr.engine.datasets import ExternalDataset +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -47,7 +48,7 @@ def setUpClass(cls): "*********************************") cls.temp_dir = PATH_ cls.logging_path = LOG_PATH_ - cls.stbln_action_classifier = SpatioTemporalGCNLearner(device="cpu", temp_path=cls.temp_dir, + cls.stbln_action_classifier = SpatioTemporalGCNLearner(device=device, temp_path=cls.temp_dir, batch_size=2, epochs=1, checkpoint_after_iter=1, val_batch_size=2, dataset_name='nturgbd_cv', diff --git a/tests/sources/tools/perception/skeleton_based_action_recognition/test_stgcn.py b/tests/sources/tools/perception/skeleton_based_action_recognition/test_stgcn.py index e1ec303fbe..0f9d3cff3b 100644 --- a/tests/sources/tools/perception/skeleton_based_action_recognition/test_stgcn.py +++ b/tests/sources/tools/perception/skeleton_based_action_recognition/test_stgcn.py @@ -14,12 +14,13 @@ import unittest import shutil -import os import torch import numpy as np from opendr.perception.skeleton_based_action_recognition import SpatioTemporalGCNLearner from opendr.engine.datasets import ExternalDataset +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -47,7 +48,7 @@ def setUpClass(cls): "*********************************") cls.temp_dir = PATH_ cls.logging_path = LOG_PATH_ - cls.stgcn_action_classifier = SpatioTemporalGCNLearner(device="cpu", temp_path=cls.temp_dir, + cls.stgcn_action_classifier = SpatioTemporalGCNLearner(device=device, temp_path=cls.temp_dir, batch_size=2, epochs=1, checkpoint_after_iter=1, val_batch_size=2, dataset_name='nturgbd_cv', diff --git a/tests/sources/tools/perception/skeleton_based_action_recognition/test_tagcn.py b/tests/sources/tools/perception/skeleton_based_action_recognition/test_tagcn.py index b6fea730df..f0ab3ce646 100644 --- a/tests/sources/tools/perception/skeleton_based_action_recognition/test_tagcn.py +++ b/tests/sources/tools/perception/skeleton_based_action_recognition/test_tagcn.py @@ -14,12 +14,13 @@ import unittest import shutil -import os import torch import numpy as np from opendr.perception.skeleton_based_action_recognition import SpatioTemporalGCNLearner from opendr.engine.datasets import ExternalDataset +import os +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' def rmfile(path): try: @@ -47,7 +48,7 @@ def setUpClass(cls): "*********************************") cls.temp_dir = PATH_ cls.logging_path = LOG_PATH_ - 
cls.tagcn_action_classifier = SpatioTemporalGCNLearner(device="cpu", temp_path=cls.temp_dir, + cls.tagcn_action_classifier = SpatioTemporalGCNLearner(device=device, temp_path=cls.temp_dir, batch_size=2, epochs=1, checkpoint_after_iter=1, val_batch_size=2, dataset_name='nturgbd_cv', diff --git a/tests/sources/tools/perception/speech_recognition/edgespeechnets/test_edgespeechnets.py b/tests/sources/tools/perception/speech_recognition/edgespeechnets/test_edgespeechnets.py index 57fdfee017..b95a6b7463 100644 --- a/tests/sources/tools/perception/speech_recognition/edgespeechnets/test_edgespeechnets.py +++ b/tests/sources/tools/perception/speech_recognition/edgespeechnets/test_edgespeechnets.py @@ -13,7 +13,6 @@ # limitations under the License. import json -import os import shutil import unittest @@ -24,6 +23,9 @@ from opendr.engine.data import Timeseries from opendr.engine.datasets import DatasetIterator from opendr.engine.target import Category +import os + +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' TEST_BATCH_SIZE = 2 TEST_EPOCHS = 1 @@ -53,7 +55,7 @@ class EdgeSpeechNetsTest(unittest.TestCase): def setUpClass(cls): print("\n\n**********************************\nTEST Edge Speech Nets Learner\n" "**********************************") - cls.learner = EdgeSpeechNetsLearner(device="cpu", output_classes_n=TEST_CLASSES_N, iters=TEST_EPOCHS) + cls.learner = EdgeSpeechNetsLearner(device=device, output_classes_n=TEST_CLASSES_N, iters=TEST_EPOCHS) @classmethod def tearDownClass(cls): diff --git a/tests/sources/tools/perception/speech_recognition/matchboxnet/test_matchboxnet.py b/tests/sources/tools/perception/speech_recognition/matchboxnet/test_matchboxnet.py index cdc29e0de8..b4e130757f 100644 --- a/tests/sources/tools/perception/speech_recognition/matchboxnet/test_matchboxnet.py +++ b/tests/sources/tools/perception/speech_recognition/matchboxnet/test_matchboxnet.py @@ -13,7 +13,6 @@ # limitations under the License. 
import json -import os import shutil import unittest from urllib.request import urlretrieve @@ -27,6 +26,9 @@ from opendr.engine.data import Timeseries from opendr.engine.datasets import DatasetIterator from opendr.engine.target import Category +import os + +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' TEST_BATCH_SIZE = 2 TEST_EPOCHS = 1 @@ -56,7 +58,7 @@ class MatchboxNetTest(unittest.TestCase): def setUpClass(cls): print("\n\n**********************************\nTEST Speech command recognition MatchboxNetLearner\n" "**********************************") - cls.learner = MatchboxNetLearner(device="cpu", output_classes_n=TEST_CLASSES_N, iters=TEST_EPOCHS) + cls.learner = MatchboxNetLearner(device=device, output_classes_n=TEST_CLASSES_N, iters=TEST_EPOCHS) if not os.path.exists(TEMP_SAVE_DIR): os.makedirs(TEMP_SAVE_DIR, exist_ok=True) diff --git a/tests/sources/tools/perception/speech_recognition/quadraticselfonn/test_quadraticselfonn.py b/tests/sources/tools/perception/speech_recognition/quadraticselfonn/test_quadraticselfonn.py index 81233ecfb7..777bfe01f6 100644 --- a/tests/sources/tools/perception/speech_recognition/quadraticselfonn/test_quadraticselfonn.py +++ b/tests/sources/tools/perception/speech_recognition/quadraticselfonn/test_quadraticselfonn.py @@ -14,7 +14,6 @@ import librosa import json -import os import shutil import unittest from urllib.request import urlretrieve @@ -27,6 +26,9 @@ from opendr.engine.data import Timeseries from opendr.engine.datasets import DatasetIterator from opendr.engine.target import Category +import os + +device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' TEST_BATCH_SIZE = 2 TEST_EPOCHS = 1 @@ -56,7 +58,7 @@ class QuadraticSelfOnnTest(unittest.TestCase): def setUpClass(cls): print("\n\n**********************************\nTEST Speech Quadratic Self-ONN Learner\n" "**********************************") - cls.learner = QuadraticSelfOnnLearner(device="cpu", output_classes_n=TEST_CLASSES_N, iters=TEST_EPOCHS) + cls.learner = QuadraticSelfOnnLearner(device=device, output_classes_n=TEST_CLASSES_N, iters=TEST_EPOCHS) if not os.path.exists(TEMP_SAVE_DIR): os.makedirs(TEMP_SAVE_DIR, exist_ok=True) From f95ad29b6faf66df4ca00a7c4fb3734f4178b196 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 11 Mar 2022 14:11:54 +0200 Subject: [PATCH 31/69] Style fixes --- .../object_detection_2d/centernet/test_centernet.py | 1 + .../tools/perception/object_detection_2d/gem/test_gem.py | 5 +---- .../object_detection_2d/retinaface/test_retinaface.py | 2 +- .../tools/perception/object_detection_2d/ssd/test_ssd.py | 1 + .../perception/object_detection_2d/yolov3/test_yolo3.py | 1 + .../lightweight_open_pose/test_lightweight_open_pose.py | 1 + .../skeleton_based_action_recognition/test_pstgcn.py | 1 + .../skeleton_based_action_recognition/test_stbln.py | 1 + .../skeleton_based_action_recognition/test_stgcn.py | 1 + .../skeleton_based_action_recognition/test_tagcn.py | 1 + 10 files changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/sources/tools/perception/object_detection_2d/centernet/test_centernet.py b/tests/sources/tools/perception/object_detection_2d/centernet/test_centernet.py index cfe99ffb9f..a1088b4f4a 100644 --- a/tests/sources/tools/perception/object_detection_2d/centernet/test_centernet.py +++ b/tests/sources/tools/perception/object_detection_2d/centernet/test_centernet.py @@ -23,6 +23,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) diff 
--git a/tests/sources/tools/perception/object_detection_2d/gem/test_gem.py b/tests/sources/tools/perception/object_detection_2d/gem/test_gem.py index 9790836971..7cef1e3dd2 100644 --- a/tests/sources/tools/perception/object_detection_2d/gem/test_gem.py +++ b/tests/sources/tools/perception/object_detection_2d/gem/test_gem.py @@ -20,12 +20,9 @@ from opendr.engine.datasets import ExternalDataset from opendr.perception.object_detection_2d import GemLearner import os - -device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' - from PIL import Image -DEVICE = device +DEVICE = device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' print("Using device:", DEVICE) print("Using device:", DEVICE, file=sys.stderr) diff --git a/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py b/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py index 6a5fed214c..f6670af1a6 100644 --- a/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py +++ b/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py @@ -16,7 +16,6 @@ import gc import cv2 import shutil -import os import numpy as np from opendr.perception.object_detection_2d import RetinaFaceLearner from opendr.perception.object_detection_2d import WiderFaceDataset @@ -24,6 +23,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) diff --git a/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py b/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py index 71ea6dfa0f..c2c6a756a3 100644 --- a/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py +++ b/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py @@ -24,6 +24,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) diff --git a/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py b/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py index 9d551edbfc..43f9d79bff 100644 --- a/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py +++ b/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py @@ -24,6 +24,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) diff --git a/tests/sources/tools/perception/pose_estimation/lightweight_open_pose/test_lightweight_open_pose.py b/tests/sources/tools/perception/pose_estimation/lightweight_open_pose/test_lightweight_open_pose.py index 3b0a063dd9..7a8ab06a95 100644 --- a/tests/sources/tools/perception/pose_estimation/lightweight_open_pose/test_lightweight_open_pose.py +++ b/tests/sources/tools/perception/pose_estimation/lightweight_open_pose/test_lightweight_open_pose.py @@ -23,6 +23,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) diff --git a/tests/sources/tools/perception/skeleton_based_action_recognition/test_pstgcn.py b/tests/sources/tools/perception/skeleton_based_action_recognition/test_pstgcn.py index 3d76d93a63..9d567a5d1e 100644 --- a/tests/sources/tools/perception/skeleton_based_action_recognition/test_pstgcn.py +++ b/tests/sources/tools/perception/skeleton_based_action_recognition/test_pstgcn.py @@ -22,6 +22,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) diff --git 
a/tests/sources/tools/perception/skeleton_based_action_recognition/test_stbln.py b/tests/sources/tools/perception/skeleton_based_action_recognition/test_stbln.py index adc353971b..e786d03d46 100644 --- a/tests/sources/tools/perception/skeleton_based_action_recognition/test_stbln.py +++ b/tests/sources/tools/perception/skeleton_based_action_recognition/test_stbln.py @@ -22,6 +22,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) diff --git a/tests/sources/tools/perception/skeleton_based_action_recognition/test_stgcn.py b/tests/sources/tools/perception/skeleton_based_action_recognition/test_stgcn.py index 0f9d3cff3b..13881ab0da 100644 --- a/tests/sources/tools/perception/skeleton_based_action_recognition/test_stgcn.py +++ b/tests/sources/tools/perception/skeleton_based_action_recognition/test_stgcn.py @@ -22,6 +22,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) diff --git a/tests/sources/tools/perception/skeleton_based_action_recognition/test_tagcn.py b/tests/sources/tools/perception/skeleton_based_action_recognition/test_tagcn.py index f0ab3ce646..af58ff4c06 100644 --- a/tests/sources/tools/perception/skeleton_based_action_recognition/test_tagcn.py +++ b/tests/sources/tools/perception/skeleton_based_action_recognition/test_tagcn.py @@ -22,6 +22,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) From c08cdb6cadc6cfc42ba8fb23e332bcb6046ccf67 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 11 Mar 2022 15:26:35 +0200 Subject: [PATCH 32/69] Style fixes --- .../tools/perception/face_recognition/test_face_recognition.py | 1 + .../landmark_based_facial_expression_recognition/test_pstbln.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/sources/tools/perception/face_recognition/test_face_recognition.py b/tests/sources/tools/perception/face_recognition/test_face_recognition.py index 5d7704d28f..dd2cf555b5 100644 --- a/tests/sources/tools/perception/face_recognition/test_face_recognition.py +++ b/tests/sources/tools/perception/face_recognition/test_face_recognition.py @@ -21,6 +21,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) diff --git a/tests/sources/tools/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/test_pstbln.py b/tests/sources/tools/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/test_pstbln.py index 7a272f7776..558232a8f6 100644 --- a/tests/sources/tools/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/test_pstbln.py +++ b/tests/sources/tools/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/test_pstbln.py @@ -22,6 +22,7 @@ device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' + def rmfile(path): try: os.remove(path) From b1fc568614e7ff5c375d5367c3d5802bd33df8a7 Mon Sep 17 00:00:00 2001 From: ad-daniel Date: Fri, 11 Mar 2022 16:10:39 +0100 Subject: [PATCH 33/69] fix sources --- .../sources/tools/perception/object_detection_2d/ssd/test_ssd.py | 1 - .../tools/perception/object_detection_2d/yolov3/test_yolo3.py | 1 - .../voxel_object_detection_3d/test_object_detection_3d.py | 1 - 3 files changed, 3 deletions(-) diff --git a/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py 
b/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py index c2c6a756a3..0b96a5e6fa 100644 --- a/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py +++ b/tests/sources/tools/perception/object_detection_2d/ssd/test_ssd.py @@ -20,7 +20,6 @@ import numpy as np from opendr.perception.object_detection_2d import SingleShotDetectorLearner from opendr.perception.object_detection_2d import WiderPersonDataset -import os device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' diff --git a/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py b/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py index 43f9d79bff..f3fc59fe1b 100644 --- a/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py +++ b/tests/sources/tools/perception/object_detection_2d/yolov3/test_yolo3.py @@ -20,7 +20,6 @@ import numpy as np from opendr.perception.object_detection_2d import YOLOv3DetectorLearner from opendr.perception.object_detection_2d import WiderPersonDataset -import os device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' diff --git a/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py b/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py index 4c628ed52a..ae805ea1fd 100644 --- a/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py +++ b/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py @@ -20,7 +20,6 @@ from opendr.engine.datasets import PointCloudsDatasetIterator from opendr.perception.object_detection_3d import VoxelObjectDetection3DLearner from opendr.perception.object_detection_3d import KittiDataset, LabeledPointCloudsDatasetIterator -import os DEVICE = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu' From 9238d6121d940b99d220c62e69c2bc0df2654980 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 09:36:50 +0200 Subject: [PATCH 34/69] Separate tests for face detection --- .../perception/object_detection_2d/retinaface => }/__init__.py | 0 tests/sources/__init__.py | 0 tests/sources/tools/perception/face_detection_2d/__init__.py | 0 .../tools/perception/face_detection_2d/retinaface/__init__.py | 0 .../retinaface/test_retinaface.py | 2 +- 5 files changed, 1 insertion(+), 1 deletion(-) rename tests/{sources/tools/perception/object_detection_2d/retinaface => }/__init__.py (100%) create mode 100644 tests/sources/__init__.py create mode 100644 tests/sources/tools/perception/face_detection_2d/__init__.py create mode 100644 tests/sources/tools/perception/face_detection_2d/retinaface/__init__.py rename tests/sources/tools/perception/{object_detection_2d => face_detection_2d}/retinaface/test_retinaface.py (97%) diff --git a/tests/sources/tools/perception/object_detection_2d/retinaface/__init__.py b/tests/__init__.py similarity index 100% rename from tests/sources/tools/perception/object_detection_2d/retinaface/__init__.py rename to tests/__init__.py diff --git a/tests/sources/__init__.py b/tests/sources/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/sources/tools/perception/face_detection_2d/__init__.py b/tests/sources/tools/perception/face_detection_2d/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/sources/tools/perception/face_detection_2d/retinaface/__init__.py 
b/tests/sources/tools/perception/face_detection_2d/retinaface/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py b/tests/sources/tools/perception/face_detection_2d/retinaface/test_retinaface.py similarity index 97% rename from tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py rename to tests/sources/tools/perception/face_detection_2d/retinaface/test_retinaface.py index f6670af1a6..e04af006a4 100644 --- a/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py +++ b/tests/sources/tools/perception/face_detection_2d/retinaface/test_retinaface.py @@ -45,7 +45,7 @@ def setUpClass(cls): print("\n\n**********************************\nTEST RetinaFace Learner\n" "**********************************") - cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "object_detection_2d", + cls.temp_dir = os.path.join("", "tests", "sources", "tools", "perception", "object_detection_2d", "retinaface", "retinaface_temp") cls.detector = RetinaFaceLearner(device=device, temp_path=cls.temp_dir, batch_size=1, epochs=1, checkpoint_after_iter=0, lr=1e-4) From 4b8f9014bcfe82a932d65efd8f70a742a6f8595f Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 09:38:14 +0200 Subject: [PATCH 35/69] Updated testing pipeline for face detection 2d --- .github/workflows/tests_suite.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/tests_suite.yml b/.github/workflows/tests_suite.yml index 8425d55411..a8f7e7abbe 100644 --- a/.github/workflows/tests_suite.yml +++ b/.github/workflows/tests_suite.yml @@ -72,6 +72,7 @@ jobs: - perception/semantic_segmentation - control/mobile_manipulation - perception/object_detection_2d + - perception/face_detection_2d - simulation/human_model_generation - perception/facial_expression_recognition - control/single_demo_grasp @@ -169,6 +170,7 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - perception/object_detection_2d + - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ -233,6 +235,7 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - perception/object_detection_2d + - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ -303,6 +306,7 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - perception/object_detection_2d + - perception/face_detection_2d - perception/facial_expression_recognition - perception/object_detection_3d - control/mobile_manipulation From 05977d16a5dc8dcbe26c4a4f915530747e736855 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 09:48:34 +0200 Subject: [PATCH 36/69] Update installation.md --- docs/reference/installation.md | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/docs/reference/installation.md b/docs/reference/installation.md index 1f6d662d52..0774f385e2 100644 --- a/docs/reference/installation.md +++ b/docs/reference/installation.md @@ -36,9 +36,9 @@ Using dockerfiles is strongly advised (please see below), unless you know what y Please also make sure that you have enough RAM available for the installation (about 4GB of free RAM is needed for the full installation/compilation). 
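The test-suite patches above all apply the same device-selection pattern: each test module reads the `TEST_DEVICE` environment variable (documented in the installation notes just below), falls back to the CPU when it is unset, and passes the resulting string to the learner under test. A minimal, self-contained sketch of that pattern follows; `ExampleLearner` is a hypothetical stand-in for the OpenDR learner classes, not part of the toolkit.

```python
import os
import unittest


class ExampleLearner:
    """Hypothetical stand-in for an OpenDR learner that accepts a device string."""

    def __init__(self, device="cpu"):
        self.device = device


# Same fallback logic as the updated test modules: use TEST_DEVICE if set, else CPU.
device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu'


class ExampleLearnerTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # The learner is built once per test class on the selected device.
        cls.learner = ExampleLearner(device=device)

    def test_device_selection(self):
        self.assertEqual(self.learner.device, device)


if __name__ == "__main__":
    unittest.main()
```

Running the suite with `TEST_DEVICE=cuda:0` set in the environment would then exercise exactly the same tests on the first GPU, without editing any test file.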
-You can set the inference/training device using the `OPENDR_DEVICE` variable. -The toolkit defaults to using CPU. -If you want to use GPU, please set this variable accordingly: +If you want to install GPU-related dependencies, then you can appropriately set the `OPENDR_DEVICE` variable. +The toolkit defaults to using CPU. +Therefore, if you want to use GPU, please set this variable accordingly *before* running the installation script: ```bash export OPENDR_DEVICE=gpu ``` @@ -49,6 +49,10 @@ source ./bin/activate.sh ``` Then, you are ready to use the toolkit! +**NOTE:** `OPENDR_DEVICE` does not alter the inference/training device at *runtime*. +It only affects the dependency installation. +You can use the OpenDR API to change the inference device. + You can also verify the installation by using the supplied Python and C unit tests: ```bash make unittest @@ -57,6 +61,8 @@ make ctests If you plan to use GPU-enabled functionalities, then you are advised to install [CUDA 11.1](https://developer.nvidia.com/cuda-11.1.0-download-archive), along with [CuDNN](https://developer.nvidia.com/cudnn). +**HINT:** All tests probe for the `TEST_DEVICE` environmental variable when running. +If this environmental variable is set during testing, it allows for easily running all tests on a different device (e.g., setting `TEST_DEVICE=cuda:0` runs all tests on the first GPU of the system). # Installing using *pip* From 419575942949864c2f7d6fb84fc9236edb2ba224 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 10:25:06 +0200 Subject: [PATCH 37/69] Delete __init__.py --- tests/sources/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 tests/sources/__init__.py diff --git a/tests/sources/__init__.py b/tests/sources/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 From 8587493f4e357e06c5c83f8207c9a61ce5cb47f5 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 10:36:00 +0200 Subject: [PATCH 38/69] Update device for tests --- .../perception/activity_recognition/cox3d/test_cox3d_learner.py | 2 +- .../perception/activity_recognition/x3d/test_x3d_learner.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py index f5476e90e8..3e23675c87 100644 --- a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py +++ b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py @@ -99,7 +99,7 @@ def test_eval(self): def test_infer(self): ds = KineticsDataset(path=self.dataset_path, frames_per_clip=4, split="test") dl = torch.utils.data.DataLoader(ds, batch_size=2, num_workers=0) - batch = next(iter(dl))[0] + batch = next(iter(dl))[0].to(device) batch = batch[:, :, 0] # Select a single frame self.learner.load(self.temp_dir / "weights" / f"x3d_{_BACKBONE}.pyth") diff --git a/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py b/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py index aaaa9e800f..2eb1bb8ccf 100644 --- a/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py +++ b/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py @@ -99,7 +99,7 @@ def test_eval(self): def test_infer(self): ds = KineticsDataset(path=self.dataset_path, frames_per_clip=4, split="test") dl = torch.utils.data.DataLoader(ds, batch_size=2, num_workers=0) - batch
= next(iter(dl))[0] + batch = next(iter(dl))[0].to(device) self.learner.load(self.temp_dir / "weights" / f"x3d_{_BACKBONE}.pyth") From 9f5d8a39b338e365cb219818eaa97b5dd53997f0 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 10:42:24 +0200 Subject: [PATCH 39/69] Fix device identification --- src/opendr/perception/activity_recognition/x3d/x3d_learner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py index 96111545c6..64f69b23eb 100644 --- a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py +++ b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py @@ -416,7 +416,7 @@ def configure_optimizers(): self.trainer = pl.Trainer( max_epochs=epochs or self.iters, - gpus=1 if self.device == "cuda" else 0, + gpus=1 if "cuda" in self.device else 0, callbacks=[ pl.callbacks.ModelCheckpoint( save_top_k=1, @@ -455,7 +455,7 @@ def eval(self, dataset: Dataset, steps: int=None) -> Dict[str, Any]: if not hasattr(self, "trainer"): self.trainer = pl.Trainer( - gpus=1 if self.device == "cuda" else 0, + gpus=1 if "cuda" in self.device else 0, logger=_experiment_logger(), ) self.trainer.limit_test_batches = steps or self.trainer.limit_test_batches From 99eb7041e6a4b836efb5f17d305aca76bb675885 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 11:03:05 +0200 Subject: [PATCH 40/69] Fixed inference device --- src/opendr/perception/activity_recognition/x3d/x3d_learner.py | 1 + .../activity_recognition/cox3d/test_cox3d_learner.py | 4 ++-- .../perception/activity_recognition/x3d/test_x3d_learner.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py index 64f69b23eb..67993f7a2f 100644 --- a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py +++ b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py @@ -324,6 +324,7 @@ def load(self, path: Union[str, Path]): weights_path = path.parent / meta_data["model_paths"] self._load_model_weights(weights_path) + self.model.to(self.device) return self diff --git a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py index 3e23675c87..bfdc8432ac 100644 --- a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py +++ b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py @@ -99,14 +99,14 @@ def test_eval(self): def test_infer(self): ds = KineticsDataset(path=self.dataset_path, frames_per_clip=4, split="test") dl = torch.utils.data.DataLoader(ds, batch_size=2, num_workers=0) - batch = next(iter(dl))[0].to(device) + batch = next(iter(dl))[0] batch = batch[:, :, 0] # Select a single frame self.learner.load(self.temp_dir / "weights" / f"x3d_{_BACKBONE}.pyth") self.learner.model.clean_model_state() # Input is Tensor - results1 = self.learner.infer(batch) + results1 = self.learner.infer(batch.to(device)) # Results is a batch with each item summing to 1.0 assert all([torch.isclose(torch.sum(r.confidence), torch.tensor(1.0)) for r in results1]) diff --git a/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py b/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py index 2eb1bb8ccf..728eae3c13 100644 --- 
a/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py +++ b/tests/sources/tools/perception/activity_recognition/x3d/test_x3d_learner.py @@ -99,12 +99,12 @@ def test_eval(self): def test_infer(self): ds = KineticsDataset(path=self.dataset_path, frames_per_clip=4, split="test") dl = torch.utils.data.DataLoader(ds, batch_size=2, num_workers=0) - batch = next(iter(dl))[0].to(device) + batch = next(iter(dl))[0] self.learner.load(self.temp_dir / "weights" / f"x3d_{_BACKBONE}.pyth") # Input is Tensor - results1 = self.learner.infer(batch) + results1 = self.learner.infer(batch.to(device)) # Results is a batch with each item summing to 1.0 assert all([torch.isclose(torch.sum(r.confidence), torch.tensor(1.0)) for r in results1]) From 0c0de3c9cb78014ab3f3101469425852c2b636c9 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 11:03:21 +0200 Subject: [PATCH 41/69] Update workflow --- .github/workflows/test_packages.yml | 2 ++ .github/workflows/tests_suite_develop.yml | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/.github/workflows/test_packages.yml b/.github/workflows/test_packages.yml index 13cd407950..637a0170fb 100644 --- a/.github/workflows/test_packages.yml +++ b/.github/workflows/test_packages.yml @@ -40,6 +40,7 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - perception/object_detection_2d + - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ -83,6 +84,7 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - perception/object_detection_2d + - perception/face_detection_2d - perception/facial_expression_recognition - perception/object_detection_3d - control/mobile_manipulation diff --git a/.github/workflows/tests_suite_develop.yml b/.github/workflows/tests_suite_develop.yml index f5ab5b0398..42d9fe5108 100644 --- a/.github/workflows/tests_suite_develop.yml +++ b/.github/workflows/tests_suite_develop.yml @@ -71,6 +71,7 @@ jobs: - perception/semantic_segmentation - control/mobile_manipulation - perception/object_detection_2d + - perception/face_detection_2d - simulation/human_model_generation - perception/facial_expression_recognition - control/single_demo_grasp @@ -169,6 +170,7 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - perception/object_detection_2d + - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ -234,6 +236,7 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - perception/object_detection_2d + - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ -305,6 +308,7 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - perception/object_detection_2d + - perception/face_detection_2d - perception/facial_expression_recognition - perception/object_detection_3d - control/mobile_manipulation From bce4201a6919ff68ccc8c109ce9a9cac168146ba Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 11:24:20 +0200 Subject: [PATCH 42/69] Restored learner --- src/opendr/perception/activity_recognition/x3d/x3d_learner.py | 1 - 1 file changed, 1 deletion(-) diff --git a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py 
b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py index 67993f7a2f..64f69b23eb 100644 --- a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py +++ b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py @@ -324,7 +324,6 @@ def load(self, path: Union[str, Path]): weights_path = path.parent / meta_data["model_paths"] self._load_model_weights(weights_path) - self.model.to(self.device) return self From 2ba072f1e2d44dedf71fb6777a5ad67f63a3cb33 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 11:29:42 +0200 Subject: [PATCH 43/69] Fixed weight placement --- src/opendr/perception/activity_recognition/x3d/x3d_learner.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py index 64f69b23eb..2eb4635774 100644 --- a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py +++ b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py @@ -182,6 +182,7 @@ def size_ok(k): names_not_loaded = set(new_model_state.keys()) - set(to_load.keys()) if len(names_not_loaded) > 0: logger.warning(f"Some model weight could not be loaded: {names_not_loaded}") + self.model.to(self.device) return self From 230fcef363cc691bcb2ff9b139951e75d1e63134 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 11:39:30 +0200 Subject: [PATCH 44/69] Fixed weight placement --- src/opendr/perception/activity_recognition/x3d/x3d_learner.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py index 2eb4635774..f842b43b4d 100644 --- a/src/opendr/perception/activity_recognition/x3d/x3d_learner.py +++ b/src/opendr/perception/activity_recognition/x3d/x3d_learner.py @@ -433,6 +433,7 @@ def configure_optimizers(): self.trainer.limit_val_batches = steps or self.trainer.limit_val_batches self.trainer.fit(self.model, train_dataloader, val_dataloader) + self.model.to(self.device) def eval(self, dataset: Dataset, steps: int=None) -> Dict[str, Any]: """Evaluate the model on the dataset From 3098d6431b58e8c4c272dcf4550effee31226c7b Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 12:02:47 +0200 Subject: [PATCH 45/69] Fixed cuda placement check --- .../face_recognition/algorithm/head/losses.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/opendr/perception/face_recognition/algorithm/head/losses.py b/src/opendr/perception/face_recognition/algorithm/head/losses.py index 50fa08bac1..7f33108333 100755 --- a/src/opendr/perception/face_recognition/algorithm/head/losses.py +++ b/src/opendr/perception/face_recognition/algorithm/head/losses.py @@ -49,8 +49,8 @@ def forward(self, input, label): phi = torch.where(cosine > self.th, phi, cosine - self.mm) # --------------------------- Convert label to one-hot --------------------------- one_hot = torch.zeros(cosine.size()) - if self.device == 'cuda': - one_hot = one_hot.cuda(self.device) + if 'cuda' in self.device: + one_hot = one_hot.to(self.device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output = (one_hot * phi) + ((1.0 - one_hot) * cosine) output *= self.s @@ -86,8 +86,8 @@ def forward(self, input, label): phi = cosine - self.m # --------------------------- Convert label to one-hot --------------------------- one_hot = torch.zeros(cosine.size()) - if self.device == 'cuda': - one_hot = one_hot.cuda(self.device) + if 
'cuda' in self.device: + one_hot = one_hot.to(self.device) one_hot.scatter_(1, label.view(-1, 1).long(), 1) output = (one_hot * phi) + ((1.0 - one_hot) * cosine) output *= self.s @@ -154,8 +154,8 @@ def forward(self, input, label): # --------------------------- Convert label to one-hot --------------------------- one_hot = torch.zeros(cos_theta.size()) - if self.device == 'cuda': - one_hot = one_hot.cuda(self.device) + if 'cuda' in self.device: + one_hot = one_hot.to(self.device) one_hot.scatter_(1, label.view(-1, 1), 1) # --------------------------- Calculate output --------------------------- From 43f4b313906ba4765a80d41beb5979bfa4880fae Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 12:04:50 +0200 Subject: [PATCH 46/69] Fixed cuda placement check --- .../face_recognition/face_recognition_learner.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/opendr/perception/face_recognition/face_recognition_learner.py b/src/opendr/perception/face_recognition/face_recognition_learner.py index 3002233596..4945e6d6d3 100644 --- a/src/opendr/perception/face_recognition/face_recognition_learner.py +++ b/src/opendr/perception/face_recognition/face_recognition_learner.py @@ -839,8 +839,8 @@ def __load_from_onnx(self, path): self.ort_head_session = ort.InferenceSession(path_head) def __convert_to_onnx(self, verbose=False): - if self.device == 'cuda': - inp = torch.randn(1, 3, self.input_size[0], self.input_size[1]).cuda() + if 'cuda' in self.device: + inp = torch.randn(1, 3, self.input_size[0], self.input_size[1]).to(self.device) else: inp = torch.randn(1, 3, self.input_size[0], self.input_size[1]) input_names = ['data'] @@ -849,8 +849,8 @@ def __convert_to_onnx(self, verbose=False): torch.onnx.export(self.backbone_model, inp, output_name, verbose=verbose, enable_onnx_checker=True, input_names=input_names, output_names=output_names) if self.mode == 'full' and self.network_head == 'classifier': - if self.device == 'cuda': - inp = torch.randn(1, self.embedding_size).cuda() + if 'cuda' in self.device: + inp = torch.randn(1, self.embedding_size).to(self.device) else: inp = torch.randn(1, self.embedding_size) input_names = ['features'] From 812d68a7ea5c74626c6c0d345e6dac0e3da8f137 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 13:32:02 +0200 Subject: [PATCH 47/69] Fixed cuda placement check --- .../multilinear_compressive_learner.py | 8 ++-- ...progressive_spatio_temporal_bln_learner.py | 22 +++++----- ...attention_neural_bag_of_feature_learner.py | 8 ++-- .../gated_recurrent_unit_learner.py | 8 ++-- .../rgbd_hand_gesture_learner.py | 16 +++---- .../centernet/centernet_learner.py | 21 ++++++--- .../retinaface/retinaface_learner.py | 21 ++++++--- .../object_detection_2d/ssd/ssd_learner.py | 21 ++++++--- .../yolov3/yolov3_learner.py | 21 ++++++--- .../lightweight_open_pose_learner.py | 44 +++++++++---------- .../bisenet/bisenet_learner.py | 20 ++++----- ...progressive_spatio_temporal_gcn_learner.py | 18 ++++---- .../spatio_temporal_gcn_learner.py | 20 ++++----- .../edgespeechnets/edgespeechnets_learner.py | 4 +- .../matchboxnet/matchboxnet_learner.py | 4 +- .../quadraticselfonn_learner.py | 4 +- .../pifu_generator_learner.py | 2 +- 17 files changed, 149 insertions(+), 113 deletions(-) diff --git a/src/opendr/perception/compressive_learning/multilinear_compressive_learning/multilinear_compressive_learner.py b/src/opendr/perception/compressive_learning/multilinear_compressive_learning/multilinear_compressive_learner.py index 
8a71441f13..3cb420cabc 100644 --- a/src/opendr/perception/compressive_learning/multilinear_compressive_learning/multilinear_compressive_learner.py +++ b/src/opendr/perception/compressive_learning/multilinear_compressive_learning/multilinear_compressive_learner.py @@ -215,7 +215,7 @@ def fit(self, train_set, val_set=None, test_set=None, logging_path='', silent=Fa train_loader = DataLoader(DataWrapper(train_set), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=True) if val_set is None: @@ -223,14 +223,14 @@ def fit(self, train_set, val_set=None, test_set=None, logging_path='', silent=Fa else: val_loader = DataLoader(DataWrapper(val_set), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) if test_set is None: test_loader = None else: test_loader = DataLoader(DataWrapper(test_set), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) if self.test_mode and not silent: @@ -298,7 +298,7 @@ def eval(self, dataset, silent=False, verbose=True): self._validate_dataset(dataset) loader = DataLoader(DataWrapper(dataset), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) device = torch.device(self.device) diff --git a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py index 0941f362c4..89603c5aaf 100644 --- a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py +++ b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py @@ -99,7 +99,7 @@ def __init__(self, lr=1e-1, batch_size=128, optimizer_name='sgd', lr_schedule='' if self.dataset_name is None: raise ValueError(self.dataset_name + "is not a valid dataset name. 
Supported datasets: casia, ck+, afew") - if self.device == 'cuda': + if 'cuda' in self.device: self.output_device = self.device_indices[0] if type(self.device_indices) is list else self.device_indices self.__init_seed(1) @@ -162,7 +162,7 @@ def fit(self, dataset, val_dataset, logging_path='', silent=False, verbose=True, else: self.logging = False - if self.device == 'cuda': + if 'cuda' in self.device: if type(self.device_indices) is list: if len(self.device_indices) > 1: self.model = nn.DataParallel(self.model, device_ids=self.device_indices, @@ -229,7 +229,7 @@ def fit(self, dataset, val_dataset, logging_path='', silent=False, verbose=True, for batch_idx, (data, label, index) in enumerate(process): self.global_step += 1 # get data - if self.device == 'cuda': + if 'cuda' in self.device: data = Variable(data.float().cuda(self.output_device), requires_grad=False) label = Variable(label.long().cuda(self.output_device), requires_grad=False) else: @@ -351,7 +351,7 @@ def eval(self, val_dataset, val_loader=None, epoch=0, monte_carlo_dropout=True, process = tqdm(val_loader) for batch_idx, (data, label, index) in enumerate(process): with torch.no_grad(): - if self.device == "cuda": + if "cuda" in self.device: data = Variable(data.float().cuda(self.output_device), requires_grad=False) label = Variable(label.long().cuda(self.output_device), requires_grad=False) else: @@ -365,7 +365,7 @@ def eval(self, val_dataset, val_loader=None, epoch=0, monte_carlo_dropout=True, for i in range(len(output)): list_output.append(output[i].cpu()) output = torch.stack(list_output).mean(axis=0) - if self.device == "cuda": + if "cuda" in self.device: output = output.cuda(self.output_device) else: output = self.model(data) @@ -463,7 +463,7 @@ def init_model(self): else: if self.logging: shutil.copy2(inspect.getfile(PSTBLN), self.logging_path) - if self.device == 'cuda': + if 'cuda' in self.device: self.model = PSTBLN(num_class=self.num_class, num_point=self.num_point, num_person=self.num_person, in_channels=self.in_channels, topology=self.topology, blocksize=self.blocksize, cuda_=True).cuda(self.output_device) @@ -552,7 +552,7 @@ def infer(self, facial_landmarks_batch, monte_carlo_dropout=True, mcdo_repeats=1 facial_landmarks_batch = SkeletonSequence(facial_landmarks_batch) facial_landmarks_batch = torch.from_numpy(facial_landmarks_batch.numpy()) - if self.device == 'cuda': + if 'cuda' in self.device: facial_landmarks_batch = Variable(facial_landmarks_batch.float().cuda(self.output_device), requires_grad=False) else: @@ -580,7 +580,7 @@ def infer(self, facial_landmarks_batch, monte_carlo_dropout=True, mcdo_repeats=1 print('mean predicted probability for each lass is:', mean_probs) print('uncertainty of the predictions for each lass is:', std_probs) output = torch.stack(list_output).mean(axis=0) - if self.device == "cuda": + if "cuda" in self.device: output = output.cuda(self.output_device) else: output = self.model(facial_landmarks_batch) @@ -636,7 +636,7 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=True " CK+, AFEW") n = self.batch_size onnx_input = torch.randn(n, c, t, v, m) - if self.device == "cuda": + if "cuda" in self.device: onnx_input = Variable(onnx_input.float().cuda(self.output_device), requires_grad=False) else: onnx_input = Variable(onnx_input.float(), requires_grad=False) @@ -742,7 +742,7 @@ def __load_from_pt(self, path, verbose=True): raise e if verbose: print("Loading checkpoint") - if self.device == "cuda": + if "cuda" in self.device: weights = OrderedDict( 
[[k.split('module.')[-1], v.cuda(self.output_device)] for k, v in weights.items()]) else: @@ -911,7 +911,7 @@ def __count_parameters(self): return sum(p.numel() for p in self.model.parameters() if p.requires_grad) def __init_seed(self, seed): - if self.device == "cuda": + if "cuda" in self.device: torch.cuda.manual_seed_all(seed) torch.backends.cudnn.enabled = True torch.backends.cudnn.deterministic = True diff --git a/src/opendr/perception/heart_anomaly_detection/attention_neural_bag_of_feature/attention_neural_bag_of_feature_learner.py b/src/opendr/perception/heart_anomaly_detection/attention_neural_bag_of_feature/attention_neural_bag_of_feature_learner.py index 9af30cd277..ddc6815a0c 100644 --- a/src/opendr/perception/heart_anomaly_detection/attention_neural_bag_of_feature/attention_neural_bag_of_feature_learner.py +++ b/src/opendr/perception/heart_anomaly_detection/attention_neural_bag_of_feature/attention_neural_bag_of_feature_learner.py @@ -203,7 +203,7 @@ def fit(self, train_loader = DataLoader(DataWrapper(train_set), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=True) if val_set is None: @@ -211,14 +211,14 @@ def fit(self, else: val_loader = DataLoader(DataWrapper(val_set), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) if test_set is None: test_loader = None else: test_loader = DataLoader(DataWrapper(test_set), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) if self.test_mode and not silent: @@ -274,7 +274,7 @@ def eval(self, dataset, silent=False, verbose=True): self._validate_dataset(dataset) loader = DataLoader(DataWrapper(dataset), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) device = torch.device(self.device) diff --git a/src/opendr/perception/heart_anomaly_detection/gated_recurrent_unit/gated_recurrent_unit_learner.py b/src/opendr/perception/heart_anomaly_detection/gated_recurrent_unit/gated_recurrent_unit_learner.py index 50ca5ccc00..2d861a17b8 100644 --- a/src/opendr/perception/heart_anomaly_detection/gated_recurrent_unit/gated_recurrent_unit_learner.py +++ b/src/opendr/perception/heart_anomaly_detection/gated_recurrent_unit/gated_recurrent_unit_learner.py @@ -193,7 +193,7 @@ def fit(self, train_loader = DataLoader(DataWrapper(train_set), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=True) if val_set is None: @@ -201,14 +201,14 @@ def fit(self, else: val_loader = DataLoader(DataWrapper(val_set), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) if test_set is None: test_loader = None else: test_loader = DataLoader(DataWrapper(test_set), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) if self.test_mode and not silent: @@ -266,7 +266,7 @@ def eval(self, dataset, silent=False, verbose=True): self._validate_dataset(dataset) loader = DataLoader(DataWrapper(dataset), batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) device = torch.device(self.device) diff --git a/src/opendr/perception/multimodal_human_centric/rgbd_hand_gesture_learner/rgbd_hand_gesture_learner.py 
b/src/opendr/perception/multimodal_human_centric/rgbd_hand_gesture_learner/rgbd_hand_gesture_learner.py index 7206df99d0..c6e3b2f0c1 100644 --- a/src/opendr/perception/multimodal_human_centric/rgbd_hand_gesture_learner/rgbd_hand_gesture_learner.py +++ b/src/opendr/perception/multimodal_human_centric/rgbd_hand_gesture_learner/rgbd_hand_gesture_learner.py @@ -198,14 +198,14 @@ def fit(self, train_set, val_set=None, test_set=None, logging_path='', silent=Fa if isinstance(train_set, RgbdDataset): train_loader = DataLoader(train_set, batch_size=self.batch_size, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, num_workers=self.n_workers, shuffle=True) else: train_loader = DataLoader(DataWrapper(train_set), batch_size=self.batch_size, num_workers=self.n_workers, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=True) if val_set is None: @@ -215,13 +215,13 @@ def fit(self, train_set, val_set=None, test_set=None, logging_path='', silent=Fa val_loader = DataLoader(val_set, batch_size=self.batch_size, num_workers=self.n_workers, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) else: val_loader = DataLoader(DataWrapper(val_set), batch_size=self.batch_size, num_workers=self.n_workers, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) if test_set is None: @@ -231,13 +231,13 @@ def fit(self, train_set, val_set=None, test_set=None, logging_path='', silent=Fa test_loader = DataLoader(test_set, batch_size=self.batch_size, num_workers=self.n_workers, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) else: test_loader = DataLoader(DataWrapper(test_set), batch_size=self.batch_size, num_workers=self.n_workers, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) if self.test_mode and not silent: @@ -296,13 +296,13 @@ def eval(self, dataset, silent=False, verbose=True): loader = DataLoader(dataset, batch_size=self.batch_size, num_workers=self.n_workers, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) else: loader = DataLoader(DataWrapper(dataset), batch_size=self.batch_size, num_workers=self.n_workers, - pin_memory=self.device == 'cuda', + pin_memory='cuda' in self.device, shuffle=False) device = torch.device(self.device) diff --git a/src/opendr/perception/object_detection_2d/centernet/centernet_learner.py b/src/opendr/perception/object_detection_2d/centernet/centernet_learner.py index 196d8c3396..fd986af456 100644 --- a/src/opendr/perception/object_detection_2d/centernet/centernet_learner.py +++ b/src/opendr/perception/object_detection_2d/centernet/centernet_learner.py @@ -82,9 +82,12 @@ def __init__(self, lr=1e-3, epochs=120, batch_size=8, device='cuda', backbone='r self.wh_weight = wh_weight self.center_reg_weight = center_reg_weight - if self.device == 'cuda': + if 'cuda' in self.device: if mx.context.num_gpus() > 0: - self.ctx = mx.gpu(0) + if self.device == 'cuda': + self.ctx = mx.gpu(0) + else: + self.ctx = mx.gpu(int(self.device.split(':')[1])) else: self.ctx = mx.cpu() else: @@ -147,9 +150,12 @@ def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=T print('Saving models as {}'.format(save_prefix)) # get net & set device - if self.device == 'cuda': + if 'cuda' in self.device: if mx.context.num_gpus() > 0: - ctx = [mx.gpu(0)] + if self.device == 'cuda': + ctx = [mx.gpu(0)] + else: + ctx = [mx.gpu(int(self.device.split(':')[1]))] else: ctx = 
[mx.cpu()] else: @@ -310,9 +316,12 @@ def eval(self, dataset, use_subset=False, subset_size=100, verbose=False): autograd.set_training(False) # NOTE: multi-gpu is a little bugged - if self.device == 'cuda': + if 'cuda' in self.device: if mx.context.num_gpus() > 0: - ctx = [mx.gpu(0)] + if self.device == 'cuda': + ctx = [mx.gpu(0)] + else: + ctx = [mx.gpu(int(self.device.split(':')[1]))] else: ctx = [mx.cpu()] else: diff --git a/src/opendr/perception/object_detection_2d/retinaface/retinaface_learner.py b/src/opendr/perception/object_detection_2d/retinaface/retinaface_learner.py index ce1049b53b..45d3a3281b 100644 --- a/src/opendr/perception/object_detection_2d/retinaface/retinaface_learner.py +++ b/src/opendr/perception/object_detection_2d/retinaface/retinaface_learner.py @@ -72,9 +72,12 @@ def __init__(self, backbone='resnet', lr=0.001, batch_size=2, checkpoint_after_i checkpoint_load_iter=checkpoint_load_iter, temp_path=temp_path, device=device) self.device = device - if device == 'cuda': + if 'cuda' in device: if mx.context.num_gpus() > 0: - self.gpu_id = 0 + if device == 'cuda': + self.gpu_id = 0 + else: + self.gpu_id = int(self.device.split(':')[1]) else: self.gpu_id = -1 else: @@ -105,13 +108,19 @@ def __init__(self, backbone='resnet', lr=0.001, batch_size=2, checkpoint_after_i def __get_ctx(self): ctx = [] - if 'CUDA_VISIBLE_DEVICES' in os.environ and self.device == 'cuda': + if 'CUDA_VISIBLE_DEVICES' in os.environ and 'cuda' in self.device: cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip() - elif self.device == 'cuda' and mx.context.num_gpus() > 0: - cvd = ['0'] + elif 'cuda' in self.device and mx.context.num_gpus() > 0: + if 'cuda' in self.device: + if self.device == 'cuda': + cvd = ['0'] + else: + cvd = [self.device.split(':')[1]] + else: + cvd = [self.device.split(':')[1]] else: cvd = [] - if len(cvd) > 0 and self.device == 'cuda': + if len(cvd) > 0 and 'cuda' in self.device: if isinstance(cvd, str): visibles_ids = cvd.split(',') elif isinstance(cvd, list): diff --git a/src/opendr/perception/object_detection_2d/ssd/ssd_learner.py b/src/opendr/perception/object_detection_2d/ssd/ssd_learner.py index 34dbdc5760..386f5b5306 100644 --- a/src/opendr/perception/object_detection_2d/ssd/ssd_learner.py +++ b/src/opendr/perception/object_detection_2d/ssd/ssd_learner.py @@ -82,9 +82,12 @@ def __init__(self, lr=1e-3, epochs=120, batch_size=8, "Supported image sizes: {}".format(img_size, self.backbone, self.supported_backbones[self.backbone])) - if self.device == 'cuda': + if 'cuda' in self.device: if mx.context.num_gpus() > 0: - self.ctx = mx.gpu(0) + if self.device == 'cuda': + self.ctx = mx.gpu(0) + else: + self.ctx = mx.gpu(int(self.device.split(':')[1])) else: self.ctx = mx.cpu() print("Device set to cuda but no GPU available, using CPU...") @@ -321,9 +324,12 @@ def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=T # set device # NOTE: multi-gpu a little bugged - if self.device == 'cuda': + if 'cuda' in self.device: if mx.context.num_gpus() > 0: - ctx = [mx.gpu(0)] + if self.device == 'cuda': + ctx = [mx.gpu(0)] + else: + ctx = [mx.gpu(int(self.device.split(':')[1]))] else: ctx = [mx.cpu()] else: @@ -471,9 +477,12 @@ def eval(self, dataset, use_subset=False, subset_size=100, verbose=False): """ autograd.set_training(False) # NOTE: multi-gpu is a little bugged - if self.device == 'cuda': + if 'cuda' in self.device: if mx.context.num_gpus() > 0: - ctx = [mx.gpu(0)] + if self.device == 'cuda': + ctx = [mx.gpu(0)] + else: + ctx = 
[mx.gpu(int(self.device.split(':')[1]))] else: ctx = [mx.cpu()] else: diff --git a/src/opendr/perception/object_detection_2d/yolov3/yolov3_learner.py b/src/opendr/perception/object_detection_2d/yolov3/yolov3_learner.py index 6a47381b5e..3cb4303fe1 100644 --- a/src/opendr/perception/object_detection_2d/yolov3/yolov3_learner.py +++ b/src/opendr/perception/object_detection_2d/yolov3/yolov3_learner.py @@ -86,9 +86,12 @@ def __init__(self, lr=1e-3, epochs=120, batch_size=8, device='cuda', backbone='d if self.backbone not in self.supported_backbones: raise ValueError(self.backbone + " backbone is not supported.") - if self.device == 'cuda': + if 'cuda' in self.device: if mx.context.num_gpus() > 0: - self.ctx = mx.gpu(0) + if self.device == 'cuda': + self.ctx = mx.gpu(0) + else: + self.ctx = mx.gpu(int(self.device.split(':')[1])) else: print('No GPU found, using CPU...') self.ctx = mx.cpu() @@ -183,9 +186,12 @@ def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=T raise e # get net & set device - if self.device == 'cuda': + if 'cuda' in self.device: if mx.context.num_gpus() > 0: - ctx = [mx.gpu(0)] + if self.device == 'cuda': + ctx = [mx.gpu(0)] + else: + ctx = [mx.gpu(int(self.device.split(':')[1]))] else: ctx = [mx.cpu()] else: @@ -352,9 +358,12 @@ def eval(self, dataset, use_subset=False, subset_size=100, verbose=True): autograd.set_training(False) # TODO: multi-gpu? - if self.device == 'cuda': + if 'cuda' in self.device: if mx.context.num_gpus() > 0: - ctx = [mx.gpu(0)] + if self.device == 'cuda': + ctx = [mx.gpu(0)] + else: + ctx = [mx.gpu(int(self.device.split(':')[1]))] else: ctx = [mx.cpu()] else: diff --git a/src/opendr/perception/pose_estimation/lightweight_open_pose/lightweight_open_pose_learner.py b/src/opendr/perception/pose_estimation/lightweight_open_pose/lightweight_open_pose_learner.py index 9546107f4c..66781d3bcb 100644 --- a/src/opendr/perception/pose_estimation/lightweight_open_pose/lightweight_open_pose_learner.py +++ b/src/opendr/perception/pose_estimation/lightweight_open_pose/lightweight_open_pose_learner.py @@ -275,13 +275,13 @@ def fit(self, dataset, val_dataset=None, logging_path='', logging_flush_secs=30, if not self.weights_only and self.checkpoint_load_iter != 0: try: optimizer.load_state_dict(checkpoint['optimizer']) - if self.device == "cuda": + if "cuda" in self.device: # Move optimizer state to cuda # Taken from https://github.com/pytorch/pytorch/issues/2830#issuecomment-336194949 for state in optimizer.state.values(): for k, v in state.items(): if torch.is_tensor(v): - state[k] = v.cuda() + state[k] = v.to(self.device) scheduler.load_state_dict(checkpoint['scheduler']) num_iter = checkpoint['iter'] current_epoch = checkpoint['current_epoch'] @@ -290,11 +290,11 @@ def fit(self, dataset, val_dataset=None, logging_path='', logging_flush_secs=30, elif self.checkpoint_load_iter != 0: num_iter = self.checkpoint_load_iter - if self.device == "cuda": + if "cuda" in self.device: self.model = DataParallel(self.model) self.model.train() - if self.device == "cuda": - self.model = self.model.cuda() + if "cuda" in self.device: + self.model = self.model.to(self.device) if epochs is not None: self.epochs = epochs @@ -320,12 +320,12 @@ def fit(self, dataset, val_dataset=None, logging_path='', logging_flush_secs=30, paf_masks = batch_data['paf_mask'] keypoint_maps = batch_data['keypoint_maps'] paf_maps = batch_data['paf_maps'] - if self.device == "cuda": - images = images.cuda() - keypoint_masks = keypoint_masks.cuda() - paf_masks = 
paf_masks.cuda() - keypoint_maps = keypoint_maps.cuda() - paf_maps = paf_maps.cuda() + if "cuda" in self.device: + images = images.to(self.device) + keypoint_masks = keypoint_masks.to(self.device) + paf_masks = paf_masks.to(self.device) + keypoint_maps = keypoint_maps.to(self.device) + paf_maps = paf_maps.to(self.device) stages_output = self.model(images) losses = [] @@ -450,7 +450,7 @@ def fit(self, dataset, val_dataset=None, logging_path='', logging_flush_secs=30, if logging: file_writer.close() # Return a dict of lists of PAF and Heatmap losses per stage and a list of all evaluation results dictionaries - if self.half and self.device == 'cuda': + if self.half and 'cuda' in self.device: self.model.half() return {"paf_losses": paf_losses, "heatmap_losses": heatmap_losses, "eval_results_list": eval_results_list} @@ -518,8 +518,8 @@ def eval(self, dataset, silent=False, verbose=True, use_subset=True, subset_size raise AttributeError("self.model is None. Please load a model or set checkpoint_load_iter.") self.model = self.model.eval() # Change model state to evaluation - if self.device == "cuda": - self.model = self.model.cuda() + if "cuda" in self.device: + self.model = self.model.to(self.device) if self.half: self.model.half() @@ -616,8 +616,8 @@ def infer(self, img, upsample_ratio=4, track=True, smooth=True): padded_img, pad = pad_width(scaled_img, self.stride, self.pad_value, min_dims) tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float() - if self.device == "cuda": - tensor_img = tensor_img.cuda() + if "cuda" in self.device: + tensor_img = tensor_img.to(self.device) if self.half: tensor_img = tensor_img.half() @@ -805,8 +805,8 @@ def __load_from_pth(self, path): # load_from_mobilenet(self.model, checkpoint) # else: load_state(self.model, checkpoint) - if self.device == "cuda": - self.model.cuda() + if "cuda" in self.device: + self.model.to(self.device) if self.half: self.model.half() self.model.train(False) @@ -856,8 +856,8 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=Fals :type do_constant_folding: bool, optional """ width = 344 - if self.device == "cuda": - inp = torch.randn(1, 3, self.base_height, width).cuda() + if "cuda" in self.device: + inp = torch.randn(1, 3, self.base_height, width).to(self.device) else: inp = torch.randn(1, 3, self.base_height, width) if self.half: @@ -1055,8 +1055,8 @@ def __infer_eval(self, img): padded_img, pad = pad_width(scaled_img, stride, pad_value, min_dims) tensor_img = torch.from_numpy(padded_img).permute(2, 0, 1).unsqueeze(0).float() - if self.device == "cuda": - tensor_img = tensor_img.cuda() + if "cuda" in self.device: + tensor_img = tensor_img.to(self.device) if self.half: tensor_img = tensor_img.half() stages_output = self.model(tensor_img) diff --git a/src/opendr/perception/semantic_segmentation/bisenet/bisenet_learner.py b/src/opendr/perception/semantic_segmentation/bisenet/bisenet_learner.py index f5ec72da4f..598a2db0d2 100644 --- a/src/opendr/perception/semantic_segmentation/bisenet/bisenet_learner.py +++ b/src/opendr/perception/semantic_segmentation/bisenet/bisenet_learner.py @@ -78,8 +78,8 @@ def __init__(self, def build_model(self): self.model = BiSeNet(num_classes=self.num_classes, context_path=self.context_path) - if self.device == 'cuda': - self.model = torch.nn.DataParallel(self.model).cuda() + if 'cuda' in self.device: + self.model = torch.nn.DataParallel(self.model).to(self.device) def fit(self, dataset, val_dataset=None, silent=False, verbose=True): """ @@ -101,9 +101,9 @@ 
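The hunks above repeat a single pattern across the MXNet-based detectors (the centernet, retinaface, ssd and yolov3 learners): the device check is relaxed from device == 'cuda' to 'cuda' in device, and an explicit index such as 'cuda:1' is mapped onto the matching GPU context. Purely as an illustration of that recurring pattern (this helper is not part of the patches and its name is made up), the nested conditionals condense to:

    # Illustrative only: condenses the device-selection pattern used in the hunks above.
    import mxnet as mx

    def ctx_from_device(device: str) -> mx.Context:
        # "cuda" -> first GPU, "cuda:N" -> GPU N, otherwise (or if no GPU is visible) -> CPU.
        if 'cuda' in device and mx.context.num_gpus() > 0:
            gpu_id = 0 if device == 'cuda' else int(device.split(':')[1])
            return mx.gpu(gpu_id)
        return mx.cpu()

    # e.g. ctx_from_device('cuda:1') returns gpu(1) when at least two GPUs are visible.

The PyTorch-based learners above apply the same idea by replacing hard-coded .cuda() calls with .to(self.device), so a device string with an index is honored there as well.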
def fit(self, dataset, val_dataset=None, silent=False, verbose=True): tq.set_description('epoch %d, lr %f' % (epoch, self.lr)) loss_record = [] for i, (data, label) in enumerate(dataloader_train): - if self.device == 'cuda': - data = data.cuda() - label = label.cuda() + if 'cuda' in self.device: + data = data.to(self.device) + label = label.to(self.device) output, output_sup1, output_sup2 = self.model(data) loss1 = self.loss_func(output, label) loss2 = self.loss_func(output_sup1, label) @@ -145,9 +145,9 @@ def eval(self, dataset, silent=False, verbose=True): hist = np.zeros((self.num_classes, self.num_classes)) for i, (data, label) in enumerate(dataloader_test): tq.update(1) - if self.device == 'cuda': - data = data.cuda() - label = label.cuda() + if 'cuda' in self.device: + data = data.to(self.device) + label = label.to(self.device) predict = self.model(data).squeeze().cpu() predict = reverse_one_hot(predict) predict = np.array(predict) @@ -269,7 +269,7 @@ def save(self, path, verbose=True): metadata["model_paths"].append(param_filepath) if self.device == 'cpu': torch.save(self.model.state_dict(), model_path) - elif self.device == 'cuda': + elif 'cuda' in self.device: torch.save(self.model.module.state_dict(), model_path) if verbose: print("Model parameters saved.") @@ -296,7 +296,7 @@ def load(self, path): if self.device == 'cpu': self.model.load_state_dict(torch.load(os.path.join(path, metadata["model_paths"][0]), map_location=torch.device('cpu')),) - elif self.device == 'cuda': + elif 'cuda' in self.device: self.model.module.load_state_dict(torch.load(os.path.join(path, metadata["model_paths"][0]))) self.model.eval() diff --git a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py index 5fe9b8c5e5..ab74680fc5 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py @@ -99,7 +99,7 @@ def __init__(self, lr=1e-1, batch_size=128, optimizer_name='sgd', lr_schedule='' if self.graph_type is None: raise ValueError(self.graph_type + "is not a valid graph type. 
Supported graphs: ntu, openpose") - if self.device == 'cuda': + if 'cuda' in self.device: self.output_device = self.device_ind[0] if type(self.device_ind) is list else self.device_ind self.__init_seed(1) @@ -156,7 +156,7 @@ def fit(self, dataset, val_dataset, logging_path='', silent=False, verbose=True, else: self.logging = False - if self.device == 'cuda': + if 'cuda' in self.device: if type(self.device_ind) is list: if len(self.device_ind) > 1: self.model = nn.DataParallel(self.model, device_ids=self.device_ind, @@ -228,7 +228,7 @@ def fit(self, dataset, val_dataset, logging_path='', silent=False, verbose=True, for batch_idx, (data, label, index) in enumerate(process): self.global_step += 1 # get data - if self.device == 'cuda': + if 'cuda' in self.device: data = Variable(data.float().cuda(self.output_device), requires_grad=False) label = Variable(label.long().cuda(self.output_device), requires_grad=False) else: @@ -346,7 +346,7 @@ def eval(self, val_dataset, val_loader=None, epoch=0, silent=False, verbose=True process = tqdm(val_loader) for batch_idx, (data, label, index) in enumerate(process): with torch.no_grad(): - if self.device == "cuda": + if "cuda" in self.device: data = Variable(data.float().cuda(self.output_device), requires_grad=False) label = Variable(label.long().cuda(self.output_device), requires_grad=False) else: @@ -455,7 +455,7 @@ def init_model(self): else: if self.logging: shutil.copy2(inspect.getfile(PSTGCN), self.logging_path) - if self.device == 'cuda': + if 'cuda' in self.device: self.model = PSTGCN(num_class=self.num_class, num_point=self.num_point, num_person=self.num_person, in_channels=self.in_channels, graph_type=self.graph_type, topology=self.topology, block_size=self.blocksize, @@ -545,7 +545,7 @@ def infer(self, skeletonseq_batch): skeletonseq_batch = SkeletonSequence(skeletonseq_batch) skeletonseq_batch = torch.from_numpy(skeletonseq_batch.numpy()) - if self.device == "cuda": + if "cuda" in self.device: skeletonseq_batch = Variable(skeletonseq_batch.float().cuda(self.output_device), requires_grad=False) else: skeletonseq_batch = Variable(skeletonseq_batch.float(), requires_grad=False) @@ -607,7 +607,7 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=Fals c, t, v, m = [self.in_channels, 300, self.num_point, self.num_person] n = self.batch_size onnx_input = torch.randn(n, c, t, v, m) - if self.device == "cuda": + if "cuda" in self.device: onnx_input = Variable(onnx_input.float().cuda(self.output_device), requires_grad=False) else: onnx_input = Variable(onnx_input.float(), requires_grad=False) @@ -714,7 +714,7 @@ def __load_from_pt(self, path, verbose=True): raise e if verbose: print("Loading checkpoint") - if self.device == "cuda": + if "cuda" in self.device: weights = OrderedDict( [[k.split('module.')[-1], v.cuda(self.output_device)] for k, v in weights.items()]) else: @@ -964,7 +964,7 @@ def __count_parameters(self): return sum(p.numel() for p in self.model.parameters() if p.requires_grad) def __init_seed(self, seed): - if self.device == "cuda": + if "cuda" in self.device: torch.cuda.manual_seed_all(seed) torch.backends.cudnn.enabled = True torch.backends.cudnn.deterministic = True diff --git a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py index 6589637f72..dce38659b7 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py +++ 
b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py @@ -104,7 +104,7 @@ def __init__(self, lr=1e-1, batch_size=128, optimizer_name='sgd', lr_schedule='' raise ValueError(self.method_name + "is not a valid dataset name. Supported methods: stgcn, tagcn, stbln") - if self.device == 'cuda': + if 'cuda' in self.device: self.output_device = self.device_ind[0] if type(self.device_ind) is list else self.device_ind self.__init_seed(1) @@ -165,7 +165,7 @@ def fit(self, dataset, val_dataset, logging_path='', silent=False, verbose=True, # Initialize the model if self.model is None: self.init_model() - if self.device == 'cuda': + if 'cuda' in self.device: self.model = self.model.cuda(self.output_device) if type(self.device_ind) is list: if len(self.device_ind) > 1: @@ -251,7 +251,7 @@ def fit(self, dataset, val_dataset, logging_path='', silent=False, verbose=True, for batch_idx, (data, label, index) in enumerate(process): self.global_step += 1 # get data - if self.device == 'cuda': + if 'cuda' in self.device: data = Variable(data.float().cuda(self.output_device), requires_grad=False) label = Variable(label.long().cuda(self.output_device), requires_grad=False) else: @@ -367,7 +367,7 @@ def eval(self, val_dataset, val_loader=None, epoch=0, silent=False, verbose=True process = tqdm(val_loader) for batch_idx, (data, label, index) in enumerate(process): with torch.no_grad(): - if self.device == "cuda": + if "cuda" in self.device: data = Variable(data.float().cuda(self.output_device), requires_grad=False) label = Variable(label.long().cuda(self.output_device), requires_grad=False) else: @@ -472,7 +472,7 @@ def __prepare_dataset(self, dataset, data_filename="train_joints.npy", def init_model(self): """Initializes the imported model.""" - cuda_ = (self.device == 'cuda') + cuda_ = ('cuda'in self.device) if self.method_name == 'stgcn': self.model = STGCN(num_class=self.num_class, num_point=self.num_point, num_person=self.num_person, in_channels=self.in_channels, graph_type=self.graph_type, @@ -508,7 +508,7 @@ def infer(self, SkeletonSeq_batch): SkeletonSeq_batch = SkeletonSequence(SkeletonSeq_batch) SkeletonSeq_batch = torch.from_numpy(SkeletonSeq_batch.numpy()) - if self.device == "cuda": + if "cuda" in self.device: SkeletonSeq_batch = Variable(SkeletonSeq_batch.float().cuda(self.output_device), requires_grad=False) else: SkeletonSeq_batch = Variable(SkeletonSeq_batch.float(), requires_grad=False) @@ -570,7 +570,7 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=Fals c, t, v, m = [self.in_channels, 300, self.num_point, self.num_person] n = self.batch_size onnx_input = torch.randn(n, c, t, v, m) - if self.device == "cuda": + if "cuda" in self.device: onnx_input = Variable(onnx_input.float().cuda(self.output_device), requires_grad=False) else: onnx_input = Variable(onnx_input.float(), requires_grad=False) @@ -678,7 +678,7 @@ def __load_from_pt(self, path, verbose=True): raise e if verbose: print("Loading checkpoint") - if self.device == "cuda": + if "cuda" in self.device: weights = OrderedDict( [[k.split('module.')[-1], v.cuda(self.output_device)] for k, v in weights.items()]) else: @@ -695,7 +695,7 @@ def __load_from_pt(self, path, verbose=True): print(' ' + d) state.update(weights) self.model.load_state_dict(state) - if self.device == "cuda": + if "cuda" in self.device: self.model = self.model.cuda(self.output_device) if type(self.device_ind) is list: if len(self.device_ind) > 1: @@ -913,7 +913,7 @@ def __count_parameters(self): return 
sum(p.numel() for p in self.model.parameters() if p.requires_grad) def __init_seed(self, seed): - if self.device == "cuda": + if "cuda" in self.device: torch.cuda.manual_seed_all(seed) torch.backends.cudnn.enabled = True torch.backends.cudnn.deterministic = True diff --git a/src/opendr/perception/speech_recognition/edgespeechnets/edgespeechnets_learner.py b/src/opendr/perception/speech_recognition/edgespeechnets/edgespeechnets_learner.py index 2882658539..a3802ca2a0 100644 --- a/src/opendr/perception/speech_recognition/edgespeechnets/edgespeechnets_learner.py +++ b/src/opendr/perception/speech_recognition/edgespeechnets/edgespeechnets_learner.py @@ -154,7 +154,7 @@ def _get_model_output(self, x): return predictions def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=True): - dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory=self.device == "cuda", shuffle=True) + dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory="cuda" in self.device, shuffle=True) if not self.checkpoint_load_iter == 0: checkpoint_filename = os.path.join( self.temp_path + f"EdgeSpeechNet{self.architecture}-{self.checkpoint_load_iter}.pth") @@ -193,7 +193,7 @@ def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=T return statistics def eval(self, dataset): - dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory=self.device == "cuda") + dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory="cuda" in self.device) self.model.eval() test_loss = 0 correct_predictions = 0 diff --git a/src/opendr/perception/speech_recognition/matchboxnet/matchboxnet_learner.py b/src/opendr/perception/speech_recognition/matchboxnet/matchboxnet_learner.py index 292887a1ce..0e5096f812 100644 --- a/src/opendr/perception/speech_recognition/matchboxnet/matchboxnet_learner.py +++ b/src/opendr/perception/speech_recognition/matchboxnet/matchboxnet_learner.py @@ -173,7 +173,7 @@ def _get_model_output(self, x): return predictions def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=True): - dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory=self.device == "cuda", shuffle=True) + dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory="cuda" in self.device, shuffle=True) if not self.checkpoint_load_iter == 0: checkpoint_filename = os.path.join( self.temp_path + f"MatchboxNet-{self.checkpoint_load_iter}.pth") @@ -212,7 +212,7 @@ def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=T return statistics def eval(self, dataset): - dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory=self.device == "cuda") + dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory="cuda" in self.device) self.model.eval() test_loss = 0 correct_predictions = 0 diff --git a/src/opendr/perception/speech_recognition/quadraticselfonn/quadraticselfonn_learner.py b/src/opendr/perception/speech_recognition/quadraticselfonn/quadraticselfonn_learner.py index b01ee58cca..5797c2fce4 100644 --- a/src/opendr/perception/speech_recognition/quadraticselfonn/quadraticselfonn_learner.py +++ b/src/opendr/perception/speech_recognition/quadraticselfonn/quadraticselfonn_learner.py @@ -145,7 +145,7 @@ def _get_model_output(self, x): return predictions def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=True): - dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory=self.device == "cuda", shuffle=True) + 
dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory="cuda" in self.device, shuffle=True) if not self.checkpoint_load_iter == 0: checkpoint_filename = os.path.join( self.temp_path + f"QuadraticSelfONN-{self.checkpoint_load_iter}.pth") @@ -184,7 +184,7 @@ def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=T return statistics def eval(self, dataset): - dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory=self.device == "cuda") + dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory= "cuda" in self.device) self.model.eval() test_loss = 0 correct_predictions = 0 diff --git a/src/opendr/simulation/human_model_generation/pifu_generator_learner.py b/src/opendr/simulation/human_model_generation/pifu_generator_learner.py index 31173a90d5..17733d369b 100644 --- a/src/opendr/simulation/human_model_generation/pifu_generator_learner.py +++ b/src/opendr/simulation/human_model_generation/pifu_generator_learner.py @@ -44,7 +44,7 @@ def __init__(self, device='cpu', checkpoint_dir=None): self.opt = config_vanilla_parameters(self.opt) # set cuda - if device == 'cuda' and torch.cuda.is_available(): + if 'cuda' in device and torch.cuda.is_available(): self.opt.cuda = True self.cuda = torch.device('cuda:%d' % self.opt.gpu_id) else: From 034afb05d8514e0fb80fbf3a108ebd3d57f39ca6 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 13:39:23 +0200 Subject: [PATCH 48/69] Fixed cuda load placement --- .../progressive_spatio_temporal_gcn_learner.py | 2 ++ .../spatio_temporal_gcn_learner.py | 3 +++ 2 files changed, 5 insertions(+) diff --git a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py index ab74680fc5..e7e5d1e5ac 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py @@ -697,6 +697,8 @@ def load(self, path, model_name, verbose=True): if verbose: print("Loaded ONNX model.") + self.model.to(self.device) + def __load_from_pt(self, path, verbose=True): """Loads the .pt model weights (or checkpoint) from the path provided. :param path: path of the directory the model (checkpoint) was saved diff --git a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py index dce38659b7..6c795c8a21 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py @@ -654,6 +654,7 @@ def load(self, path, model_name, verbose=True): metadata = json.load(metadata_file) if not metadata["optimized"]: self.__load_from_pt(os.path.join(path, model_name + '.pt')) + if verbose: print("Loaded Pytorch model.") else: @@ -661,6 +662,8 @@ def load(self, path, model_name, verbose=True): if verbose: print("Loaded ONNX model.") + self.model.to(self.device) + def __load_from_pt(self, path, verbose=True): """Loads the .pt model weights (or checkpoint) from the path provided. 
:param path: path of the directory the model (checkpoint) was saved From 49e45b3006650d5c977b9b1c5314647d1ee3a59d Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 13:41:37 +0200 Subject: [PATCH 49/69] Reverted cuda load placement --- .../progressive_spatio_temporal_gcn_learner.py | 2 -- .../spatio_temporal_gcn_learner.py | 3 --- 2 files changed, 5 deletions(-) diff --git a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py index e7e5d1e5ac..ab74680fc5 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py @@ -697,8 +697,6 @@ def load(self, path, model_name, verbose=True): if verbose: print("Loaded ONNX model.") - self.model.to(self.device) - def __load_from_pt(self, path, verbose=True): """Loads the .pt model weights (or checkpoint) from the path provided. :param path: path of the directory the model (checkpoint) was saved diff --git a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py index 6c795c8a21..dce38659b7 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py @@ -654,7 +654,6 @@ def load(self, path, model_name, verbose=True): metadata = json.load(metadata_file) if not metadata["optimized"]: self.__load_from_pt(os.path.join(path, model_name + '.pt')) - if verbose: print("Loaded Pytorch model.") else: @@ -662,8 +661,6 @@ def load(self, path, model_name, verbose=True): if verbose: print("Loaded ONNX model.") - self.model.to(self.device) - def __load_from_pt(self, path, verbose=True): """Loads the .pt model weights (or checkpoint) from the path provided. 
:param path: path of the directory the model (checkpoint) was saved From d93f23fceffd7851000bbdaee27f8c4944e17f96 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 13:54:02 +0200 Subject: [PATCH 50/69] Fixed device placement --- .../spatio_temporal_gcn_learner.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py index dce38659b7..7276dbe144 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py @@ -472,7 +472,7 @@ def __prepare_dataset(self, dataset, data_filename="train_joints.npy", def init_model(self): """Initializes the imported model.""" - cuda_ = ('cuda'in self.device) + cuda_ = ('cuda' in self.device) if self.method_name == 'stgcn': self.model = STGCN(num_class=self.num_class, num_point=self.num_point, num_person=self.num_person, in_channels=self.in_channels, graph_type=self.graph_type, @@ -492,6 +492,7 @@ def init_model(self): if self.logging: shutil.copy2(inspect.getfile(STBLN), self.logging_path) self.loss = nn.CrossEntropyLoss() + self.model.to(self.device) # print(self.model) def infer(self, SkeletonSeq_batch): From 70e1fe1e3c32e31d217c367ab7720318a43bb1b9 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 14:05:52 +0200 Subject: [PATCH 51/69] Fixed device placement --- .../lightweight_open_pose/lightweight_open_pose_learner.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/opendr/perception/pose_estimation/lightweight_open_pose/lightweight_open_pose_learner.py b/src/opendr/perception/pose_estimation/lightweight_open_pose/lightweight_open_pose_learner.py index 66781d3bcb..d46fff3564 100644 --- a/src/opendr/perception/pose_estimation/lightweight_open_pose/lightweight_open_pose_learner.py +++ b/src/opendr/perception/pose_estimation/lightweight_open_pose/lightweight_open_pose_learner.py @@ -746,6 +746,7 @@ def init_model(self): groups=self.shufflenet_groups) else: raise UserWarning("Tried to initialize model while model is already initialized.") + self.model.to(self.device) def __save(self, path, optimizer, scheduler, iter_, current_epoch): """ From f77538d999c1968325ae5bbd0b51433d923cffbb Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 15:17:44 +0200 Subject: [PATCH 52/69] Fixed mxnet installation --- bin/install.sh | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/bin/install.sh b/bin/install.sh index feb24e81ac..a2d03f95a7 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -37,8 +37,6 @@ sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main # If working on GPU install GPU dependencies as needed if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then - echo "[INFO] Installing mxnet-cu112==1.8.0post0. You can override this later if you are using a different CUDA version." - pip3 install mxnet-cu112==1.8.0post0 echo "[INFO] Installing torch==1.8.1+cu111. You can override this later if you are using a different CUDA version." 
pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html fi @@ -47,6 +45,16 @@ fi make install_compilation_dependencies make install_runtime_dependencies +# If working on GPU install GPU dependencies as needed +if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then + pip3 uninstall mxnet + pip3 uninstall torch + echo "[INFO] Installing mxnet-cu112==1.8.0post0. You can override this later if you are using a different CUDA version." + pip3 install mxnet-cu112==1.8.0post0 + echo "[INFO] Installing torch==1.8.1+cu111. You can override this later if you are using a different CUDA version." + pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html +fi + make libopendr deactivate From 7121090954bce17a87a6f58c83402553f854ac52 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 15:19:38 +0200 Subject: [PATCH 53/69] Updated base image --- Dockerfile-cuda | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile-cuda b/Dockerfile-cuda index 1a0d080fda..9aeeec4eec 100644 --- a/Dockerfile-cuda +++ b/Dockerfile-cuda @@ -1,4 +1,4 @@ -FROM nvidia/cuda:11.2.0-devel-ubuntu20.04 +FROM nvidia/cuda:11.2.0-cudnn8-devel-ubuntu20.04 # Install dependencies RUN apt-get update && \ From da7ac73b476a5c51c2e21bce5a2eaf0f3559f9aa Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 16:16:11 +0200 Subject: [PATCH 54/69] Separate object detection 2d tests --- .github/workflows/test_packages.yml | 12 ++++++++++-- .github/workflows/tests_suite.yml | 24 +++++++++++++++++++---- .github/workflows/tests_suite_develop.yml | 24 +++++++++++++++++++---- 3 files changed, 50 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test_packages.yml b/.github/workflows/test_packages.yml index 637a0170fb..bc94d9329e 100644 --- a/.github/workflows/test_packages.yml +++ b/.github/workflows/test_packages.yml @@ -39,7 +39,11 @@ jobs: - perception/speech_recognition - perception/skeleton_based_action_recognition - perception/semantic_segmentation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -83,7 +87,11 @@ jobs: - perception/speech_recognition - perception/skeleton_based_action_recognition - perception/semantic_segmentation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - perception/face_detection_2d - perception/facial_expression_recognition - perception/object_detection_3d diff --git a/.github/workflows/tests_suite.yml b/.github/workflows/tests_suite.yml index a8f7e7abbe..f8609906f6 100644 --- a/.github/workflows/tests_suite.yml +++ b/.github/workflows/tests_suite.yml @@ -71,7 +71,11 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - control/mobile_manipulation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - 
perception/face_detection_2d - simulation/human_model_generation - perception/facial_expression_recognition @@ -169,7 +173,11 @@ jobs: - perception/speech_recognition - perception/skeleton_based_action_recognition - perception/semantic_segmentation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -234,7 +242,11 @@ jobs: - perception/speech_recognition - perception/skeleton_based_action_recognition - perception/semantic_segmentation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -305,7 +317,11 @@ jobs: - perception/speech_recognition - perception/skeleton_based_action_recognition - perception/semantic_segmentation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - perception/face_detection_2d - perception/facial_expression_recognition - perception/object_detection_3d diff --git a/.github/workflows/tests_suite_develop.yml b/.github/workflows/tests_suite_develop.yml index 42d9fe5108..34120682db 100644 --- a/.github/workflows/tests_suite_develop.yml +++ b/.github/workflows/tests_suite_develop.yml @@ -70,7 +70,11 @@ jobs: - perception/skeleton_based_action_recognition - perception/semantic_segmentation - control/mobile_manipulation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - perception/face_detection_2d - simulation/human_model_generation - perception/facial_expression_recognition @@ -169,7 +173,11 @@ jobs: - perception/speech_recognition - perception/skeleton_based_action_recognition - perception/semantic_segmentation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -235,7 +243,11 @@ jobs: - perception/speech_recognition - perception/skeleton_based_action_recognition - perception/semantic_segmentation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -307,7 +319,11 @@ jobs: - perception/speech_recognition - perception/skeleton_based_action_recognition - perception/semantic_segmentation - - perception/object_detection_2d + - perception/object_detection_2d/centernet + - perception/object_detection_2d/detr + - 
perception/object_detection_2d/gem + - perception/object_detection_2d/ssd + - perception/object_detection_2d/yolo - perception/face_detection_2d - perception/facial_expression_recognition - perception/object_detection_3d From ce9e73242a97fa3e130c75656d23b1a987cf7140 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 16:16:30 +0200 Subject: [PATCH 55/69] Fixed install.sh --- bin/install.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/install.sh b/bin/install.sh index a2d03f95a7..ea40f8e430 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -47,8 +47,8 @@ make install_runtime_dependencies # If working on GPU install GPU dependencies as needed if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then - pip3 uninstall mxnet - pip3 uninstall torch + pip3 uninstall -y mxnet + pip3 uninstall -y torch echo "[INFO] Installing mxnet-cu112==1.8.0post0. You can override this later if you are using a different CUDA version." pip3 install mxnet-cu112==1.8.0post0 echo "[INFO] Installing torch==1.8.1+cu111. You can override this later if you are using a different CUDA version." From eb9e6489f031fe73c78c7f7d8fa64aa9e22446aa Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 19:36:22 +0200 Subject: [PATCH 56/69] Style fix --- .../quadraticselfonn/quadraticselfonn_learner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opendr/perception/speech_recognition/quadraticselfonn/quadraticselfonn_learner.py b/src/opendr/perception/speech_recognition/quadraticselfonn/quadraticselfonn_learner.py index 5797c2fce4..a723ef5686 100644 --- a/src/opendr/perception/speech_recognition/quadraticselfonn/quadraticselfonn_learner.py +++ b/src/opendr/perception/speech_recognition/quadraticselfonn/quadraticselfonn_learner.py @@ -184,7 +184,7 @@ def fit(self, dataset, val_dataset=None, logging_path='', silent=True, verbose=T return statistics def eval(self, dataset): - dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory= "cuda" in self.device) + dataloader = DataLoader(dataset, batch_size=self.batch_size, pin_memory="cuda" in self.device) self.model.eval() test_loss = 0 correct_predictions = 0 From 0eb831245f9d71931ff8e87141aa662372720cc7 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 20:30:37 +0200 Subject: [PATCH 57/69] Fixed test name --- .github/workflows/test_packages.yml | 4 ++-- .github/workflows/tests_suite.yml | 8 ++++---- .github/workflows/tests_suite_develop.yml | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/test_packages.yml b/.github/workflows/test_packages.yml index bc94d9329e..15593a96ef 100644 --- a/.github/workflows/test_packages.yml +++ b/.github/workflows/test_packages.yml @@ -43,7 +43,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -91,7 +91,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - perception/facial_expression_recognition - perception/object_detection_3d diff --git a/.github/workflows/tests_suite.yml b/.github/workflows/tests_suite.yml index f8609906f6..b6db527c0d 100644 --- 
a/.github/workflows/tests_suite.yml +++ b/.github/workflows/tests_suite.yml @@ -75,7 +75,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - simulation/human_model_generation - perception/facial_expression_recognition @@ -177,7 +177,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -246,7 +246,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -321,7 +321,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - perception/facial_expression_recognition - perception/object_detection_3d diff --git a/.github/workflows/tests_suite_develop.yml b/.github/workflows/tests_suite_develop.yml index 34120682db..0661ef56b5 100644 --- a/.github/workflows/tests_suite_develop.yml +++ b/.github/workflows/tests_suite_develop.yml @@ -74,7 +74,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - simulation/human_model_generation - perception/facial_expression_recognition @@ -177,7 +177,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -247,7 +247,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - perception/facial_expression_recognition # - perception/object_detection_3d @@ -323,7 +323,7 @@ jobs: - perception/object_detection_2d/detr - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - - perception/object_detection_2d/yolo + - perception/object_detection_2d/yolov3 - perception/face_detection_2d - perception/facial_expression_recognition - perception/object_detection_3d From f7836ec8773a405dffc18761fb4ddf93b0739569 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 13 Mar 2022 21:11:20 +0200 Subject: [PATCH 58/69] Moved retinaface back to object detection 2d --- .github/workflows/test_packages.yml | 4 ++-- .github/workflows/tests_suite.yml | 8 ++++---- .github/workflows/tests_suite_develop.yml | 8 ++++---- .../perception/face_detection_2d/retinaface/__init__.py | 0 .../retinaface}/__init__.py | 0 .../retinaface/test_retinaface.py | 0 6 files changed, 10 insertions(+), 10 deletions(-) delete mode 100644 
tests/sources/tools/perception/face_detection_2d/retinaface/__init__.py rename tests/sources/tools/perception/{face_detection_2d => object_detection_2d/retinaface}/__init__.py (100%) rename tests/sources/tools/perception/{face_detection_2d => object_detection_2d}/retinaface/test_retinaface.py (100%) diff --git a/.github/workflows/test_packages.yml b/.github/workflows/test_packages.yml index 15593a96ef..7fabc5b512 100644 --- a/.github/workflows/test_packages.yml +++ b/.github/workflows/test_packages.yml @@ -44,7 +44,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ -92,7 +92,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - perception/facial_expression_recognition - perception/object_detection_3d - control/mobile_manipulation diff --git a/.github/workflows/tests_suite.yml b/.github/workflows/tests_suite.yml index b6db527c0d..0fdefc7fec 100644 --- a/.github/workflows/tests_suite.yml +++ b/.github/workflows/tests_suite.yml @@ -76,7 +76,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - simulation/human_model_generation - perception/facial_expression_recognition - control/single_demo_grasp @@ -178,7 +178,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ -247,7 +247,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ -322,7 +322,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - perception/facial_expression_recognition - perception/object_detection_3d - control/mobile_manipulation diff --git a/.github/workflows/tests_suite_develop.yml b/.github/workflows/tests_suite_develop.yml index 0661ef56b5..4f6ddaaaa3 100644 --- a/.github/workflows/tests_suite_develop.yml +++ b/.github/workflows/tests_suite_develop.yml @@ -75,7 +75,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - simulation/human_model_generation - perception/facial_expression_recognition - control/single_demo_grasp @@ -178,7 +178,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ 
-248,7 +248,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - perception/facial_expression_recognition # - perception/object_detection_3d # - control/mobile_manipulation @@ -324,7 +324,7 @@ jobs: - perception/object_detection_2d/gem - perception/object_detection_2d/ssd - perception/object_detection_2d/yolov3 - - perception/face_detection_2d + - perception/object_detection_2d/retinaface - perception/facial_expression_recognition - perception/object_detection_3d - control/mobile_manipulation diff --git a/tests/sources/tools/perception/face_detection_2d/retinaface/__init__.py b/tests/sources/tools/perception/face_detection_2d/retinaface/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/tests/sources/tools/perception/face_detection_2d/__init__.py b/tests/sources/tools/perception/object_detection_2d/retinaface/__init__.py similarity index 100% rename from tests/sources/tools/perception/face_detection_2d/__init__.py rename to tests/sources/tools/perception/object_detection_2d/retinaface/__init__.py diff --git a/tests/sources/tools/perception/face_detection_2d/retinaface/test_retinaface.py b/tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py similarity index 100% rename from tests/sources/tools/perception/face_detection_2d/retinaface/test_retinaface.py rename to tests/sources/tools/perception/object_detection_2d/retinaface/test_retinaface.py From 125327ab680683aac047d8a7e591b8db3aa45103 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Mon, 14 Mar 2022 07:17:28 +0200 Subject: [PATCH 59/69] Fixed installation script --- CHANGELOG.md | 2 +- bin/install.sh | 12 ++++-------- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a85f2e25f..ae0dc87ed7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ Released on XX, XXth, 2022. - Simplified the installation process for pip by including the appropriate post-installation scripts ([#201](https://github.com/opendr-eu/opendr/pull/201)). - Improved the structure of the toolkit by moving `io` from `utils` to `engine.helper` ([#201](https://github.com/opendr-eu/opendr/pull/201)). - Added support for `post-install` scripts and `opendr` dependencies in `.ini` files ([#201](https://github.com/opendr-eu/opendr/pull/201)). - - Updated toolkit to support CUDA 11.1 + - Updated toolkit to support CUDA 11.2 and improved GPU support ([#215](https://github.com/opendr-eu/opendr/pull/215)). - Bug Fixes: - Updated wheel building pipeline to include missing files and removed unnecessary dependencies ([#200](https://github.com/opendr-eu/opendr/pull/200)). - `panoptic_segmentation/efficient_ps`: updated dataset preparation scripts to create correct validation ground truth ([#221](https://github.com/opendr-eu/opendr/pull/221)). 
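The install.sh change that follows completes the move of the GPU-specific installs to after make install_runtime_dependencies: the CPU builds of mxnet and torch pulled in by the runtime dependencies are uninstalled and replaced with the CUDA builds, and detectron2 is reinstalled afterwards so it links against the replaced torch. As a quick sanity check after running the script with OPENDR_DEVICE=gpu (this snippet is not part of the patch and only uses the packages installed above):

    # Illustrative sanity check, not part of the patch: confirm the CUDA builds
    # of torch and mxnet ended up active after install.sh ran with OPENDR_DEVICE=gpu.
    import mxnet as mx
    import torch

    print("torch", torch.__version__, "built for CUDA", torch.version.cuda,
          "usable:", torch.cuda.is_available())
    print("mxnet", mx.__version__, "GPUs visible:", mx.context.num_gpus())
    # A leftover CPU wheel would show torch.version.cuda as None or report 0 visible GPUs.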
diff --git a/bin/install.sh b/bin/install.sh index ea40f8e430..85d76a8f62 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -35,12 +35,6 @@ pip3 install setuptools configparser sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' \ && curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | sudo apt-key add - -# If working on GPU install GPU dependencies as needed -if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then - echo "[INFO] Installing torch==1.8.1+cu111. You can override this later if you are using a different CUDA version." - pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html -fi - # Build OpenDR make install_compilation_dependencies make install_runtime_dependencies @@ -49,10 +43,12 @@ make install_runtime_dependencies if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then pip3 uninstall -y mxnet pip3 uninstall -y torch - echo "[INFO] Installing mxnet-cu112==1.8.0post0. You can override this later if you are using a different CUDA version." + echo "[INFO] Replacing mxnet-cu112==1.8.0post0 to enable CUDA acceleration." pip3 install mxnet-cu112==1.8.0post0 - echo "[INFO] Installing torch==1.8.1+cu111. You can override this later if you are using a different CUDA version." + echo "[INFO] Replacing torch==1.8.1+cu111 to enable CUDA acceleration." pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html + echo "[INFO] Reinstalling detectronv2." + pip3 install 'git+https://github.com/facebookresearch/detectron2.git' fi make libopendr From d6070ed5f9d7b00d827183ccf2a7e25c16c4aca3 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Mon, 14 Mar 2022 10:19:12 +0200 Subject: [PATCH 60/69] Upgraded to pytorch 1.9.0 --- bin/install.sh | 4 ++-- dependencies/dependencies.ini | 2 +- docs/reference/installation.md | 2 +- src/opendr/control/mobile_manipulation/dependencies.ini | 2 +- src/opendr/control/single_demo_grasp/dependencies.ini | 8 ++++---- src/opendr/engine/dependencies.ini | 2 +- .../perception/activity_recognition/dependencies.ini | 4 ++-- .../perception/compressive_learning/dependencies.ini | 4 ++-- src/opendr/perception/face_recognition/dependencies.ini | 4 ++-- .../dependencies.ini | 4 ++-- .../perception/heart_anomaly_detection/dependencies.ini | 4 ++-- .../perception/multimodal_human_centric/dependencies.ini | 4 ++-- .../perception/object_detection_2d/detr/dependencies.ini | 4 ++-- .../perception/object_detection_2d/gem/dependencies.ini | 4 ++-- .../voxel_object_detection_3d/dependencies.ini | 4 ++-- .../object_tracking_2d/fair_mot/dependencies.ini | 4 ++-- .../panoptic_segmentation/efficient_ps/dependencies.ini | 4 ++-- src/opendr/perception/pose_estimation/dependencies.ini | 4 ++-- .../perception/semantic_segmentation/dependencies.ini | 4 ++-- .../skeleton_based_action_recognition/dependencies.ini | 4 ++-- .../speech_recognition/edgespeechnets/dependencies.ini | 2 +- .../speech_recognition/matchboxnet/dependencies.ini | 2 +- .../speech_recognition/quadraticselfonn/dependencies.ini | 2 +- .../simulation/human_model_generation/dependencies.ini | 4 ++-- 24 files changed, 43 insertions(+), 43 deletions(-) diff --git a/bin/install.sh b/bin/install.sh index 85d76a8f62..cada049559 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -45,8 +45,8 @@ if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then pip3 uninstall -y torch echo "[INFO] Replacing 
mxnet-cu112==1.8.0post0 to enable CUDA acceleration." pip3 install mxnet-cu112==1.8.0post0 - echo "[INFO] Replacing torch==1.8.1+cu111 to enable CUDA acceleration." - pip3 install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html + echo "[INFO] Replacing torch==1.9.1+cu111 to enable CUDA acceleration." + pip3 install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html echo "[INFO] Reinstalling detectronv2." pip3 install 'git+https://github.com/facebookresearch/detectron2.git' fi diff --git a/dependencies/dependencies.ini b/dependencies/dependencies.ini index f5cb44d7b2..529118e495 100644 --- a/dependencies/dependencies.ini +++ b/dependencies/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 +python=torch==1.9.1 wheel [device] diff --git a/docs/reference/installation.md b/docs/reference/installation.md index 0774f385e2..29c645ce30 100644 --- a/docs/reference/installation.md +++ b/docs/reference/installation.md @@ -97,7 +97,7 @@ sudo apt install python3.8-venv libfreetype6-dev git build-essential cmake pytho python3 -m venv venv source venv/bin/activate pip install wheel -pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html +pip install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html pip install 'git+https://github.com/facebookresearch/detectron2.git' pip install mxnet-cu112==1.8.0post0 pip install opendr-toolkit-engine diff --git a/src/opendr/control/mobile_manipulation/dependencies.ini b/src/opendr/control/mobile_manipulation/dependencies.ini index bdea48e92d..4302f72988 100644 --- a/src/opendr/control/mobile_manipulation/dependencies.ini +++ b/src/opendr/control/mobile_manipulation/dependencies.ini @@ -10,7 +10,7 @@ python=vcstool [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 +python=torch==1.9.0 tensorboard numpy pyyaml diff --git a/src/opendr/control/single_demo_grasp/dependencies.ini b/src/opendr/control/single_demo_grasp/dependencies.ini index 8895b3dac7..f2bd802a46 100644 --- a/src/opendr/control/single_demo_grasp/dependencies.ini +++ b/src/opendr/control/single_demo_grasp/dependencies.ini @@ -1,12 +1,12 @@ [compilation] -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 matplotlib>=2.2.2 imgaug==0.4.0 pillow>=8.3.2 diff --git a/src/opendr/engine/dependencies.ini b/src/opendr/engine/dependencies.ini index 214bcf53f6..06a2ea98b9 100644 --- a/src/opendr/engine/dependencies.ini +++ b/src/opendr/engine/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 +python=torch==1.9.0 wheel Cython opencv-python==4.5.1.48 diff --git 
a/src/opendr/perception/activity_recognition/dependencies.ini b/src/opendr/perception/activity_recognition/dependencies.ini index 325c1ca6a6..ea5aaa01db 100644 --- a/src/opendr/perception/activity_recognition/dependencies.ini +++ b/src/opendr/perception/activity_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tqdm onnx==1.8.0 onnxruntime==1.3.0 diff --git a/src/opendr/perception/compressive_learning/dependencies.ini b/src/opendr/perception/compressive_learning/dependencies.ini index 2e4704ac86..c1958768b4 100644 --- a/src/opendr/perception/compressive_learning/dependencies.ini +++ b/src/opendr/perception/compressive_learning/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tensorboard>=2.4.1 tqdm diff --git a/src/opendr/perception/face_recognition/dependencies.ini b/src/opendr/perception/face_recognition/dependencies.ini index 94f4fefe7a..5f2b1c66a1 100644 --- a/src/opendr/perception/face_recognition/dependencies.ini +++ b/src/opendr/perception/face_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 bcolz>=1.2.1 onnx==1.8.0 onnxruntime==1.3.0 diff --git a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini index 1096f367dd..a201ecf621 100644 --- a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini +++ b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tensorboardX>=2.0 matplotlib>=2.2.2 tqdm diff --git a/src/opendr/perception/heart_anomaly_detection/dependencies.ini b/src/opendr/perception/heart_anomaly_detection/dependencies.ini index 59f53bb169..c781145c09 100644 --- a/src/opendr/perception/heart_anomaly_detection/dependencies.ini +++ b/src/opendr/perception/heart_anomaly_detection/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tensorboard>=2.4.1 tqdm scikit-learn>=0.22 diff --git a/src/opendr/perception/multimodal_human_centric/dependencies.ini b/src/opendr/perception/multimodal_human_centric/dependencies.ini index 86c3d14d82..499e9408f4 100644 --- a/src/opendr/perception/multimodal_human_centric/dependencies.ini +++ b/src/opendr/perception/multimodal_human_centric/dependencies.ini @@ -1,8 
+1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tensorboard>=2.4.1 tqdm imageio>=2.6.0 diff --git a/src/opendr/perception/object_detection_2d/detr/dependencies.ini b/src/opendr/perception/object_detection_2d/detr/dependencies.ini index 75203bff81..ed330bb49a 100644 --- a/src/opendr/perception/object_detection_2d/detr/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/detr/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 pycocotools>=2.0.4 git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi scipy diff --git a/src/opendr/perception/object_detection_2d/gem/dependencies.ini b/src/opendr/perception/object_detection_2d/gem/dependencies.ini index ebd9de4c8f..e3fb6d356b 100644 --- a/src/opendr/perception/object_detection_2d/gem/dependencies.ini +++ b/src/opendr/perception/object_detection_2d/gem/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 pillow>=8.3.2 opencv-python==4.5.1.48 pycocotools>=2.0.4 diff --git a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini index c045e430e9..ac7db7fa35 100644 --- a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini +++ b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini index da5c2d5ef9..757d9a27e1 100644 --- a/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini +++ b/src/opendr/perception/object_tracking_2d/fair_mot/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini b/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini index d2dc0b1e36..1c7a1de4c2 100644 --- a/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini +++ b/src/opendr/perception/panoptic_segmentation/efficient_ps/dependencies.ini @@ -1,7 +1,7 @@ [runtime] python= - torch==1.8.1 - torchvision==0.9.1 + torch==1.9.0 + torchvision==0.10.0 tqdm mmcv==0.5.9 future diff --git 
a/src/opendr/perception/pose_estimation/dependencies.ini b/src/opendr/perception/pose_estimation/dependencies.ini index aea6ef17bc..f4628452dd 100644 --- a/src/opendr/perception/pose_estimation/dependencies.ini +++ b/src/opendr/perception/pose_estimation/dependencies.ini @@ -2,8 +2,8 @@ # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format python-dependencies=cython -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tensorboardX>=2.0 opencv-python==4.5.1.48 matplotlib>=2.2.2 diff --git a/src/opendr/perception/semantic_segmentation/dependencies.ini b/src/opendr/perception/semantic_segmentation/dependencies.ini index d0a21e1d58..98ef459c41 100644 --- a/src/opendr/perception/semantic_segmentation/dependencies.ini +++ b/src/opendr/perception/semantic_segmentation/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tqdm imgaug>=0.4.0 pillow>=8.3.2 diff --git a/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini b/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini index 6b836c6741..6b8af3805a 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini +++ b/src/opendr/perception/skeleton_based_action_recognition/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 tensorboardX>=2.0 matplotlib>=2.2.2 tqdm diff --git a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini index bcf5a3a98d..981ac6776e 100644 --- a/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini +++ b/src/opendr/perception/speech_recognition/edgespeechnets/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 +python=torch==1.9.0 librosa==0.8.0 numpy>=1.19 numba==0.53.0 diff --git a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini index f2571d9b82..0fad5d3ead 100644 --- a/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini +++ b/src/opendr/perception/speech_recognition/matchboxnet/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 +python=torch==1.9.0 librosa==0.8.0 numpy>=1.19 numba==0.53.0 diff --git a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini index bcf5a3a98d..981ac6776e 100644 --- a/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini +++ b/src/opendr/perception/speech_recognition/quadraticselfonn/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' key expects a value using the 
Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 +python=torch==1.9.0 librosa==0.8.0 numpy>=1.19 numba==0.53.0 diff --git a/src/opendr/simulation/human_model_generation/dependencies.ini b/src/opendr/simulation/human_model_generation/dependencies.ini index 7797a4cf66..a98d181d66 100644 --- a/src/opendr/simulation/human_model_generation/dependencies.ini +++ b/src/opendr/simulation/human_model_generation/dependencies.ini @@ -1,8 +1,8 @@ [runtime] # 'python' key expects a value using the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.8.1 - torchvision==0.9.1 +python=torch==1.9.0 + torchvision==0.10.0 pyglet>=1.5.16 opencv-python==4.5.1.48 pillow>=8.3.2 From 7d8a643c0b890cf507c3296a5485fefb61e55a59 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Mon, 14 Mar 2022 10:28:12 +0200 Subject: [PATCH 61/69] Upgraded to pytorch 1.9.0 --- bin/install.sh | 2 +- dependencies/dependencies.ini | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bin/install.sh b/bin/install.sh index cada049559..c2d7f62a87 100755 --- a/bin/install.sh +++ b/bin/install.sh @@ -45,7 +45,7 @@ if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then pip3 uninstall -y torch echo "[INFO] Replacing mxnet-cu112==1.8.0post0 to enable CUDA acceleration." pip3 install mxnet-cu112==1.8.0post0 - echo "[INFO] Replacing torch==1.9.1+cu111 to enable CUDA acceleration." + echo "[INFO] Replacing torch==1.9.0+cu111 to enable CUDA acceleration." pip3 install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html echo "[INFO] Reinstalling detectronv2." pip3 install 'git+https://github.com/facebookresearch/detectron2.git' diff --git a/dependencies/dependencies.ini b/dependencies/dependencies.ini index 529118e495..33b7ba7060 100644 --- a/dependencies/dependencies.ini +++ b/dependencies/dependencies.ini @@ -1,7 +1,7 @@ [runtime] # 'python' and 'python-dependencies' keys expect a value in the Python requirements file format # https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format -python=torch==1.9.1 +python=torch==1.9.0 wheel [device] From c28c17032854c4c49836379645ee614eb2c972fd Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Mon, 14 Mar 2022 16:00:27 +0200 Subject: [PATCH 62/69] Changed default constant folding setting --- .../progressive_spatio_temporal_bln_learner.py | 2 +- .../progressive_spatio_temporal_gcn_learner.py | 2 +- .../spatio_temporal_gcn_learner.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py index 89603c5aaf..952575a3ef 100644 --- a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py +++ b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py @@ -594,7 +594,7 @@ def infer(self, facial_landmarks_batch, monte_carlo_dropout=True, mcdo_repeats=1 return category - def optimize(self, do_constant_folding=False): + def optimize(self, do_constant_folding=True): """ Optimize method converts 
the model to ONNX format and saves the model in the parent directory defined by self.temp_path. The ONNX model is then loaded. diff --git a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py index ab74680fc5..bba3d45c92 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py @@ -573,7 +573,7 @@ def infer(self, skeletonseq_batch): return category - def optimize(self, do_constant_folding=False): + def optimize(self, do_constant_folding=True): """ Optimize method converts the model to ONNX format and saves the model in the parent directory defined by self.temp_path. The ONNX model is then loaded. diff --git a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py index 7276dbe144..5b467337a9 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py @@ -537,7 +537,7 @@ def infer(self, SkeletonSeq_batch): return category - def optimize(self, do_constant_folding=False): + def optimize(self, do_constant_folding=True): """ Optimize method converts the model to ONNX format and saves the model in the parent directory defined by self.temp_path. The ONNX model is then loaded. From b3e3b2c3099b367c51e7213897c46d6f29683a72 Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Wed, 16 Mar 2022 08:35:20 +0200 Subject: [PATCH 63/69] Updated EfficientPS --- .../panoptic_segmentation/efficient_ps/algorithm/EfficientPS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/opendr/perception/panoptic_segmentation/efficient_ps/algorithm/EfficientPS b/src/opendr/perception/panoptic_segmentation/efficient_ps/algorithm/EfficientPS index f84ec27610..d03deab54e 160000 --- a/src/opendr/perception/panoptic_segmentation/efficient_ps/algorithm/EfficientPS +++ b/src/opendr/perception/panoptic_segmentation/efficient_ps/algorithm/EfficientPS @@ -1 +1 @@ -Subproject commit f84ec27610e6e57a26f184e28b1da286d496b743 +Subproject commit d03deab54edc5da15ed63318b3d1b14fb9712441 From 3ecd095b1fe9a313b3cebb625d3ec7158f31a2bc Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Fri, 18 Mar 2022 11:01:59 +0200 Subject: [PATCH 64/69] Apply suggestions from code review Co-authored-by: ad-daniel <44834743+ad-daniel@users.noreply.github.com> --- docs/reference/installation.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/installation.md b/docs/reference/installation.md index 29c645ce30..73ecb1049f 100644 --- a/docs/reference/installation.md +++ b/docs/reference/installation.md @@ -37,7 +37,7 @@ Please also make sure that you have enough RAM available for the installation (a If you want to install GPU-related dependencies, then you can appropriately set the `OPENDR_DEVICE` variable. -The toolkit defaults to using CPU. +The toolkit defaults to using CPU. 
Therefore, if you want to use GPU, please set this variable accordingly *before* running the installation script: ```bash export OPENDR_DEVICE=gpu @@ -59,7 +59,7 @@ make unittest make ctests ``` -If you plan to use GPU-enabled functionalities, then you are advised to install [CUDA 11.1](https://developer.nvidia.com/cuda-11.1.0-download-archive), along with [CuDNN](https://developer.nvidia.com/cudnn). +If you plan to use GPU-enabled functionalities, then you are advised to install [CUDA 11.2](https://developer.nvidia.com/cuda-11.2.0-download-archive), along with [CuDNN](https://developer.nvidia.com/cudnn). **HINT:** All tests probe for the `TEST_DEVICE` enviromental variable when running. If this enviromental variable is set during testing, it allows for easily running all tests on a different device (e.g., setting `TEST_DEVICE=cuda:0` runs all tests on the first GPU of the system). From 91adfe0d8dce9808804c7f5ab48499fb0b9f7f8a Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Sun, 20 Mar 2022 10:45:56 +0200 Subject: [PATCH 65/69] Bugfixes --- ...progressive_spatio_temporal_bln_learner.py | 19 +++++++++++++++-- ...progressive_spatio_temporal_gcn_learner.py | 20 +++++++++++++++++- .../spatio_temporal_gcn_learner.py | 21 ++++++++++++++++++- .../cox3d/test_cox3d_learner.py | 2 +- 4 files changed, 57 insertions(+), 5 deletions(-) diff --git a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py index 952575a3ef..b5b048bb30 100644 --- a/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py +++ b/src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/progressive_spatio_temporal_bln_learner.py @@ -616,7 +616,7 @@ def optimize(self, do_constant_folding=True): self.__load_from_onnx(os.path.join(self.parent_dir, self.experiment_name, "onnx_model_temp.onnx")) - def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=True): + def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=False): """ Converts the loaded regular PyTorch model to an ONNX model and saves it to disk. :param output_name: path and name to save the model, e.g. 
"/models/onnx_model.onnx" @@ -636,8 +636,16 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=True " CK+, AFEW") n = self.batch_size onnx_input = torch.randn(n, c, t, v, m) + if "cuda" in self.device: - onnx_input = Variable(onnx_input.float().cuda(self.output_device), requires_grad=False) + print("[WARN] Temporarily moving model to CPU for ONNX exporting.") + # This is a hack due to https://github.com/pytorch/pytorch/issues/72175 + # Some parts of the model do not make it to GPU, exporting it through CPU + self.model.cpu() + self.model.cuda_ = False + for x in self.model.layers: + self.model.layers[x].bln.cuda_ = False + onnx_input = Variable(onnx_input.float(), requires_grad=False) else: onnx_input = Variable(onnx_input.float(), requires_grad=False) # Export the model @@ -652,6 +660,13 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=True dynamic_axes={'onnx_input': {0: 'n'}, # variable lenght axes 'onnx_output': {0: 'n'}}) + # This is a hack due to https://github.com/pytorch/pytorch/issues/72175 (see above) + if "cuda" in self.device: + self.model.cuda_ = True + for x in self.model.layers: + self.model.layers[x].bln.cuda_ = True + self.model.cuda(self.output_device) + def save(self, path, model_name, verbose=True): """ This method is used to save a trained model. diff --git a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py index bba3d45c92..f8b2ab3664 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/progressive_spatio_temporal_gcn_learner.py @@ -608,7 +608,17 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=Fals n = self.batch_size onnx_input = torch.randn(n, c, t, v, m) if "cuda" in self.device: - onnx_input = Variable(onnx_input.float().cuda(self.output_device), requires_grad=False) + print("[WARN] Temporarily moving model to CPU for ONNX exporting.") + # This is a hack due to https://github.com/pytorch/pytorch/issues/72175 + # Some parts of the model do not make it to GPU, exporting it through CPU + self.model.cpu() + self.model.cuda_ = False + for x in self.model.layers: + if hasattr(self.model.layers[x], 'gcn'): + self.model.layers[x].gcn.cuda_ = False + elif hasattr(self.model.layers[x], 'cuda_'): + self.model.layers[x].cuda_ = False + onnx_input = Variable(onnx_input.float(), requires_grad=False) else: onnx_input = Variable(onnx_input.float(), requires_grad=False) # torch_out = self.model(onnx_input) @@ -623,6 +633,14 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=Fals output_names=['onnx_output'], # the model's output names dynamic_axes={'onnx_input': {0: 'n'}, # variable lenght axes 'onnx_output': {0: 'n'}}) + # This is a hack due to https://github.com/pytorch/pytorch/issues/72175 (see above) + if "cuda" in self.device: + self.model.cuda_ = True + if hasattr(self.model.layers[x], 'gcn'): + self.model.layers[x].gcn.cuda_ = True + elif hasattr(self.model.layers[x], 'cuda_'): + self.model.layers[x].cuda_ = True + self.model.cuda(self.output_device) def save(self, path, model_name='', verbose=True): """ diff --git a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py 
b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py index 5b467337a9..c27100802a 100644 --- a/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py +++ b/src/opendr/perception/skeleton_based_action_recognition/spatio_temporal_gcn_learner.py @@ -572,7 +572,17 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=Fals n = self.batch_size onnx_input = torch.randn(n, c, t, v, m) if "cuda" in self.device: - onnx_input = Variable(onnx_input.float().cuda(self.output_device), requires_grad=False) + print("[WARN] Temporarily moving model to CPU for ONNX exporting.") + # This is a hack due to https://github.com/pytorch/pytorch/issues/72175 + # Some parts of the model do not make it to GPU, exporting it through CPU + self.model.cpu() + self.model.cuda_ = False + for x in self.model.layers: + if hasattr(self.model.layers[x], 'gcn'): + self.model.layers[x].gcn.cuda_ = False + elif hasattr(self.model.layers[x], 'cuda_'): + self.model.layers[x].cuda_ = False + onnx_input = Variable(onnx_input.float(), requires_grad=False) else: onnx_input = Variable(onnx_input.float(), requires_grad=False) # torch_out = self.model(onnx_input) @@ -589,6 +599,15 @@ def __convert_to_onnx(self, output_name, do_constant_folding=False, verbose=Fals dynamic_axes={'onnx_input': {0: 'n'}, # variable lenght axes 'onnx_output': {0: 'n'}}) + # This is a hack due to https://github.com/pytorch/pytorch/issues/72175 (see above) + if "cuda" in self.device: + self.model.cuda_ = True + if hasattr(self.model.layers[x], 'gcn'): + self.model.layers[x].gcn.cuda_ = True + elif hasattr(self.model.layers[x], 'cuda_'): + self.model.layers[x].cuda_ = True + self.model.cuda(self.output_device) + def save(self, path, model_name='', verbose=True): """ This method is used to save a trained model. 
diff --git a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py index bfdc8432ac..26c38cc6e2 100644 --- a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py +++ b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py @@ -111,7 +111,7 @@ def test_infer(self): assert all([torch.isclose(torch.sum(r.confidence), torch.tensor(1.0)) for r in results1]) # Input is Image - results2 = self.learner.infer(Image(batch[0], dtype=np.float)) + results2 = self.learner.infer([Image(batch[0], dtype=np.float32), Image(batch[1], dtype=np.float32)]) assert torch.allclose(results1[0].confidence, results2[0].confidence, atol=1e-6) # Input is List[Image] From 6dd060e887e1a39d06359cdfb8c7e4dddc91ca4d Mon Sep 17 00:00:00 2001 From: LukasHedegaard Date: Mon, 21 Mar 2022 09:24:06 +0100 Subject: [PATCH 66/69] Increase tollerance in test_cox3d_learner --- .../activity_recognition/cox3d/test_cox3d_learner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py index 26c38cc6e2..6d56da46e7 100644 --- a/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py +++ b/tests/sources/tools/perception/activity_recognition/cox3d/test_cox3d_learner.py @@ -112,11 +112,11 @@ def test_infer(self): # Input is Image results2 = self.learner.infer([Image(batch[0], dtype=np.float32), Image(batch[1], dtype=np.float32)]) - assert torch.allclose(results1[0].confidence, results2[0].confidence, atol=1e-6) + assert torch.allclose(results1[0].confidence, results2[0].confidence, atol=1e-4) # Input is List[Image] results3 = self.learner.infer([Image(v, dtype=np.float) for v in batch]) - assert all([torch.allclose(r1.confidence, r3.confidence, atol=1e-6) for (r1, r3) in zip(results1, results3)]) + assert all([torch.allclose(r1.confidence, r3.confidence, atol=1e-4) for (r1, r3) in zip(results1, results3)]) def test_optimize(self): self.learner.ort_session = None From a9ad934a183fa55202318395a493e55035364b6f Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Tue, 22 Mar 2022 06:45:00 +0200 Subject: [PATCH 67/69] Test dockerfile with correct branch --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 8ee99caa7b..ebaeef3322 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ RUN chmod +x /tini ENTRYPOINT ["/tini", "--"] # Clone the repo and install the toolkit -RUN git clone --depth 1 --recurse-submodules -j8 https://github.com/opendr-eu/opendr +RUN git clone --depth 1 --recurse-submodules -j8 https://github.com/opendr-eu/opendr -b cuda_upgrade WORKDIR "/opendr" RUN ./bin/install.sh From db7314f8cd95a5f83d06a6824839712995bcf6ab Mon Sep 17 00:00:00 2001 From: Nikolaos Passalis Date: Tue, 22 Mar 2022 06:56:54 +0200 Subject: [PATCH 68/69] Removed comment --- Dockerfile-cuda | 1 - 1 file changed, 1 deletion(-) diff --git a/Dockerfile-cuda b/Dockerfile-cuda index 9aeeec4eec..1ea074ee83 100644 --- a/Dockerfile-cuda +++ b/Dockerfile-cuda @@ -11,7 +11,6 @@ ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini RUN chmod +x /tini ENTRYPOINT ["/tini", "--"] -# Avoid switching back to gcc9 when install build-essential RUN sudo apt-get --yes install 
build-essential # Clone the repo and install the toolkit From 1529060eca5c1bcebeb1e248e6e0fbdc809ba2e8 Mon Sep 17 00:00:00 2001 From: ad-daniel Date: Tue, 22 Mar 2022 11:23:10 +0100 Subject: [PATCH 69/69] remove branch from docker files --- Dockerfile | 2 +- Dockerfile-cuda | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index ebaeef3322..8ee99caa7b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,7 +12,7 @@ RUN chmod +x /tini ENTRYPOINT ["/tini", "--"] # Clone the repo and install the toolkit -RUN git clone --depth 1 --recurse-submodules -j8 https://github.com/opendr-eu/opendr -b cuda_upgrade +RUN git clone --depth 1 --recurse-submodules -j8 https://github.com/opendr-eu/opendr WORKDIR "/opendr" RUN ./bin/install.sh diff --git a/Dockerfile-cuda b/Dockerfile-cuda index 1ea074ee83..96fb339ad8 100644 --- a/Dockerfile-cuda +++ b/Dockerfile-cuda @@ -15,7 +15,7 @@ RUN sudo apt-get --yes install build-essential # Clone the repo and install the toolkit ENV OPENDR_DEVICE gpu -RUN git clone --depth 1 --recurse-submodules -j8 https://github.com/opendr-eu/opendr -b cuda_upgrade +RUN git clone --depth 1 --recurse-submodules -j8 https://github.com/opendr-eu/opendr WORKDIR "/opendr" RUN ./bin/install.sh
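
On the GPU path, `bin/install.sh` above first installs the generic dependencies and then swaps in `mxnet-cu112==1.8.0post0` and `torch==1.9.0+cu111` (with matching `torchvision`/`torchaudio`) before reinstalling detectron2 from source. A quick sanity check along the following lines — an illustrative sketch rather than part of the patches, assuming only the wheels pinned above — confirms that both frameworks can actually see a CUDA device after installation:

```python
# Post-install sanity check (illustrative sketch, not part of the patch series):
# verifies that the GPU wheels pinned above (torch==1.9.0+cu111,
# mxnet-cu112==1.8.0post0) are importable and detect a CUDA device.
import torch
import mxnet as mx

print("torch", torch.__version__, "CUDA available:", torch.cuda.is_available())
print("mxnet", mx.__version__, "GPUs visible:", mx.context.num_gpus())
```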
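
The bugfix commit above works around pytorch/pytorch#72175 by moving each learner model to the CPU, exporting to ONNX there, and then restoring the CUDA flags and returning the model to the GPU. Stripped of the learner-specific `cuda_` bookkeeping, the same pattern can be sketched for a generic `torch.nn.Module` as follows (placeholder function and argument names, not the OpenDR learner API):

```python
# Generic sketch of the export-on-CPU workaround used in the learners above;
# the model, input and output names here are placeholders, not OpenDR internals.
import torch


def export_onnx_via_cpu(model, example_input, output_path, do_constant_folding=True):
    was_on_gpu = next(model.parameters()).is_cuda
    model = model.cpu()  # export through CPU to avoid the GPU tracing issue
    torch.onnx.export(
        model,
        example_input.cpu(),
        output_path,
        do_constant_folding=do_constant_folding,
        input_names=['onnx_input'],
        output_names=['onnx_output'],
        dynamic_axes={'onnx_input': {0: 'n'}, 'onnx_output': {0: 'n'}},
    )
    if was_on_gpu:
        model.cuda()  # move the model back once the export is done
    return output_path
```

Exporting through the CPU should not change the resulting ONNX graph; it only sidesteps the device-specific tracing failure, which is why the learners move the model back to the GPU immediately after `torch.onnx.export` returns.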