
Commit 473a061

Author: Marian Rassat
Commit message: misc: removed numba from deps, removed mentions of numba in comments
1 parent: 577b721

File tree: 18 files changed, +85 -130 lines

18 files changed

+85
-130
lines changed

.github/workflows/pandora_ci.yml (-1)

@@ -26,7 +26,6 @@ jobs:
       - name: Test with pytest
         run: |
-          export NUMBA_DISABLE_JIT="1"
           pytest -m "not notebook_tests" --junitxml=pytest-report.xml --cov-config=.coveragerc --cov-report xml --cov


       - name: Upload coverage to Codecov
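Note: the deleted export used numba's documented NUMBA_DISABLE_JIT switch, which forces @jit-decorated functions to run as plain Python so coverage tools can trace their bodies; with numba gone from the dependencies, the export has no effect. A minimal sketch of the same switch set from Python, relevant only to older, numba-based releases:

    import os

    # NUMBA_DISABLE_JIT disables numba's JIT compilation. It must be set
    # before numba compiles anything, which is why the CI step exported it
    # ahead of pytest. On versions without numba this is a harmless no-op.
    os.environ["NUMBA_DISABLE_JIT"] = "1"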

NOTICE (-5)

@@ -27,11 +27,6 @@ Copyright (C) 2016 Jason R Coombs <[email protected]>
 Website: https://setuptools.readthedocs.io/
 License: MIT.

-Numba: NumPy aware dynamic Python compiler using LLVM
-Copyright (c) 2012, Anaconda, Inc.
-Website: https://numba.pydata.org/
-License: BSD 2 Clause.
-
 Xarray: N-D labeled arrays and datasets in Python
 Copyright 2014-2019, xarray Developers
 Website: http://xarray.pydata.org

docs/source/getting_started.rst (+1 -1)

@@ -64,7 +64,7 @@ Credits

 Pandora uses `transitions <https://github.com/pytransitions/transitions>`_ to manage the pipelines one can create.
 Images I/O are provided by `rasterio <https://github.com/mapbox/rasterio>`_ and we use `xarray <https://github.com/pydata/xarray>`_
-to handle 3D Cost Volumes with few `numba <https://github.com/numba/numba>`_ optimisations.
+to handle 3D Cost Volumes.

 Our data test sample is based on the 2003 Middleburry dataset [Scharstein2003]_.

docs/source/userguide/faq.rst (-12)

@@ -120,15 +120,3 @@ And the python script.

     # Check datasets: shape, format and content
     check_datasets(img_left, img_right)
-
-How can I disable numba parallelization?
-****************************************
-
-Some functions of Pandora are parallelized using the numba package. To prevent the use of this tool, it is possible to set
-an environment variable named **PANDORA_NUMBA_PARALLEL** to **false**.
-
-How can I disable numba cache?
-******************************
-
-Some Pandora functions have a cache of the numba package. This cache can improve execution speed. To do this, you can set
-an environment variable named **PANDORA_NUMBA_PAR** to **true**.
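Note: the two removed FAQ entries described environment-variable toggles that no longer exist. For context, a hypothetical sketch of how such flags are typically read; the variable names come verbatim from the removed text (including the **PANDORA_NUMBA_PAR** name as it stood there), while the parsing logic is assumed, not taken from Pandora's source:

    import os

    # Hypothetical parsing of the toggles named in the removed FAQ entries;
    # the actual logic in old Pandora releases may have differed.
    parallel_enabled = os.environ.get("PANDORA_NUMBA_PARALLEL", "true").lower() != "false"
    cache_enabled = os.environ.get("PANDORA_NUMBA_PAR", "false").lower() == "true"
    print(parallel_enabled, cache_enabled)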

mypy.ini (-2)

@@ -17,8 +17,6 @@ mypy_path = stubs
 ignore_missing_imports = True
 [mypy-setuptools.*]
 ignore_missing_imports = True
-[mypy-numba.*]
-ignore_missing_imports = True
 [mypy-json_checker.*]
 ignore_missing_imports = True
 [mypy-scipy.*]

notebooks/notebook_requirements.txt (-2)

@@ -4,8 +4,6 @@ xarray>=0.13.0
 scipy
 rasterio
 json-checker
-numba>=0.55.2;python_version>'3.7'
-numba>=0.47.0;python_version<'3.8'
 transitions
 scikit-image>=0.19.0
 jupyter-dash

pandora/aggregation/cbca.py (+2 -2)

@@ -241,7 +241,7 @@ def computes_cross_supports(
     left_masked = filter_.median_filter(left_masked)  # type: ignore
     # Convert nan to inf to be able to use the comparison operators < and > in cross_support function
     np.nan_to_num(left_masked, copy=False, nan=np.inf)
-    # Compute left cross support using numba to reduce running time
+    # Compute left cross support using C++ to reduce running time
     if offset != 0:
         # Cross support to the size of the cost volume
         cross_left = aggregation_cpp.cross_support(
@@ -289,7 +289,7 @@ def computes_cross_supports(
     right_masked = filter_.median_filter(right_masked)  # type: ignore
     # Convert nan to inf to be able to use the comparison operators < and > in cross_support function
     np.nan_to_num(right_masked, copy=False, nan=np.inf)
-    # Compute right cross support using numba to reduce running time
+    # Compute right cross support using C++ to reduce running time
     if offset != 0:
         # Cross support to the size of the cost volume
         curr_c_r = aggregation_cpp.cross_support(

pandora/aggregation/cpp/aggregation_cpp.pyi (-1)

@@ -86,4 +86,3 @@ def cross_support(image, len_arms, intensity):
     :rtype: 3D np.array ( row, col, [left, right, top, bot] ), dtype=np.int16
     """
     ...
-

pandora/cost_volume_confidence/ambiguity.py (+3 -7)

@@ -135,13 +135,9 @@ def confidence_prediction(

         type_measure_min = cv.attrs["type_measure"] == "min"

-        # This silences numba's TBB threading layer warning
-        with warnings.catch_warnings():
-            warnings.filterwarnings("ignore")
-            # Computes ambiguity using numba in parallel for memory and computation time optimization
-            ambiguity = cost_volume_confidence_cpp.compute_ambiguity(
-                cv["cost_volume"].data, self._etas, self._nbr_etas, grids, disparity_range, type_measure_min
-            )
+        ambiguity = cost_volume_confidence_cpp.compute_ambiguity(
+            cv["cost_volume"].data, self._etas, self._nbr_etas, grids, disparity_range, type_measure_min
+        )

         # If activated, ambiguity normalization with percentile
         if self._normalization:

pandora/cost_volume_confidence/interval_bounds.py (+21 -24)

@@ -157,31 +157,28 @@ def confidence_prediction(
         else:
             type_factor = 1.0

-        # This silences numba's TBB threading layer warning
-        with warnings.catch_warnings():
-            warnings.filterwarnings("ignore")
-            # Computes interval bounds using numpy
-            interval_bound_inf, interval_bound_sup = self.compute_interval_bounds(
-                cv["cost_volume"].data, cv["disp"].data.astype(np.float32), self._possibility_threshold, type_factor
+        # Computes interval bounds using numpy
+        interval_bound_inf, interval_bound_sup = self.compute_interval_bounds(
+            cv["cost_volume"].data, cv["disp"].data.astype(np.float32), self._possibility_threshold, type_factor
+        )
+        if self._regularization:
+            indicator = (
+                "confidence_from_ambiguity"
+                if (self._ambiguity_indicator == "")
+                else "confidence_from_ambiguity." + self._ambiguity_indicator
             )
-            if self._regularization:
-                indicator = (
-                    "confidence_from_ambiguity"
-                    if (self._ambiguity_indicator == "")
-                    else "confidence_from_ambiguity." + self._ambiguity_indicator
-                )
-                interval_bound_inf, interval_bound_sup, _ = interval_regularization(
-                    interval_bound_inf,
-                    interval_bound_sup,
-                    cv.confidence_measure.sel({"indicator": indicator}).data,
-                    self._ambiguity_threshold,
-                    self._ambiguity_kernel_size,
-                    self._vertical_depth,
-                    self._quantile_regularization,
-                )
-            # For empty cost volume, the interval gets its max length
-            # interval_bound_inf[np.isnan(interval_bound_inf)] = cv["disp"].data.astype(np.float32)[0]
-            # interval_bound_sup[np.isnan(interval_bound_sup)] = cv["disp"].data.astype(np.float32)[-1]
+            interval_bound_inf, interval_bound_sup, _ = interval_regularization(
+                interval_bound_inf,
+                interval_bound_sup,
+                cv.confidence_measure.sel({"indicator": indicator}).data,
+                self._ambiguity_threshold,
+                self._ambiguity_kernel_size,
+                self._vertical_depth,
+                self._quantile_regularization,
+            )
+        # For empty cost volume, the interval gets its max length
+        # interval_bound_inf[np.isnan(interval_bound_inf)] = cv["disp"].data.astype(np.float32)[0]
+        # interval_bound_sup[np.isnan(interval_bound_sup)] = cv["disp"].data.astype(np.float32)[-1]

         disp, cv = self.allocate_confidence_map(self._indicator_inf, interval_bound_inf, disp, cv)
         disp, cv = self.allocate_confidence_map(self._indicator_sup, interval_bound_sup, disp, cv)

pandora/cost_volume_confidence/risk.py (+20 -25)

@@ -130,31 +130,26 @@ def confidence_prediction(
         )
         # Get disparity intervals parameters
         disparity_range = cv["disp"].data.astype(np.float32)
-        # This silences numba's TBB threading layer warning
-        with warnings.catch_warnings():
-            warnings.filterwarnings("ignore")
-
-            _, sampled_ambiguity = cost_volume_confidence_cpp.compute_ambiguity_and_sampled_ambiguity(
-                cv["cost_volume"].data, self._etas, self._nbr_etas, grids, disparity_range
-            )
-
-            if "global_disparity" in img_left.attrs:
-                sampled_ambiguity = self.normalize_with_extremum(sampled_ambiguity, img_left, self._nbr_etas)
-                logging.info(
-                    "You are using normalization by \n a specific case with the instantiation of global_disparity"
-                )
-            # in case of cross correlation
-            elif "global_disparity" in img_right.attrs:
-                sampled_ambiguity = self.normalize_with_extremum(sampled_ambiguity, img_right, self._nbr_etas)
-
-            risk_max, risk_min = self.compute_risk(
-                cv["cost_volume"].data,
-                sampled_ambiguity,
-                self._etas,
-                self._nbr_etas,
-                grids,
-                disparity_range,
-            )
+
+        _, sampled_ambiguity = cost_volume_confidence_cpp.compute_ambiguity_and_sampled_ambiguity(
+            cv["cost_volume"].data, self._etas, self._nbr_etas, grids, disparity_range
+        )
+
+        if "global_disparity" in img_left.attrs:
+            sampled_ambiguity = self.normalize_with_extremum(sampled_ambiguity, img_left, self._nbr_etas)
+            logging.info("You are using normalization by \n a specific case with the instantiation of global_disparity")
+        # in case of cross correlation
+        elif "global_disparity" in img_right.attrs:
+            sampled_ambiguity = self.normalize_with_extremum(sampled_ambiguity, img_right, self._nbr_etas)
+
+        risk_max, risk_min = self.compute_risk(
+            cv["cost_volume"].data,
+            sampled_ambiguity,
+            self._etas,
+            self._nbr_etas,
+            grids,
+            disparity_range,
+        )

         disp, cv = self.allocate_confidence_map(self._indicator_max, risk_max, disp, cv)
         disp, cv = self.allocate_confidence_map(self._indicator_min, risk_min, disp, cv)

pandora/cpp/img_tools_cpp.pyi (+1)

@@ -22,6 +22,7 @@ def find_valid_neighbors(dirs, disp, valid, row, col, msk_pixel_invalid):
     :rtype: 2D np.ndarray
     """
     ...
+
 def interpolate_nodata_sgm(
     img: np.ndarray, valid: np.ndarray, msk_pixel_invalid: int, msk_pixel_filled_nodata: int
 ) -> Tuple[np.ndarray, np.ndarray]:

pandora/img_tools.py (+1 -1)

@@ -552,7 +552,7 @@ def fill_nodata_image(dataset: xr.Dataset) -> Tuple[np.ndarray, np.ndarray]:
     img = dataset["im"].data
     msk = dataset["msk"].data
     nband = dataset["im"].data.shape[0]
-    # We call the function for each band because of numba
+
     for band in range(nband):
         img[band, :, :], msk[:, :] = interpolate_nodata_sgm(
             dataset["im"].data[band, :, :],

pandora/refinement/cpp/refinement_cpp.pyi (+1)

@@ -100,6 +100,7 @@ def loop_approximate_refinement(
    :rtype: tuple(2D numpy array (row, col), 2D numpy array (row, col), 2D numpy array (row, col))
     """
     ...
+
 def vfit_refinement_method(
     cost, disp, measure, cst_pandora_msk_pixel_stopped_interpolation
 ) -> tuple[float, float, int]:

pandora/refinement/refinement.py (+32 -40)

@@ -96,26 +96,22 @@ def subpixel_refinement(self, cv: xr.Dataset, disp: xr.Dataset) -> None:
         subpixel = cv.attrs["subpixel"]
         measure = cv.attrs["type_measure"]

-        # This silences numba's TBB threading layer warning
-        with warnings.catch_warnings():
-            warnings.filterwarnings("ignore")
-            # Conversion to numpy array ( .data ), because Numba does not support Xarray
-            (
-                itp_coeff,
-                disp["disparity_map"].data,
-                disp["validity_mask"].data,
-            ) = refinement_cpp.loop_refinement(
-                cv["cost_volume"].data,
-                disp["disparity_map"].data,
-                disp["validity_mask"].data,
-                d_min,
-                d_max,
-                subpixel,
-                measure,
-                self.refinement_method,
-                cst.PANDORA_MSK_PIXEL_INVALID,
-                cst.PANDORA_MSK_PIXEL_STOPPED_INTERPOLATION,
-            )
+        (
+            itp_coeff,
+            disp["disparity_map"].data,
+            disp["validity_mask"].data,
+        ) = refinement_cpp.loop_refinement(
+            cv["cost_volume"].data,
+            disp["disparity_map"].data,
+            disp["validity_mask"].data,
+            d_min,
+            d_max,
+            subpixel,
+            measure,
+            self.refinement_method,
+            cst.PANDORA_MSK_PIXEL_INVALID,
+            cst.PANDORA_MSK_PIXEL_STOPPED_INTERPOLATION,
+        )

         disp.attrs["refinement"] = self._refinement_method_name
         disp["interpolated_coeff"] = xr.DataArray(
@@ -154,26 +150,22 @@ def approximate_subpixel_refinement(self, cv_left: xr.Dataset, disp_right: xr.Da
         subpixel = cv_left.attrs["subpixel"]
         measure = cv_left.attrs["type_measure"]

-        # This silences numba's TBB threading layer warning
-        with warnings.catch_warnings():
-            warnings.filterwarnings("ignore")
-            # Conversion to numpy array ( .data ), because Numba does not support Xarray
-            (
-                itp_coeff,
-                disp_right["disparity_map"].data,
-                disp_right["validity_mask"].data,
-            ) = refinement_cpp.loop_approximate_refinement(
-                cv_left["cost_volume"].data,
-                disp_right["disparity_map"].data,
-                disp_right["validity_mask"].data,
-                d_min,
-                d_max,
-                subpixel,
-                measure,
-                self.refinement_method,
-                cst.PANDORA_MSK_PIXEL_INVALID,
-                cst.PANDORA_MSK_PIXEL_STOPPED_INTERPOLATION,
-            )
+        (
+            itp_coeff,
+            disp_right["disparity_map"].data,
+            disp_right["validity_mask"].data,
+        ) = refinement_cpp.loop_approximate_refinement(
+            cv_left["cost_volume"].data,
+            disp_right["disparity_map"].data,
+            disp_right["validity_mask"].data,
+            d_min,
+            d_max,
+            subpixel,
+            measure,
+            self.refinement_method,
+            cst.PANDORA_MSK_PIXEL_INVALID,
+            cst.PANDORA_MSK_PIXEL_STOPPED_INTERPOLATION,
+        )

         disp_right.attrs["refinement"] = self._refinement_method_name
         disp_right["interpolated_coeff"] = xr.DataArray(

pandora/state_machine.py (+2 -5)

@@ -61,11 +61,8 @@

 from pandora.criteria import validity_mask

-# This silences numba's TBB threading layer warning
-with warnings.catch_warnings():
-    warnings.filterwarnings("ignore")
-    from pandora import validation
-    from pandora import cost_volume_confidence
+from pandora import validation
+from pandora import cost_volume_confidence
 from .img_tools import prepare_pyramid

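Note: the removed block is an instance of a general pattern: wrapping imports in warnings.catch_warnings to silence a warning raised at import time (here, numba's TBB threading-layer notice). With numba gone, plain imports suffice. A standalone sketch of the pattern, with json standing in for the pandora submodules:

    import warnings

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")  # suppress anything raised during import
        import json  # noqa: F401  # stand-in for pandora.validation etc.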
pandora/validation/cpp/validation_cpp.pyi (+1 -1)

@@ -1,5 +1,5 @@
 # pylint: skip-file
-def interpolate_occlusion_sgm(disp, valid, msk_pixel_occlusion, msk_pixel_filled_occlusion, msk_pixel_invalid):
+def interpolate_occlusion_sgm(disp, valid, msk_pixel_occlusion, msk_pixel_filled_occlusion, msk_pixel_invalid):
     """
     Interpolation of the left disparity map to resolve occlusion conflicts.
     Interpolate occlusion by moving by selecting

pyproject.toml (-1)

@@ -24,7 +24,6 @@ dependencies = [
     "scipy",
     "rasterio",
     "json-checker",
-    "numba>=0.55.2;python_version>'3.7'",
     "transitions",
     "scikit-image>=0.19.0",
     "importlib-metadata;python_version<'3.10'"
