FIX: Numpy pre-release accommodations #700

Merged Dec 31, 2018 (25 commits)

Commits
0a1ea75
FIX: Immutable buffers should not be set writeable
effigies Dec 10, 2018
81fe543
Revert "FIX: Immutable buffers should not be set writeable"
effigies Dec 12, 2018
c0440e0
MAINT: Add bz2file dependency for Python 2
effigies Dec 12, 2018
2452a0f
RF: Drop SAFE_STRINGERS, depend on bz2file to provide .readinto()
effigies Dec 12, 2018
81e775f
CI: Upgrade setuptools
effigies Dec 12, 2018
4977b45
RF: Circumvents a deprecation warning from `np.fromstring`
arokem Dec 17, 2018
53d9413
RF: fromstring => frombuffer in externals.netcdf.
arokem Dec 17, 2018
5d11c9e
RF: fromstring => frombuffer in gifti/cifti2
arokem Dec 17, 2018
b7e4850
RF: fromstring => frombuffer.
arokem Dec 17, 2018
8c0b21d
BF: Use a writeable bytearray for the header data.
arokem Dec 17, 2018
a696d0d
NF: Implement a `readinto` method for Opener.
arokem Dec 17, 2018
a97b067
MAINT: Move to setuptools exclusively
effigies Dec 21, 2018
c79a291
FIX: Use bz2file for Python 2
effigies Dec 21, 2018
62b8cc9
FIX: Rely on openers.BZ2File to provide correct class
effigies Dec 21, 2018
28412d9
CI: Upgrade pip, setuptools, wheel on AppVeyor
effigies Dec 21, 2018
d7a619f
TEST: fobj string assumptions need only apply to bytearrays
effigies Dec 21, 2018
f5eafcf
FIX: Load TckFile streamlines using bytearray
effigies Dec 21, 2018
8209664
TEST: Return mutable bytearray from trk_with_bytes
effigies Dec 21, 2018
e6f58cb
Merge remote-tracking branch 'arokem/fromstring_deprecation' into fix…
effigies Dec 21, 2018
ed42db3
FIX: Truncate buffer on EOF
effigies Dec 21, 2018
24cba1b
CI: Try alternative pip install
effigies Dec 21, 2018
03086a4
FIX: Use io.BytesIO to test TckFile
effigies Dec 21, 2018
d4f4477
STY: Drop unused variable
effigies Dec 21, 2018
7cf14a3
RF: Leave nisext alone, pop install_requires
effigies Dec 22, 2018
ea1b0cc
DOC: Add a comment explaining readinto/frombuffer idiom
effigies Dec 22, 2018
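The common thread in these commits: NumPy's 1.16 pre-releases deprecate np.fromstring for binary data and refuse to flip the writeable flag on arrays backed by immutable buffers. A minimal sketch of the behavior change and the workaround this PR adopts (illustrative values, not PR code):

import numpy as np

buf = b'\x01\x00\x00\x00'                  # immutable bytes
arr = np.frombuffer(buf, dtype=np.int32)   # zero-copy, read-only view
assert not arr.flags.writeable
# Under NumPy 1.16 pre-releases, this warns (and later errors) rather than
# silently granting write access to the immutable buffer:
#   arr.flags.writeable = True

# The pattern adopted across this PR: read into a mutable bytearray, so the
# frombuffer view is writeable from the start.
mbuf = bytearray(buf)
marr = np.frombuffer(mbuf, dtype=np.int32)
assert marr.flags.writeable
marr[0] = 2                                # legal in-place edit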
2 changes: 1 addition & 1 deletion .travis.yml
@@ -96,7 +96,7 @@ before_install:
   - virtualenv --python=python venv
   - source venv/bin/activate
   - python --version # just to check
-  - pip install -U pip wheel # needed at one point
+  - pip install -U pip setuptools>=27.0 wheel
   - retry pip install nose flake8 mock # always
   - pip install $EXTRA_PIP_FLAGS $DEPENDS $OPTIONAL_DEPENDS
   - if [ "${COVERAGE}" == "1" ]; then
1 change: 1 addition & 0 deletions appveyor.yml
@@ -22,6 +22,7 @@ install:
   - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH%
 
   # Install the dependencies of the project.
+  - python -m pip install --upgrade pip setuptools wheel
   - pip install numpy scipy matplotlib nose h5py mock pydicom
   - pip install .
   - SET NIBABEL_DATA_DIR=%CD%\nibabel-data
4 changes: 2 additions & 2 deletions nibabel/cifti2/tests/test_cifti2.py
(Whitespace-only cleanup: the rendered diff shows no textual change, so the one addition and one deletion in each hunk appear to be trailing-whitespace fixes on blank lines.)

@@ -58,7 +58,7 @@ def test_cifti2_metadata():
     assert_equal(md.data, dict(metadata_test))
 
     assert_equal(list(iter(md)), list(iter(collections.OrderedDict(metadata_test))))
 
     md.update({'a': 'aval', 'b': 'bval'})
     assert_equal(md.data, dict(metadata_test))
 
@@ -310,7 +310,7 @@ def test_matrix():
 
     assert_raises(ci.Cifti2HeaderError, m.insert, 0, mim_none)
     assert_equal(m.mapped_indices, [])
 
     h = ci.Cifti2Header(matrix=m)
     assert_equal(m.mapped_indices, [])
     m.insert(0, mim_0)
16 changes: 8 additions & 8 deletions nibabel/externals/netcdf.py
@@ -37,7 +37,7 @@
 
 import numpy as np  # noqa
 from ..py3k import asbytes, asstr
-from numpy import fromstring, ndarray, dtype, empty, array, asarray
+from numpy import frombuffer, ndarray, dtype, empty, array, asarray
 from numpy import little_endian as LITTLE_ENDIAN
 from functools import reduce
 
@@ -519,7 +519,7 @@ def _read(self):
         if not magic == b'CDF':
             raise TypeError("Error: %s is not a valid NetCDF 3 file" %
                             self.filename)
-        self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0]
+        self.__dict__['version_byte'] = frombuffer(self.fp.read(1), '>b')[0]
 
         # Read file headers and set data.
         self._read_numrecs()
@@ -608,7 +608,7 @@ def _read_var_array(self):
             # Calculate size to avoid problems with vsize (above)
             a_size = reduce(mul, shape, 1) * size
             if self.file_bytes >= 0 and begin_ + a_size > self.file_bytes:
-                data = fromstring(b'\x00'*a_size, dtype=dtype_)
+                data = frombuffer(b'\x00'*a_size, dtype=dtype_)
             elif self.use_mmap:
                 mm = mmap(self.fp.fileno(), begin_+a_size, access=ACCESS_READ)
                 data = ndarray.__new__(ndarray, shape, dtype=dtype_,
@@ -622,7 +622,7 @@ def _read_var_array(self):
                 buf = self.fp.read(a_size)
                 if len(buf) < a_size:
                     buf = b'\x00'*a_size
-                data = fromstring(buf, dtype=dtype_)
+                data = frombuffer(buf, dtype=dtype_)
                 data.shape = shape
                 self.fp.seek(pos)
 
@@ -644,7 +644,7 @@ def _read_var_array(self):
         else:
             pos = self.fp.tell()
             self.fp.seek(begin)
-            rec_array = fromstring(self.fp.read(self._recs*self._recsize), dtype=dtypes)
+            rec_array = frombuffer(self.fp.read(self._recs*self._recsize), dtype=dtypes)
             rec_array.shape = (self._recs,)
             self.fp.seek(pos)
 
@@ -687,7 +687,7 @@ def _read_values(self):
         self.fp.read(-count % 4)  # read padding
 
         if typecode is not 'c':
-            values = fromstring(values, dtype='>%s' % typecode)
+            values = frombuffer(values, dtype='>%s' % typecode)
             if values.shape == (1,):
                 values = values[0]
             else:
@@ -705,14 +705,14 @@ def _pack_int(self, value):
     _pack_int32 = _pack_int
 
     def _unpack_int(self):
-        return int(fromstring(self.fp.read(4), '>i')[0])
+        return int(frombuffer(self.fp.read(4), '>i')[0])
     _unpack_int32 = _unpack_int
 
     def _pack_int64(self, value):
        self.fp.write(array(value, '>q').tostring())
 
    def _unpack_int64(self):
-        return fromstring(self.fp.read(8), '>q')[0]
+        return frombuffer(self.fp.read(8), '>q')[0]
 
     def _pack_string(self, s):
         count = len(s)
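These netcdf edits are mechanical fromstring-to-frombuffer swaps: both parse raw bytes against a dtype, but frombuffer avoids the deprecation warning and makes the zero-copy, read-only semantics explicit. A standalone sketch of the _unpack_int pattern, with a made-up stream standing in for self.fp:

from io import BytesIO

import numpy as np

fp = BytesIO(b'\x00\x00\x00\x2a')                # big-endian int32 == 42
value = int(np.frombuffer(fp.read(4), '>i')[0])
assert value == 42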
4 changes: 2 additions & 2 deletions nibabel/gifti/parse_gifti_fast.py
@@ -47,7 +47,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data):
         dec = base64.b64decode(data.encode('ascii'))
         dt = data_type_codes.type[datatype]
         sh = tuple(shape)
-        newarr = np.fromstring(dec, dtype=dt)
+        newarr = np.frombuffer(dec, dtype=dt)
         if len(newarr.shape) != len(sh):
             newarr = newarr.reshape(sh, order=ord)
 
@@ -59,7 +59,7 @@ def read_data_block(encoding, endian, ordering, datatype, shape, data):
         zdec = zlib.decompress(dec)
         dt = data_type_codes.type[datatype]
         sh = tuple(shape)
-        newarr = np.fromstring(zdec, dtype=dt)
+        newarr = np.frombuffer(zdec, dtype=dt)
         if len(newarr.shape) != len(sh):
             newarr = newarr.reshape(sh, order=ord)
 
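The GIFTI decode paths get the same swap. A self-contained sketch of the compressed branch, with a made-up float32 payload standing in for a real data block (the reshape mirrors read_data_block's handling of shape):

import base64
import zlib

import numpy as np

# Build a fake compressed block: base64 text wrapping a zlib stream.
orig = np.arange(6, dtype=np.float32)
data = base64.b64encode(zlib.compress(orig.tobytes())).decode('ascii')

# Decode it the way the patched branch does.
dec = base64.b64decode(data.encode('ascii'))
zdec = zlib.decompress(dec)
newarr = np.frombuffer(zdec, dtype=np.float32).reshape((2, 3))
# The view is read-only (zdec is immutable bytes): copy before mutating.
assert not newarr.flags.writeable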
3 changes: 2 additions & 1 deletion nibabel/info.py
@@ -209,4 +209,5 @@ def cmp_pkg_version(version_str, pkg_version_str=__version__):
 ISRELEASE = _version_extra == ''
 VERSION = __version__
 PROVIDES = ["nibabel", 'nisext']
-REQUIRES = ["numpy (>=%s)" % NUMPY_MIN_VERSION]
+REQUIRES = ["numpy>=%s" % NUMPY_MIN_VERSION,
+            'bz2file; python_version < "3.0"']
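The second entry uses a PEP 508 environment marker, so pip resolves the bz2file backport only on Python 2; setup.py (below) now feeds this list to setuptools' install_requires. Roughly, with a placeholder numpy pin rather than nibabel's actual NUMPY_MIN_VERSION:

install_requires = [
    'numpy>=1.8',                        # placeholder minimum version
    'bz2file; python_version < "3.0"',   # marker: installed on Python 2 only
]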
2 changes: 1 addition & 1 deletion nibabel/nifti1.py
@@ -579,7 +579,7 @@ def from_fileobj(klass, fileobj, size, byteswap):
             # otherwise there should be a full extension header
             if not len(ext_def) == 8:
                 raise HeaderDataError('failed to read extension header')
-            ext_def = np.fromstring(ext_def, dtype=np.int32)
+            ext_def = np.frombuffer(ext_def, dtype=np.int32)
             if byteswap:
                 ext_def = ext_def.byteswap()
             # be extra verbose
11 changes: 9 additions & 2 deletions nibabel/openers.py
@@ -9,7 +9,11 @@
 """ Context manager openers for various fileobject types
 """
 
-import bz2
+import sys
+if sys.version_info[0] < 3:
+    from bz2file import BZ2File
+else:
+    from bz2 import BZ2File
 import gzip
 import sys
 import warnings
@@ -127,7 +131,7 @@ class Opener(object):
         for \*args
     """
     gz_def = (_gzip_open, ('mode', 'compresslevel', 'keep_open'))
-    bz2_def = (bz2.BZ2File, ('mode', 'buffering', 'compresslevel'))
+    bz2_def = (BZ2File, ('mode', 'buffering', 'compresslevel'))
     compress_ext_map = {
         '.gz': gz_def,
         '.bz2': bz2_def,
@@ -209,6 +213,9 @@ def fileno(self):
     def read(self, *args, **kwargs):
         return self.fobj.read(*args, **kwargs)
 
+    def readinto(self, *args, **kwargs):
+        return self.fobj.readinto(*args, **kwargs)
+
     def write(self, *args, **kwargs):
         return self.fobj.write(*args, **kwargs)
 
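With the pass-through in place, any Opener-wrapped file object exposes readinto, which fills a caller-supplied mutable buffer and returns the number of bytes read, per the io module contract. A usage sketch ('example.nii' is a placeholder path; 348 is the NIfTI-1 header size):

import numpy as np

from nibabel.openers import Opener

buf = bytearray(348)                   # mutable target for the raw header
with Opener('example.nii', 'rb') as fobj:
    n_read = fobj.readinto(buf)        # number of bytes actually read
assert n_read == len(buf)
hdr_bytes = np.frombuffer(buf, dtype=np.uint8)
assert hdr_bytes.flags.writeable       # mutable backing buffer, no copy made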
17 changes: 9 additions & 8 deletions nibabel/streamlines/tck.py
@@ -405,18 +405,21 @@ def _read(cls, fileobj, header, buffer_size=4):
             n_streams = 0
 
             while not eof:
+                buff = bytearray(buffer_size)
+                n_read = f.readinto(buff)
+                eof = n_read != buffer_size
+                if eof:
+                    buff = buff[:n_read]
 
-                bytes_read = f.read(buffer_size)
-                buffs.append(bytes_read)
-                eof = len(bytes_read) != buffer_size
+                buffs.append(buff)
 
                 # Make sure we've read enough to find a streamline delimiter.
-                if fiber_marker not in bytes_read:
+                if fiber_marker not in buff:
                     # If we've read the whole file, then fail.
                     if eof:
                         # Could have minimal buffering, and have read only the
                         # EOF delimiter
-                        buffs = [b''.join(buffs)]
+                        buffs = [bytearray().join(buffs)]
                         if not buffs[0] == eof_marker:
                             raise DataError(
                                 "Cannot find a streamline delimiter. This file"
@@ -425,15 +428,13 @@ def _read(cls, fileobj, header, buffer_size=4):
                     # Otherwise read a bit more.
                     continue
 
-                all_parts = b''.join(buffs).split(fiber_marker)
+                all_parts = bytearray().join(buffs).split(fiber_marker)
                 point_parts, buffs = all_parts[:-1], all_parts[-1:]
                 point_parts = [p for p in point_parts if p != b'']
 
                 for point_part in point_parts:
                     # Read floats.
                     pts = np.frombuffer(point_part, dtype=dtype)
-                    # Enforce ability to write to underlying bytes object
-                    pts.flags.writeable = True
                     # Convert data to little-endian if needed.
                     yield pts.astype('<f4', copy=False).reshape([-1, 3])
 
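The reworked loop preallocates a bytearray each iteration and truncates it on the final short read, so stale zero padding can never reach np.frombuffer as fake coordinates. A standalone sketch over a tiny in-memory stream (buffer_size is raw bytes here; the real reader scales it by megabytes):

from io import BytesIO

import numpy as np

f = BytesIO(np.arange(9, dtype='<f4').tobytes())   # 36 bytes of fake points
buffer_size = 16
buffs, eof = [], False
while not eof:
    buff = bytearray(buffer_size)
    n_read = f.readinto(buff)
    eof = n_read != buffer_size
    if eof:
        buff = buff[:n_read]       # truncate the unfilled tail
    buffs.append(buff)

pts = np.frombuffer(bytearray().join(buffs), dtype='<f4')
assert pts.size == 9               # nothing lost, nothing zero-padded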
2 changes: 1 addition & 1 deletion nibabel/streamlines/tests/test_tck.py
@@ -3,7 +3,7 @@
 import numpy as np
 from os.path import join as pjoin
 
-from six import BytesIO
+from io import BytesIO
 from nibabel.py3k import asbytes
 
 from ..array_sequence import ArraySequence
2 changes: 1 addition & 1 deletion nibabel/streamlines/tests/test_trk.py
@@ -103,7 +103,7 @@ def test_load_complex_file(self):
     def trk_with_bytes(self, trk_key='simple_trk_fname', endian='<'):
         """ Return example trk file bytes and struct view onto bytes """
         with open(DATA[trk_key], 'rb') as fobj:
-            trk_bytes = fobj.read()
+            trk_bytes = bytearray(fobj.read())
         dt = trk_module.header_2_dtype.newbyteorder(endian)
         trk_struct = np.ndarray((1,), dt, buffer=trk_bytes)
         trk_struct.flags.writeable = True
10 changes: 5 additions & 5 deletions nibabel/streamlines/trk.py
@@ -556,11 +556,11 @@ def _read_header(fileobj):
         start_position = fileobj.tell() if hasattr(fileobj, 'tell') else None
 
         with Opener(fileobj) as f:
-
-            # Read the header in one block.
-            header_str = f.read(header_2_dtype.itemsize)
-            header_rec = np.fromstring(string=header_str, dtype=header_2_dtype)
-
+            # Reading directly from a file into a (mutable) bytearray enables a zero-copy
+            # cast to a mutable numpy object with frombuffer
+            header_buf = bytearray(header_2_dtype.itemsize)
+            f.readinto(header_buf)
+            header_rec = np.frombuffer(buffer=header_buf, dtype=header_2_dtype)
             # Check endianness
             endianness = native_code
             if header_rec['hdr_size'] != TrkFile.HEADER_SIZE:
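This hunk is the idiom the final DOC commit documents: readinto a mutable bytearray, then frombuffer over it, which yields a writeable record array with no intermediate copy, so the in-place byteswapping further down stays legal. A toy version with a two-field dtype standing in for the full header_2_dtype:

from io import BytesIO

import numpy as np

dt = np.dtype([('hdr_size', '<i4'), ('version', '<i2')])    # toy header
header_buf = bytearray(dt.itemsize)
BytesIO(b'\xe8\x03\x00\x00\x02\x00').readinto(header_buf)   # fake file
header_rec = np.frombuffer(buffer=header_buf, dtype=dt)
assert header_rec.flags.writeable          # mutable source, no copy needed
assert header_rec['hdr_size'][0] == 1000   # TRK's expected header size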
3 changes: 1 addition & 2 deletions nibabel/tests/test_openers.py
@@ -10,12 +10,11 @@
 import os
 import contextlib
 from gzip import GzipFile
-from bz2 import BZ2File
 from io import BytesIO, UnsupportedOperation
 from distutils.version import StrictVersion
 
 from ..py3k import asstr, asbytes
-from ..openers import Opener, ImageOpener, HAVE_INDEXED_GZIP
+from ..openers import Opener, ImageOpener, HAVE_INDEXED_GZIP, BZ2File
 from ..tmpdirs import InTemporaryDirectory
 from ..volumeutils import BinOpener
 
12 changes: 7 additions & 5 deletions nibabel/tests/test_volumeutils.py
@@ -46,7 +46,7 @@
     _dt_min_max,
     _write_data,
 )
-from ..openers import Opener
+from ..openers import Opener, BZ2File
 from ..casting import (floor_log2, type_info, OK_FLOATS, shared_range)
 
 from numpy.testing import (assert_array_almost_equal,
@@ -71,7 +71,7 @@ def test__is_compressed_fobj():
     with InTemporaryDirectory():
         for ext, opener, compressed in (('', open, False),
                                         ('.gz', gzip.open, True),
-                                        ('.bz2', bz2.BZ2File, True)):
+                                        ('.bz2', BZ2File, True)):
             fname = 'test.bin' + ext
             for mode in ('wb', 'rb'):
                 fobj = opener(fname, mode)
@@ -94,7 +94,7 @@ def make_array(n, bytes):
     with InTemporaryDirectory():
         for n, opener in itertools.product(
                 (256, 1024, 2560, 25600),
-                (open, gzip.open, bz2.BZ2File)):
+                (open, gzip.open, BZ2File)):
             in_arr = np.arange(n, dtype=dtype)
             # Write array to file
             fobj_w = opener(fname, 'wb')
@@ -103,7 +103,8 @@ def make_array(n, bytes):
             # Read back from file
             fobj_r = opener(fname, 'rb')
             try:
-                contents1 = fobj_r.read()
+                contents1 = bytearray(4 * n)
+                fobj_r.readinto(contents1)
                 # Second element is 1
                 assert_false(contents1[0:8] == b'\x00' * 8)
                 out_arr = make_array(n, contents1)
@@ -114,7 +115,8 @@ def make_array(n, bytes):
                 assert_equal(contents1[:8], b'\x00' * 8)
                 # Reread, to get unmodified contents
                 fobj_r.seek(0)
-                contents2 = fobj_r.read()
+                contents2 = bytearray(4 * n)
+                fobj_r.readinto(contents2)
                 out_arr2 = make_array(n, contents2)
                 assert_array_equal(in_arr, out_arr2)
                 assert_equal(out_arr[1], 0)
10 changes: 3 additions & 7 deletions nibabel/volumeutils.py
@@ -12,7 +12,6 @@
 import sys
 import warnings
 import gzip
-import bz2
 from collections import OrderedDict
 from os.path import exists, splitext
 from operator import mul
@@ -21,7 +20,7 @@
 import numpy as np
 
 from .casting import (shared_range, type_info, OK_FLOATS)
-from .openers import Opener
+from .openers import Opener, BZ2File
 from .deprecated import deprecate_with_version
 from .externals.oset import OrderedSet
 
@@ -40,10 +39,7 @@
 default_compresslevel = 1
 
 #: file-like classes known to hold compressed data
-COMPRESSED_FILE_LIKES = (gzip.GzipFile, bz2.BZ2File)
-
-#: file-like classes known to return string values that are safe to modify
-SAFE_STRINGERS = (gzip.GzipFile, bz2.BZ2File)
+COMPRESSED_FILE_LIKES = (gzip.GzipFile, BZ2File)
 
 
 class Recoder(object):
@@ -530,7 +526,7 @@ def array_from_file(shape, in_dtype, infile, offset=0, order='F', mmap=True):
     else:
         data_bytes = infile.read(n_bytes)
         n_read = len(data_bytes)
-        needs_copy = not isinstance(infile, SAFE_STRINGERS)
+        needs_copy = True
        if n_bytes != n_read:
            raise IOError('Expected {0} bytes, got {1} bytes from {2}\n'
                          ' - could the file be damaged?'.format(
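Dropping SAFE_STRINGERS means the non-mmap path of array_from_file now always copies before handing back a writeable array: infile.read() returns immutable bytes, and the old assumption that gzip/bz2 objects return strings "safe to modify" no longer holds under new NumPy. The forced copy, in miniature:

import numpy as np

data_bytes = b'\x01\x00\x02\x00'              # stand-in for infile.read(n_bytes)
arr = np.frombuffer(data_bytes, dtype='<i2')  # read-only view over bytes
assert not arr.flags.writeable
arr = arr.copy()                              # what needs_copy=True forces
arr *= 2                                      # now safe to scale in place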
13 changes: 4 additions & 9 deletions setup.py
@@ -19,14 +19,7 @@
 if os.path.exists('MANIFEST'):
     os.remove('MANIFEST')
 
-# For some commands, use setuptools.
-if len(set(('develop', 'bdist_egg', 'bdist_rpm', 'bdist', 'bdist_dumb',
-            'install_egg_info', 'egg_info', 'easy_install', 'bdist_wheel',
-            'bdist_mpkg')).intersection(sys.argv)) > 0:
-    # setup_egg imports setuptools setup, thus monkeypatching distutils.
-    import setup_egg  # noqa
-
-from distutils.core import setup
+from setuptools import setup
 
 # Commit hash writing, and dependency checking
 from nisext.sexts import (get_comrec_build, package_check, install_scripts_bat,
@@ -77,8 +70,8 @@ def main(**extra_args):
           author_email=INFO.AUTHOR_EMAIL,
           platforms=INFO.PLATFORMS,
           version=INFO.VERSION,
-          requires=INFO.REQUIRES,
           provides=INFO.PROVIDES,
+          install_requires=INFO.REQUIRES,
           packages = ['nibabel',
                       'nibabel.externals',
                       'nibabel.externals.tests',
@@ -127,4 +120,6 @@ def main(**extra_args):
 
 
 if __name__ == "__main__":
+    # Do not use nisext's dynamically updated install_requires
+    extra_setuptools_args.pop('install_requires', None)
     main(**extra_setuptools_args)
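The setup.py change swaps distutils' purely informational requires metadata for setuptools' install_requires, which pip actually resolves at install time; the final pop keeps nisext's dynamically computed list from overriding INFO.REQUIRES. A stripped-down sketch of the resulting call (all metadata values are placeholders):

from setuptools import setup

setup(
    name='nibabel',
    version='0.0.0.dev0',                  # placeholder
    install_requires=[
        'numpy>=1.8',                      # placeholder pin
        'bz2file; python_version < "3.0"',
    ],
)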