commit 5c4608e35b15aaf53c90cc4bf8b9ef1ca95335fd Author: Blake Griffith Date: Thu Aug 15 22:56:36 2013 -0500 TST: Tests for compatibility with NumPy ufuncs and dot. Note that these tests only run if NumPy version is greater than 1.9. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 886c87a..ef4a6bc 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -18,6 +18,7 @@ Run tests if scipy is installed: Run tests if sparse is not installed: python tests/test_base.py """ +from distutils.version import LooseVersion import warnings @@ -1244,20 +1245,6 @@ class _TestCommon: for x, y in zip(A, B): assert_equal(x.todense(), y) - # Eventually we'd like to allow matrix products between dense - # and sparse matrices using the normal dot() function: - # def test_dense_dot_sparse(self): - # a = array([1.,2.,3.]) - # dense_dot_dense = dot(a, self.dat) - # dense_dot_sparse = dot(a, self.datsp) - # assert_array_equal(dense_dot_dense, dense_dot_sparse) - - # def test_sparse_dot_dense(self): - # b = array([1.,2.,3.,4.]) - # dense_dot_dense = dot(self.dat, b) - # dense_dot_sparse = dot(self.datsp, b) - # assert_array_equal(dense_dot_dense, dense_dot_sparse) - def test_size_zero_matrix_arithmetic(self): # Test basic matrix arithmatic with shapes like (0,0), (10,0), # (0, 3), etc. @@ -1331,6 +1318,56 @@ class _TestCommon: assert_array_equal(spm.todok().A, m) assert_array_equal(spm.tobsr().A, m) + # Eventually we'd like to allow matrix products between dense + # and sparse matrices using the normal dot() function: + # def test_dense_dot_sparse(self): + # a = array([1.,2.,3.]) + # dense_dot_dense = dot(a, self.dat) + # dense_dot_sparse = dot(a, self.datsp) + # assert_array_equal(dense_dot_dense, dense_dot_sparse) + + # def test_sparse_dot_dense(self): + # b = array([1.,2.,3.,4.]) + # dense_dot_dense = dot(self.dat, b) + # dense_dot_sparse = dot(self.datsp, b) + # assert_array_equal(dense_dot_dense, dense_dot_sparse) + + def test_ufunc_overrides(self): + def check(): + #data + a = np.array([[1, 2, 3], + [4, 5, 6], + [7, 8, 9]]) + b = np.array([[9, 8, 7], + [6, 5, 4], + [3, 2, 1]]) + + asp = self.spmatrix(a) + bsp = self.spmatrix(b) + + # associative + # multiply + assert_array_equal(np.multiply(asp, bsp).A, np.multiply(a, b)) + # add + assert_array_equal(np.add(asp, bsp).A, np.add(a, b)) + + # non-associative + # dot + assert_array_equal(np.dot(asp, bsp).A, np.dot(a, b)) + assert_array_equal(np.dot(a, bsp), np.dot(a, b)) + assert_array_equal(np.dot(asp, b), np.dot(a, b)) + # subtract + assert_array_equal(np.subtract(asp, bsp).A, np.subtract(a, b)) + assert_array_equal(np.subtract(a, bsp).A, np.subtract(a, b)) + assert_array_equal(np.subtract(asp, b).A, np.subtract(a, b)) + # divide + assert_array_equal(np.divide(asp, bsp).A, np.divide(a, b)) + assert_array_equal(np.divide(asp, b).A, np.divide(a, b)) + assert_raises(TypeError, np.divide, a, bsp) + + if LooseVersion(np.version.version) > LooseVersion('1.9'): + yield check + class _TestInplaceArithmetic: def test_imul_scalar(self): commit 18aee6561817c6e9101681868fd712fae3a83afe Author: Blake Griffith Date: Thu Aug 15 22:37:48 2013 -0500 ENH: Sparse compatibility with NumPy dot, multiply, add, div, subtract. 
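The behavior added below is easiest to see from the caller's side. A minimal sketch, assuming a NumPy build that implements the proposed __numpy_ufunc__ protocol (the tests above only run when the NumPy version is greater than 1.9); the matrices are illustrative only:

    import numpy as np
    from scipy.sparse import csr_matrix

    a = np.array([[1, 2, 3],
                  [4, 5, 6],
                  [7, 8, 9]])
    asp = csr_matrix(a)
    bsp = csr_matrix(a.T)

    # NumPy defers to the sparse operand instead of densifying or raising:
    np.multiply(asp, bsp)   # element-wise; dispatches to asp.multiply(bsp)
    np.add(asp, bsp)        # dispatches to asp.__add__(bsp)
    np.dot(asp, bsp)        # matrix product; dispatches to asp.__mul__(bsp)
    np.subtract(a, bsp)     # dense left operand; dispatches to bsp.__rsub__(a)

Each result compares equal to the corresponding dense computation; np.divide with a dense left operand and a sparse right operand is the one combination the tests expect to raise TypeError.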
diff --git a/scipy/sparse/base.py b/scipy/sparse/base.py index dd57329..9eb26cf 100644 --- a/scipy/sparse/base.py +++ b/scipy/sparse/base.py @@ -387,10 +387,17 @@ class spmatrix(object): else: return self.tocsr().__truediv__(other) + def __rtruediv__(self, other): + return NotImplemented + def __div__(self, other): # Always do true division return self.__truediv__(other) + def __rdiv__(self, other): + # Always do true division + return self.__rtruediv__(other) + def __neg__(self): return -self.tocsr() @@ -709,6 +716,52 @@ class spmatrix(object): else: return np.zeros(self.shape, dtype=self.dtype, order=order) + def __numpy_ufunc__(self, func, method, pos, inputs, **kwargs): + """Method for compatibility with NumPy's ufuncs and dot + functions. + """ + if method != '__call__' or kwargs: + return NotImplemented + + without_self = list(inputs) + del without_self[pos] + without_self = tuple(without_self) + + # Associative operations + if func is np.multiply: + return self.multiply(*without_self) + + elif func is np.add: + return self.__add__(*without_self) + + # Non-associative operations + elif func is np.dot: + if pos == 0: + return self.__mul__(inputs[1]) + else: + return self.__rmul__(inputs[0]) + + elif func is np.subtract: + if pos == 0: + return self.__sub__(inputs[1]) + else: + return self.__rsub__(inputs[0]) + + elif func is np.divide: + if pos == 0: + return self.__div__(inputs[1]) + else: + return NotImplemented + + elif func is np.true_divide: + if pos == 0: + return self.__truediv__(inputs[1]) + else: + return NotImplemented + + else: + return NotImplemented + def isspmatrix(x): return isinstance(x, spmatrix) commit dadf49de1789f56fb3040324376e3f5937e3e34c Author: Blake Griffith Date: Thu Aug 15 22:36:31 2013 -0500 BUG: Remove bug where __div__ & __truediv__ didn't upcast dtype. diff --git a/scipy/sparse/base.py b/scipy/sparse/base.py index 71a946a..dd57329 100644 --- a/scipy/sparse/base.py +++ b/scipy/sparse/base.py @@ -377,9 +377,15 @@ class spmatrix(object): def __truediv__(self, other): if isscalarlike(other): - return self * (1./other) + if np.can_cast(self.dtype, np.float_): + return self.astype(np.float_) * (1./other) + else: + return self * (1./other) else: - return self.tocsr().__truediv__(other) + if np.can_cast(self.dtype, np.float_): + return self.astype(np.float_).tocsr().__truediv__(other) + else: + return self.tocsr().__truediv__(other) def __div__(self, other): # Always do true division diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index 81b8d96..d0c4cbe 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -415,13 +415,21 @@ class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): def __truediv__(self,other): if isscalarlike(other): - return self * (1./other) + if np.can_cast(self.dtype, np.float_): + return self.astype(np.float_) * (1./other) + else: + return self * (1./other) elif isspmatrix(other): if other.shape != self.shape: raise ValueError('inconsistent shapes') + elif np.can_cast(self.dtype, np.float_): + return self.astype(np.float_)._binopt(other,'_eldiv_') + else: + return self._binopt(other,'_eldiv_') - return self._binopt(other,'_eldiv_') + elif isdense(other): + return self.todense().__truediv__(other) else: raise NotImplementedError commit 257f278f4311770ab359d522b4b1973130ae6e8b Author: Blake Griffith Date: Wed Aug 21 18:21:24 2013 -0500 TST sparse: Tests for sparse matirx min and max axis args. 
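A usage sketch of the axis argument added by the next commit (the data mirrors the test below). With axis=None the result is a scalar as before; with axis=0 or axis=1 the result is a sparse matrix of the same format:

    import numpy as np
    from scipy.sparse import csr_matrix

    D = np.matrix(np.arange(50).reshape(5, 10))
    X = csr_matrix(D)

    X.max()            # scalar maximum over all elements, zeros included
    X.max(axis=0).A    # 1 x 10 result, one maximum per column
    X.min(axis=1).A    # 5 x 1 result, one minimum per row

Any other axis value raises ValueError.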
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index dce5945..fb249b1 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -2242,6 +2242,18 @@ class _TestMinMax(object): assert_equal(X.min(), 0) assert_equal(X.max(), 19) + def test_minmax_axis(self): + def check(): + D = np.matrix(np.arange(50).reshape(5,10)) + X = self.spmatrix(D) + assert_array_equal(X.max(axis=0).A, D.max(axis=0).A) + assert_array_equal(X.max(axis=1).A, D.max(axis=1).A) + + assert_array_equal(X.min(axis=0).A, D.min(axis=0).A) + assert_array_equal(X.min(axis=1).A, D.min(axis=1).A) + + yield check + #------------------------------------------------------------------------------ # Tailored base class for generic tests commit 9494b2218cdcfea27ad74617c09daba5cd1fcbe7 Author: Blake Griffith Date: Wed Aug 21 23:56:43 2013 -0500 ENH sparse: Add support for sparse matrix .min and .max axis arguments. diff --git a/scipy/sparse/data.py b/scipy/sparse/data.py index f3c8c3a..07fe4ce 100644 --- a/scipy/sparse/data.py +++ b/scipy/sparse/data.py @@ -14,6 +14,7 @@ import numpy as np from .base import spmatrix from .sputils import isscalarlike +from .lil import lil_matrix # TODO implement all relevant operations @@ -100,7 +101,46 @@ class _minmax_mixin(object): These are not implemented for dia_matrix, hence the separate class. """ - def max(self): + def _min_or_max_axis(self, axis, min_or_max): + if axis == 0: + mat = self.tocsr() + if not mat.has_sorted_indices: + mat.sort_indices() + + out_mat = lil_matrix((1, self.shape[1])) + zero = self.dtype.type(0) + for i in range(self.shape[1]): + ith_col_data_indices = np.argwhere(mat.indices == i) + ith_col_data = mat.data[ith_col_data_indices] + + # Add a zero if needed + if len(ith_col_data) < self.shape[0]: + ith_col_data = np.append(zero, ith_col_data) + + get_min_or_max = getattr(ith_col_data, min_or_max) + out_mat[0, i] = get_min_or_max() + return self.__class__(out_mat) + + elif axis == 1: + mat = self.tocsc() + if not mat.has_sorted_indices: + mat.sort_indices() + + out_mat = lil_matrix((self.shape[0], 1)) + zero = self.dtype.type(0) + for i in range(self.shape[0]): + ith_row_data_indices = np.argwhere(mat.indices == i) + ith_row_data = mat.data[ith_row_data_indices] + + # Add a zero if needed + if len(ith_row_data) < self.shape[1]: + ith_row_data = np.append(zero, ith_row_data) + + get_min_or_max = getattr(ith_row_data, min_or_max) + out_mat[i, 0] = get_min_or_max() + return self.__class__(out_mat) + + def max(self, axis=None): """Maximum of the elements of this matrix. This takes all elements into account, not just the non-zero ones. @@ -110,15 +150,22 @@ class _minmax_mixin(object): amax : self.dtype Maximum element. """ - zero = self.dtype.type(0) - if self.nnz == 0: - return zero - mx = np.max(self.data) - if self.nnz != np.product(self.shape): - mx = max(zero, mx) - return mx - - def min(self): + if axis is None: + zero = self.dtype.type(0) + if self.nnz == 0: + return zero + mx = np.max(self.data) + if self.nnz != np.product(self.shape): + mx = max(zero, mx) + return mx + + elif (axis == 0) or (axis == 1): + return self._min_or_max_axis(axis, 'max') + + else: + raise ValueError("invalid axis, use 0 for rows, or 1 for columns") + + def min(self, axis=None): """Minimum of the elements of this matrix. This takes all elements into account, not just the non-zero ones. @@ -128,10 +175,17 @@ class _minmax_mixin(object): amin : self.dtype Minimum element. 
""" - zero = self.dtype.type(0) - if self.nnz == 0: - return zero - mn = np.min(self.data) - if self.nnz != np.product(self.shape): - mn = min(zero, mn) - return mn + if axis is None: + zero = self.dtype.type(0) + if self.nnz == 0: + return zero + mn = np.min(self.data) + if self.nnz != np.product(self.shape): + mn = min(zero, mn) + return mn + + elif (axis == 0) or (axis == 1): + return self._min_or_max_axis(axis, 'min') + + else: + raise ValueError("invalid axis, use 0 for rows, or 1 for columns") commit cf3215ce1609ba892429ae8fe629fb57e1232704 Author: Blake Griffith Date: Mon Aug 12 17:30:48 2013 -0500 BUG: Remove broken take and split methods from DOK. diff --git a/scipy/sparse/dok.py b/scipy/sparse/dok.py index de3d344..043e60d 100644 --- a/scipy/sparse/dok.py +++ b/scipy/sparse/dok.py @@ -491,52 +491,6 @@ class dok_matrix(spmatrix, dict): out[i, 0] = self[i, j] return out - def take(self, cols_or_rows, columns=1): - # Extract columns or rows as indictated from matrix - # assume cols_or_rows is sorted - new = dok_matrix(dtype=self.dtype) # what should the dimensions be ?! - indx = int((columns == 1)) - N = len(cols_or_rows) - if indx: # columns - for key in self.keys(): - num = np.searchsorted(cols_or_rows, key[1]) - if num < N: - newkey = (key[0], num) - new[newkey] = self[key] - else: - for key in self.keys(): - num = np.searchsorted(cols_or_rows, key[0]) - if num < N: - newkey = (num, key[1]) - new[newkey] = self[key] - return new - - def split(self, cols_or_rows, columns=1): - # Similar to take but returns two arrays, the extracted columns plus - # the resulting array. Assumes cols_or_rows is sorted - base = dok_matrix() - ext = dok_matrix() - indx = int((columns == 1)) - if indx: - for key in self.keys(): - num = np.searchsorted(cols_or_rows, key[1]) - if cols_or_rows[num] == key[1]: - newkey = (key[0], num) - ext[newkey] = self[key] - else: - newkey = (key[0], key[1]-num) - base[newkey] = self[key] - else: - for key in self.keys(): - num = np.searchsorted(cols_or_rows, key[0]) - if cols_or_rows[num] == key[0]: - newkey = (num, key[1]) - ext[newkey] = self[key] - else: - newkey = (key[0]-num, key[1]) - base[newkey] = self[key] - return base, ext - def tocoo(self): """ Return a copy of this matrix in COOrdinate format""" from .coo import coo_matrix commit e5c9e678c44a7da41a13cea0d5e3d51b63e5015e Author: Blake Griffith Date: Mon Aug 12 16:45:20 2013 -0500 TST: Turn on scalar multiplication tests for LIL and DOK. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index dce5945..5cf96f5 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -793,11 +793,7 @@ class _TestCommon: assert_array_equal(dat*17.3,(datsp*17.3).todense()) for dtype in self.checked_dtypes: - fails = ((dtype == np.typeDict['int']) and - (self.__class__ == TestLIL or - self.__class__ == TestDOK)) - msg = "LIL and DOK type's __mul__ method has problems with int data." - yield dec.knownfailureif(fails, msg)(check), dtype + yield check, dtype def test_rmul_scalar(self): def check(dtype): commit 6dd6e01d921b08e797cd8255e6c7bb336119fd02 Author: Blake Griffith Date: Sun Aug 11 23:04:58 2013 -0500 BUG: Fix casting in _mul_scalar for DOK and LIL. The way I did the casting is pretty hackish. There should be a better way. 
diff --git a/scipy/sparse/dok.py b/scipy/sparse/dok.py index fbbe3cb..de3d344 100644 --- a/scipy/sparse/dok.py +++ b/scipy/sparse/dok.py @@ -392,8 +392,10 @@ class dok_matrix(spmatrix, dict): return new def _mul_scalar(self, other): + # Get the result dtype in a bad way. + res_dtype = (np.array([0], dtype=self.dtype) * other).dtype # Multiply this scalar by every element. - new = dok_matrix(self.shape, dtype=self.dtype) + new = dok_matrix(self.shape, dtype=res_dtype) for (key, val) in iteritems(self): new[key] = val * other return new diff --git a/scipy/sparse/lil.py b/scipy/sparse/lil.py index b9f8dc7..db19075 100644 --- a/scipy/sparse/lil.py +++ b/scipy/sparse/lil.py @@ -310,7 +310,11 @@ class lil_matrix(spmatrix, IndexMixin): # Multiply by zero: return the zero matrix new = lil_matrix(self.shape, dtype=self.dtype) else: + # Get the result dtype in a bad way. + res_dtype = (np.array([0], dtype=self.dtype) * other).dtype + new = self.copy() + new = new.astype(res_dtype) # Multiply this scalar by every element. new.data[:] = [[val*other for val in rowvals] for rowvals in new.data] commit 5570f8d0ccd302e9285d0c7a6aee5bc816b2a8d0 Author: Blake Griffith Date: Sun Aug 11 16:31:26 2013 -0500 ENH: Add getrow and getcol to DOK. diff --git a/scipy/sparse/dok.py b/scipy/sparse/dok.py index b47f570..fbbe3cb 100644 --- a/scipy/sparse/dok.py +++ b/scipy/sparse/dok.py @@ -471,6 +471,24 @@ class dok_matrix(spmatrix, dict): new.update(self) return new + def getrow(self, i): + """Returns a copy of row i of the matrix as a (1 x n) + DOK matrix. + """ + out = self.__class__((1, self.shape[1]), dtype=self.dtype) + for j in range(self.shape[1]): + out[0, j] = self[i, j] + return out + + def getcol(self, j): + """Returns a copy of column j of the matrix as a (m x 1) + DOK matrix. + """ + out = self.__class__((self.shape[0], 1), dtype=self.dtype) + for i in range(self.shape[0]): + out[i, 0] = self[i, j] + return out + def take(self, cols_or_rows, columns=1): # Extract columns or rows as indictated from matrix # assume cols_or_rows is sorted commit d301a57a2b99ad27437fb8c74e5c9cceb3da8d92 Author: Blake Griffith Date: Sun Aug 11 13:49:14 2013 -0500 DOC: Add note about SciPy being for numeric data. diff --git a/scipy/sparse/__init__.py b/scipy/sparse/__init__.py index 1e3f25b..0704dc4 100644 --- a/scipy/sparse/__init__.py +++ b/scipy/sparse/__init__.py @@ -5,7 +5,7 @@ Sparse matrices (:mod:`scipy.sparse`) .. currentmodule:: scipy.sparse -SciPy 2-D sparse matrix package. +SciPy 2-D sparse matrix package for numeric data. Contents ======== commit f23a87d9823baeef6407827836d5499d4c3d95e7 Author: Blake Griffith Date: Sun Aug 11 13:29:36 2013 -0500 BUG: Raise error when attempting to create dtype=object sparse matrix. diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index 98fb8e3..15de3d2 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -91,6 +91,8 @@ def getdtype(dtype, a=None, default=None): canCast = False else: raise TypeError("could not interpret data type") + elif dtype == object or dtype == np.object_: + raise TypeError("object dtype is not implemented for sparse matrices.") else: newdtype = np.dtype(dtype) commit 5eace4d10f9d28a982ce708099af1b19ae1a494e Author: Blake Griffith Date: Tue Aug 20 15:20:27 2013 -0500 TST sparse: Add tests Ellipsis indexing with sparse matrices. 
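In these tests an Ellipsis in a 2-D sparse index expands to full slices, matching the behavior of np.matrix; a short usage sketch with illustrative data:

    import numpy as np
    from scipy.sparse import csr_matrix

    b = np.asmatrix(np.arange(50).reshape(5, 10))
    a = csr_matrix(b)

    a[...]        # the whole matrix, same as a[:, :]
    a[1, ...]     # row 1, same as a[1, :]
    a[..., 1:]    # every row, columns 1 onward, same as a[:, 1:]
    a[1, ..., 1]  # a single element, same as a[1, 1]

More than one Ellipsis is also accepted; the extra ones collapse into full slices.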
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index dce5945..ed848fa 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1624,6 +1624,31 @@ class _TestSlicing: for j, b in enumerate(slices): yield check_2, a, b + def test_ellipsis_slicing(self): + b = asmatrix(arange(50).reshape(5,10)) + a = self.spmatrix(b) + + assert_array_equal(a[...].A, b[...].A) + assert_array_equal(a[...,].A, b[...,].A) + + assert_array_equal(a[..., ...].A, b[..., ...].A) + assert_array_equal(a[1, ...].A, b[1, ...].A) + assert_array_equal(a[..., 1].A, b[..., 1].A) + assert_array_equal(a[1:, ...].A, b[1:, ...].A) + assert_array_equal(a[..., 1:].A, b[..., 1:].A) + + assert_array_equal(a[..., ..., ...].A, b[..., ..., ...].A) + assert_array_equal(a[1, ..., ...].A, b[1, ..., ...].A) + assert_array_equal(a[1:, ..., ...].A, b[1:, ..., ...].A) + assert_array_equal(a[..., ..., 1:].A, b[..., ..., 1:].A) + assert_array_equal(a[1:, 1, ...].A, b[1:, 1, ...].A) + assert_array_equal(a[1, ..., 1:].A, b[1, ..., 1:].A) + # These return ints + assert_equal(a[1, 1, ...], b[1, 1, ...]) + assert_equal(a[1, ..., 1], b[1, ..., 1]) + # Bug in NumPy's slicing + assert_array_equal(a[..., ..., 1].A, b[..., ..., 1].A.reshape((5,1))) + class _TestSlicingAssign: def test_slice_scalar_assign(self): commit c0468d13dd5d3055e4a09c2c0c5f975579b20a03 Author: Blake Griffith Date: Tue Aug 20 15:19:14 2013 -0500 ENH sparse: Add Ellipsis indexing to sparse matrices. diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index 98fb8e3..75be505 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -168,6 +168,9 @@ class IndexMixin(object): (index.ndim == 2) and index.dtype.kind == 'b'): return index.nonzero() + # Parse any ellipses. + index = self._check_ellipsis(index) + # Next, parse the tuple or object if isinstance(index, tuple): if len(index) == 2: @@ -183,6 +186,59 @@ class IndexMixin(object): row, col = self._check_boolean(row, col) return row, col + def _check_ellipsis(self, index): + """ Process indices with Ellipsis. Returns modified index. The + plural form of ellipsis is ellipses.""" + if index is Ellipsis: + return (slice(None), slice(None)) + # There is a NumPy wontfix bug which raises an error if we ask + # `Ellipsis in index`, where index is a tuple which contains + # an ndarray. So we work around that. Otherwise we'd do. + # elif isinstance(index, tuple) and (Ellipsis in index): + elif isinstance(index, tuple): + has_ellipsis = False + for i in index: + if i is Ellipsis: + has_ellipsis = True + if not has_ellipsis: + return index + # (...,) + elif len(index) == 1: + # It must be the Ellipsis. + return (slice(None), slice(None)) + # (..., ?), (?, ...) + elif len(index) == 2: + if index[0] is Ellipsis: + index = (slice(None), index[1]) + if index[1] is Ellipsis: + index = (index[0], slice(None)) + return index + elif len(index) == 3: + # Convert to list for ease of manipulation. + index = list(index) + ellipsis_count = index.count(Ellipsis) + # (?, ..., ?), etc. + if ellipsis_count == 1: + index.remove(Ellipsis) + return tuple(index) + # (?, ..., ...), etc. + elif ellipsis_count == 2: + if index[0] is not Ellipsis: + return (index[0], slice(None)) + elif index[1] is not Ellipsis: + return (slice(None), index[1]) + else: + return (slice(None), index[2]) + # (..., ..., ...) 
+ else: + return (slice(None), slice(None)) + # bad stuff + else: + raise IndexError('too many indices') + # indices without Ellipses + else: + return index + def _check_boolean(self, row, col): from .base import isspmatrix # ew... # Supporting sparse boolean indexing with both row and col does commit 7d1a37c7959e95bf1e880373bbeecc4ceb31c53e Author: Blake Griffith Date: Mon Aug 19 23:08:52 2013 -0500 TST sparse: Test arithmetic & slicing with size zero sparse matrices. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index a5ed9ec..478a95e 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1246,6 +1246,67 @@ class _TestCommon: # dense_dot_sparse = dot(self.datsp, b) # assert_array_equal(dense_dot_dense, dense_dot_sparse) + def test_size_zero_matrix_arithmetic(self): + """Test basic matrix arithmatic with shapes like (0,0), (10,0), + (0, 3), etc.""" + mat = np.matrix([]) + a = mat.reshape((0, 0)) + b = mat.reshape((0, 1)) + c = mat.reshape((0, 5)) + d = mat.reshape((1, 0)) + e = mat.reshape((5, 0)) + + asp = self.spmatrix(a) + bsp = self.spmatrix(b) + csp = self.spmatrix(c) + dsp = self.spmatrix(d) + esp = self.spmatrix(e) + + # matrix product. + assert_array_equal(asp.dot(asp).A, np.dot(a, a).A) + assert_array_equal(bsp.dot(dsp).A, np.dot(b, d).A) + assert_array_equal(dsp.dot(bsp).A, np.dot(d, b).A) + assert_array_equal(csp.dot(esp).A, np.dot(c, e).A) + assert_array_equal(esp.dot(csp).A, np.dot(e, c).A) + assert_array_equal(dsp.dot(csp).A, np.dot(d, c).A) + + # bad matrix products + assert_raises(ValueError, dsp.dot, e) + assert_raises(ValueError, asp.dot, d) + + # elemente-wise multiplication + assert_array_equal(asp.multiply(asp).A, np.multiply(a, a).A) + assert_array_equal(bsp.multiply(bsp).A, np.multiply(b, b).A) + assert_array_equal(dsp.multiply(dsp).A, np.multiply(d, d).A) + + assert_array_equal(asp.multiply(a).A, np.multiply(a, a).A) + assert_array_equal(bsp.multiply(b).A, np.multiply(b, b).A) + assert_array_equal(dsp.multiply(d).A, np.multiply(d, d).A) + + assert_array_equal(asp.multiply(6).A, np.multiply(a, 6).A) + assert_array_equal(bsp.multiply(6).A, np.multiply(b, 6).A) + assert_array_equal(dsp.multiply(6).A, np.multiply(d, 6).A) + + # bad element-wise multiplication + assert_raises(ValueError, asp.multiply, c) + assert_raises(ValueError, esp.multiply, c) + + # Addition + assert_array_equal(asp.__add__(asp).A, a.__add__(a).A) + assert_array_equal(bsp.__add__(bsp).A, b.__add__(b).A) + assert_array_equal(dsp.__add__(dsp).A, d.__add__(d).A) + + # bad addition + assert_raises(ValueError, asp.__add__, dsp) + assert_raises(ValueError, bsp.__add__, asp) + + def test_size_zero_creation(self): + """Try creating size zero matrices in several different ways.""" + # create from size zero dense + # create from shape tuple + # create from row, col? + pass + class _TestInplaceArithmetic: def test_imul_scalar(self): @@ -1509,10 +1570,6 @@ class _TestSlicing: 0, 1, s_[:], s_[1:5], -1, -2, -5, array(-1), np.int8(-3)] - # These slices would return a size zero spares matrix which is - # currently unsupported. 
- bad_slices = [s_[:5:-1]] - def check_1(a): x = A[a] y = B[a] @@ -1524,15 +1581,8 @@ class _TestSlicing: else: assert_array_equal(x.todense(), y, repr(a)) - fail = False - msg = ("This slice returns a size 0 sparse matrix which is currently " - "unsupported.") for j, a in enumerate(slices): - if a in bad_slices: - fail = True - else: - fail = False - yield dec.knownfailureif(fail, msg)(check_1), a + yield check_1, a def check_2(a, b): # Indexing np.matrix with 0-d arrays seems to be broken, @@ -1558,14 +1608,9 @@ class _TestSlicing: else: assert_array_equal(x.todense(), y, repr((a, b))) - fail = False for i, a in enumerate(slices): for j, b in enumerate(slices): - if (a in bad_slices) or (b in bad_slices): - fail = True - else: - fail = False - yield dec.knownfailureif(fail, msg)(check_2), a, b + yield check_2, a, b class _TestSlicingAssign: commit 9f5bb3b4525abf82edac6358d7b73d43ae3f4de5 Author: Blake Griffith Date: Mon Aug 19 18:15:53 2013 -0500 ENH sparse: Allow sparse matrices to be size 0. diff --git a/scipy/sparse/base.py b/scipy/sparse/base.py index e245a4c..e7bdd94 100644 --- a/scipy/sparse/base.py +++ b/scipy/sparse/base.py @@ -79,7 +79,7 @@ class spmatrix(object): except: raise TypeError('invalid shape') - if not (shape[0] >= 1 and shape[1] >= 1): + if not (shape[0] >= 0 and shape[1] >= 0): raise ValueError('invalid shape') if (self._shape != shape) and (self._shape is not None): diff --git a/scipy/sparse/csr.py b/scipy/sparse/csr.py index 760703e..10c3d0a 100644 --- a/scipy/sparse/csr.py +++ b/scipy/sparse/csr.py @@ -274,10 +274,6 @@ class csr_matrix(_cs_matrix, IndexMixin): # If all else fails, try elementwise row, col = self._index_to_arrays(row, col) - if row.size == 0 or col.size == 0: - raise ValueError("Slice returns a size 0 matrix which is not " - "supported by sparse matrices.") - return self.__class__([[self._get_single_element(iii, jjj) for iii, jjj in zip(ii, jj)] for ii, jj in zip(row.tolist(), col.tolist())]) @@ -358,9 +354,6 @@ class csr_matrix(_cs_matrix, IndexMixin): row_indices = abs(row_indices[::-1]) shape = (1, int(np.ceil(float(stop - start) / stride))) - if 0 in shape: - raise ValueError("Slice returns a size 0 matrix which is not " - "supported by sparse matrices.") row_slice = csr_matrix((row_data, row_indices, row_indptr), shape=shape) @@ -396,9 +389,10 @@ class csr_matrix(_cs_matrix, IndexMixin): raise TypeError('expected slice or scalar') def check_bounds(i0, i1, num): - if not (0 <= i0 < num) or not (0 < i1 <= num) or not (i0 < i1): + if not (0 <= i0 <= num) or not (0 <= i1 <= num) or not (i0 <= i1): raise IndexError( - "index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" % + "index out of bounds: 0 <= %d <= %d, 0 <= %d <= %d," + " %d <= %d" % (i0, num, i1, num, i0, i1)) i0, i1 = process_slice(row_slice, M) commit 6a98e99911631e73fec45e504dc0d9295fada53e Author: Blake Griffith Date: Fri Aug 16 14:30:21 2013 -0500 TST: Add warning catches to tests in sparse.linalg. diff --git a/scipy/sparse/linalg/dsolve/tests/test_linsolve.py b/scipy/sparse/linalg/dsolve/tests/test_linsolve.py index 9125ced..92f0121 100644 --- a/scipy/sparse/linalg/dsolve/tests/test_linsolve.py +++ b/scipy/sparse/linalg/dsolve/tests/test_linsolve.py @@ -67,32 +67,36 @@ class TestLinsolve(TestCase): assert_array_almost_equal(x, x2.todense()) def test_non_square(self): - # A is not square. - A = ones((3, 4)) - b = ones((4, 1)) - assert_raises(ValueError, spsolve, A, b) - # A2 and b2 have incompatible shapes. 
- A2 = csc_matrix(eye(3)) - b2 = array([1.0, 2.0]) - assert_raises(ValueError, spsolve, A2, b2) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + # A is not square. + A = ones((3, 4)) + b = ones((4, 1)) + assert_raises(ValueError, spsolve, A, b) + # A2 and b2 have incompatible shapes. + A2 = csc_matrix(eye(3)) + b2 = array([1.0, 2.0]) + assert_raises(ValueError, spsolve, A2, b2) def test_example_comparison(self): - row = array([0,0,1,2,2,2]) - col = array([0,2,2,0,1,2]) - data = array([1,2,3,-4,5,6]) - sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float) - M = sM.todense() + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + row = array([0,0,1,2,2,2]) + col = array([0,2,2,0,1,2]) + data = array([1,2,3,-4,5,6]) + sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float) + M = sM.todense() - row = array([0,0,1,1,0,0]) - col = array([0,2,1,1,0,0]) - data = array([1,1,1,1,1,1]) - sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float) - N = sN.todense() + row = array([0,0,1,1,0,0]) + col = array([0,2,1,1,0,0]) + data = array([1,1,1,1,1,1]) + sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float) + N = sN.todense() - sX = spsolve(sM, sN) - X = scipy.linalg.solve(M, N) + sX = spsolve(sM, sN) + X = scipy.linalg.solve(M, N) - assert_array_almost_equal(X, sX.todense()) + assert_array_almost_equal(X, sX.todense()) class TestSplu(object): @@ -104,19 +108,23 @@ class TestSplu(object): random.seed(1234) def test_splu_smoketest(self): - # Check that splu works at all - x = random.rand(self.n) - lu = splu(self.A) - r = self.A*lu.solve(x) - assert_(abs(x - r).max() < 1e-13) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + # Check that splu works at all + x = random.rand(self.n) + lu = splu(self.A) + r = self.A*lu.solve(x) + assert_(abs(x - r).max() < 1e-13) def test_spilu_smoketest(self): - # Check that spilu works at all - x = random.rand(self.n) - lu = spilu(self.A, drop_tol=1e-2, fill_factor=5) - r = self.A*lu.solve(x) - assert_(abs(x - r).max() < 1e-2) - assert_(abs(x - r).max() > 1e-5) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + # Check that spilu works at all + x = random.rand(self.n) + lu = spilu(self.A, drop_tol=1e-2, fill_factor=5) + r = self.A*lu.solve(x) + assert_(abs(x - r).max() < 1e-2) + assert_(abs(x - r).max() > 1e-5) def test_splu_nnz0(self): A = csc_matrix((5,5), dtype='d') diff --git a/scipy/sparse/linalg/isolve/tests/test_iterative.py b/scipy/sparse/linalg/isolve/tests/test_iterative.py index 7994056..4b524da 100644 --- a/scipy/sparse/linalg/isolve/tests/test_iterative.py +++ b/scipy/sparse/linalg/isolve/tests/test_iterative.py @@ -4,6 +4,8 @@ from __future__ import division, print_function, absolute_import +import warnings + import numpy as np from numpy.testing import TestCase, assert_equal, assert_array_equal, \ @@ -12,7 +14,7 @@ from numpy.testing import TestCase, assert_equal, assert_array_equal, \ from numpy import zeros, ones, arange, array, abs, max, ones, eye, iscomplexobj from numpy.linalg import cond from scipy.linalg import norm -from scipy.sparse import spdiags, csr_matrix +from scipy.sparse import spdiags, csr_matrix, SparseEfficiencyWarning from scipy.sparse.linalg import LinearOperator, aslinearoperator from scipy.sparse.linalg.isolve import cg, cgs, bicg, bicgstab, gmres, qmr, minres, lgmres @@ -284,40 +286,42 @@ class 
TestQMR(TestCase): def test_leftright_precond(self): """Check that QMR works with left and right preconditioners""" - from scipy.sparse.linalg.dsolve import splu - from scipy.sparse.linalg.interface import LinearOperator + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + from scipy.sparse.linalg.dsolve import splu + from scipy.sparse.linalg.interface import LinearOperator - n = 100 + n = 100 - dat = ones(n) - A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1],n,n) - b = arange(n,dtype='d') + dat = ones(n) + A = spdiags([-2*dat, 4*dat, -dat], [-1,0,1],n,n) + b = arange(n,dtype='d') - L = spdiags([-dat/2, dat], [-1,0], n, n) - U = spdiags([4*dat, -dat], [0,1], n, n) + L = spdiags([-dat/2, dat], [-1,0], n, n) + U = spdiags([4*dat, -dat], [0,1], n, n) - L_solver = splu(L) - U_solver = splu(U) + L_solver = splu(L) + U_solver = splu(U) - def L_solve(b): - return L_solver.solve(b) + def L_solve(b): + return L_solver.solve(b) - def U_solve(b): - return U_solver.solve(b) + def U_solve(b): + return U_solver.solve(b) - def LT_solve(b): - return L_solver.solve(b,'T') + def LT_solve(b): + return L_solver.solve(b,'T') - def UT_solve(b): - return U_solver.solve(b,'T') + def UT_solve(b): + return U_solver.solve(b,'T') - M1 = LinearOperator((n,n), matvec=L_solve, rmatvec=LT_solve) - M2 = LinearOperator((n,n), matvec=U_solve, rmatvec=UT_solve) + M1 = LinearOperator((n,n), matvec=L_solve, rmatvec=LT_solve) + M2 = LinearOperator((n,n), matvec=U_solve, rmatvec=UT_solve) - x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2) + x,info = qmr(A, b, tol=1e-8, maxiter=15, M1=M1, M2=M2) - assert_equal(info,0) - assert_normclose(A*x, b, tol=1e-8) + assert_equal(info,0) + assert_normclose(A*x, b, tol=1e-8) class TestGMRES(TestCase): diff --git a/scipy/sparse/linalg/tests/test_expm_multiply.py b/scipy/sparse/linalg/tests/test_expm_multiply.py index 897ee5b..4dd80fb 100644 --- a/scipy/sparse/linalg/tests/test_expm_multiply.py +++ b/scipy/sparse/linalg/tests/test_expm_multiply.py @@ -3,10 +3,13 @@ from __future__ import division, print_function, absolute_import +import warnings + import numpy as np from numpy.testing import (TestCase, run_module_suite, assert_allclose, assert_, assert_equal, decorators) +from scipy.sparse import SparseEfficiencyWarning import scipy.linalg from scipy.sparse.linalg._expm_multiply import (_theta, _compute_p_max, _onenormest_matrix_power, expm_multiply, _expm_multiply_simple, @@ -104,16 +107,18 @@ class TestExpmActionSimple(TestCase): assert_allclose(observed, expected) def test_sparse_expm_multiply(self): - np.random.seed(1234) - n = 40 - k = 3 - nsamples = 10 - for i in range(nsamples): - A = scipy.sparse.rand(n, n, density=0.05) - B = np.random.randn(n, k) - observed = expm_multiply(A, B) - expected = scipy.linalg.expm(A).dot(B) - assert_allclose(observed, expected) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + np.random.seed(1234) + n = 40 + k = 3 + nsamples = 10 + for i in range(nsamples): + A = scipy.sparse.rand(n, n, density=0.05) + B = np.random.randn(n, k) + observed = expm_multiply(A, B) + expected = scipy.linalg.expm(A).dot(B) + assert_allclose(observed, expected) def test_complex(self): A = np.array([ @@ -130,24 +135,26 @@ class TestExpmActionSimple(TestCase): class TestExpmActionInterval(TestCase): def test_sparse_expm_multiply_interval(self): - np.random.seed(1234) - start = 0.1 - stop = 3.2 - n = 40 - k = 3 - endpoint = True - for num in (14, 13, 2): - A = 
scipy.sparse.rand(n, n, density=0.05) - B = np.random.randn(n, k) - v = np.random.randn(n) - for target in (B, v): - X = expm_multiply(A, target, - start=start, stop=stop, num=num, endpoint=endpoint) - samples = np.linspace(start=start, stop=stop, - num=num, endpoint=endpoint) - for solution, t in zip(X, samples): - assert_allclose(solution, - scipy.linalg.expm(t*A).dot(target)) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + np.random.seed(1234) + start = 0.1 + stop = 3.2 + n = 40 + k = 3 + endpoint = True + for num in (14, 13, 2): + A = scipy.sparse.rand(n, n, density=0.05) + B = np.random.randn(n, k) + v = np.random.randn(n) + for target in (B, v): + X = expm_multiply(A, target, + start=start, stop=stop, num=num, endpoint=endpoint) + samples = np.linspace(start=start, stop=stop, + num=num, endpoint=endpoint) + for solution, t in zip(X, samples): + assert_allclose(solution, + scipy.linalg.expm(t*A).dot(target)) def test_expm_multiply_interval_vector(self): np.random.seed(1234) diff --git a/scipy/sparse/linalg/tests/test_matfuncs.py b/scipy/sparse/linalg/tests/test_matfuncs.py index ddf8cf3..b991787 100644 --- a/scipy/sparse/linalg/tests/test_matfuncs.py +++ b/scipy/sparse/linalg/tests/test_matfuncs.py @@ -9,6 +9,8 @@ from __future__ import division, print_function, absolute_import import math +import warnings + import numpy as np from numpy import array, eye, dot, sqrt, double, exp, random from numpy.linalg import matrix_power @@ -16,7 +18,7 @@ from numpy.testing import (TestCase, run_module_suite, assert_allclose, assert_, assert_array_almost_equal, assert_array_almost_equal_nulp) -from scipy.sparse import csc_matrix +from scipy.sparse import csc_matrix, SparseEfficiencyWarning from scipy.sparse.construct import eye as speye from scipy.sparse.linalg.matfuncs import (expm, ProductOperator, MatrixPowerOperator, @@ -67,18 +69,22 @@ class TestExpM(TestCase): def test_padecases_dtype_sparse_float(self): # float32 and complex64 lead to errors in spsolve/UMFpack dtype = np.float64 - for scale in [1e-2, 1e-1, 5e-1, 1, 10]: - a = scale * speye(3, 3, dtype=dtype, format='csc') - e = exp(scale) * eye(3, dtype=dtype) - assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + for scale in [1e-2, 1e-1, 5e-1, 1, 10]: + a = scale * speye(3, 3, dtype=dtype, format='csc') + e = exp(scale) * eye(3, dtype=dtype) + assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100) def test_padecases_dtype_sparse_complex(self): # float32 and complex64 lead to errors in spsolve/UMFpack dtype = np.complex128 - for scale in [1e-2, 1e-1, 5e-1, 1, 10]: - a = scale * speye(3, 3, dtype=dtype, format='csc') - e = exp(scale) * eye(3, dtype=dtype) - assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + for scale in [1e-2, 1e-1, 5e-1, 1, 10]: + a = scale * speye(3, 3, dtype=dtype, format='csc') + e = exp(scale) * eye(3, dtype=dtype) + assert_array_almost_equal_nulp(expm(a).toarray(), e, nulp=100) def test_logm_consistency(self): random.seed(1234) commit a335d5673ac18587e0ab0f780489a7717ceb795b Author: Blake Griffith Date: Fri Aug 16 14:26:53 2013 -0500 TST: Add warning catches for indexing and inv and exp tests. 
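The pattern applied throughout these test changes is Python's warnings context manager, scoped to SparseEfficiencyWarning so that other warnings still surface. A minimal sketch with a hypothetical structure-changing assignment that would otherwise warn:

    import warnings

    from scipy.sparse import SparseEfficiencyWarning, csr_matrix

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", category=SparseEfficiencyWarning)
        # Assigning into a CSR matrix changes its sparsity structure and
        # normally emits SparseEfficiencyWarning; inside this block the
        # warning is suppressed only for the duration of the test.
        A = csr_matrix((3, 3))
        A[0, 0] = 1.0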
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 0c54ac3..4bef39a 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -593,17 +593,20 @@ class _TestCommon: yield check, dtype def test_expm(self): - M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) - sM = self.spmatrix(M, shape=(3,3), dtype=float) - Mexp = scipy.linalg.expm(M) - sMexp = expm(sM).todense() - assert_array_almost_equal((sMexp - Mexp), zeros((3, 3))) - - N = array([[3., 0., 1.], [0., 2., 0.], [0., 0., 0.]]) - sN = self.spmatrix(N, shape=(3,3), dtype=float) - Nexp = scipy.linalg.expm(N) - sNexp = expm(sN).todense() - assert_array_almost_equal((sNexp - Nexp), zeros((3, 3))) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + + M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) + sM = self.spmatrix(M, shape=(3,3), dtype=float) + Mexp = scipy.linalg.expm(M) + sMexp = expm(sM).todense() + assert_array_almost_equal((sMexp - Mexp), zeros((3, 3))) + + N = array([[3., 0., 1.], [0., 2., 0.], [0., 0., 0.]]) + sN = self.spmatrix(N, shape=(3,3), dtype=float) + Nexp = scipy.linalg.expm(N) + sNexp = expm(sN).todense() + assert_array_almost_equal((sNexp - Nexp), zeros((3, 3))) def test_inv(self): def check(dtype): @@ -612,7 +615,9 @@ class _TestCommon: sMinv = inv(sM) assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3)) for dtype in [float]: - yield check, dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield check, dtype def test_from_array(self): A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]]) @@ -1294,40 +1299,44 @@ class _TestInplaceArithmetic: class _TestGetSet: def test_getelement(self): - D = array([[1,0,0], - [4,3,0], - [0,2,0], - [0,0,0]]) - A = self.spmatrix(D) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + D = array([[1,0,0], + [4,3,0], + [0,2,0], + [0,0,0]]) + A = self.spmatrix(D) - M,N = D.shape + M,N = D.shape - for i in range(-M, M): - for j in range(-N, N): - assert_equal(A[i,j], D[i,j]) + for i in range(-M, M): + for j in range(-N, N): + assert_equal(A[i,j], D[i,j]) - for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]: - assert_raises((IndexError, TypeError), A.__getitem__, ij) + for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]: + assert_raises((IndexError, TypeError), A.__getitem__, ij) def test_setelement(self): - A = self.spmatrix((3,4)) - A[0, 0] = 0 # bug 870 - A[1, 2] = 4.0 - A[0, 1] = 3 - A[2, 0] = 2.0 - A[0,-1] = 8 - A[-1,-2] = 7 - A[0, 1] = 5 - assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]]) - - for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]: - assert_raises(IndexError, A.__setitem__, ij, 123.0) - - for v in [[1,2,3], array([1,2,3])]: - assert_raises(ValueError, A.__setitem__, (0,0), v) - - for v in [3j]: - assert_raises(TypeError, A.__setitem__, (0,0), v) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + A = self.spmatrix((3,4)) + A[0, 0] = 0 # bug 870 + A[1, 2] = 4.0 + A[0, 1] = 3 + A[2, 0] = 2.0 + A[0,-1] = 8 + A[-1,-2] = 7 + A[0, 1] = 5 + assert_array_equal(A.todense(),[[0,5,0,8],[0,0,4,0],[2,0,7,0]]) + + for ij in [(0,4),(-1,4),(3,0),(3,4),(3,-1)]: + assert_raises(IndexError, A.__setitem__, ij, 123.0) + + for v in [[1,2,3], array([1,2,3])]: + assert_raises(ValueError, A.__setitem__, (0,0), v) + + for v in [3j]: + assert_raises(TypeError, A.__setitem__, (0,0), v) def 
test_scalar_assign_2(self): n, m = (5, 10) @@ -1342,16 +1351,20 @@ class _TestGetSet: # [i,j] for i, j in [(2, 3), (-1, 8), (-1, -2), (array(-1), -2), (-1, array(-2)), (array(-1), array(-2))]: - _test_set(i, j, 1) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + _test_set(i, j, 1) def test_index_scalar_assign(self): - A = self.spmatrix((5, 5)) - B = np.zeros((5, 5)) - for C in [A, B]: - C[0,1] = 1 - C[3,0] = 4 - C[3,0] = 9 - assert_array_equal(A.toarray(), B) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + A = self.spmatrix((5, 5)) + B = np.zeros((5, 5)) + for C in [A, B]: + C[0,1] = 1 + C[3,0] = 4 + C[3,0] = 9 + assert_array_equal(A.toarray(), B) class _TestSolve: @@ -1557,15 +1570,19 @@ class _TestSlicing: class _TestSlicingAssign: def test_slice_scalar_assign(self): - A = self.spmatrix((5, 5)) - B = np.zeros((5, 5)) - for C in [A, B]: - C[0:1,1] = 1 - C[3:0,0] = 4 - C[3:4,0] = 9 - C[0,4:] = 1 - C[3::-1,4:] = 9 - assert_array_equal(A.toarray(), B) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + A = self.spmatrix((5, 5)) + B = np.zeros((5, 5)) + for C in [A, B]: + C[0:1,1] = 1 + C[3:0,0] = 4 + C[3:4,0] = 9 + C[0,4:] = 1 + C[3::-1,4:] = 9 + assert_array_equal(A.toarray(), B) def test_slice_assign_2(self): n, m = (5, 10) @@ -1580,88 +1597,96 @@ class _TestSlicingAssign: # [i,1:2] for i, j in [(2, slice(3)), (2, slice(None, 10, 4)), (2, slice(5, -2)), (array(2), slice(5, -2))]: - _test_set(i, j) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + _test_set(i, j) def test_self_self_assignment(self): # Tests whether a row of one lil_matrix can be assigned to # another. 
- B = self.spmatrix((4,3)) - B[0,0] = 2 - B[1,2] = 7 - B[2,1] = 3 - B[3,0] = 10 + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + B = self.spmatrix((4,3)) + B[0,0] = 2 + B[1,2] = 7 + B[2,1] = 3 + B[3,0] = 10 - A = B / 10 - B[0,:] = A[0,:] - assert_array_equal(A[0,:].A, B[0,:].A) + A = B / 10 + B[0,:] = A[0,:] + assert_array_equal(A[0,:].A, B[0,:].A) - A = B / 10 - B[:,:] = A[:1,:1] - assert_equal(A[0,0], B[3,2]) + A = B / 10 + B[:,:] = A[:1,:1] + assert_equal(A[0,0], B[3,2]) - A = B / 10 - B[:-1,0] = A[0,:].T - assert_array_equal(A[0,:].A.T, B[:-1,0].A) + A = B / 10 + B[:-1,0] = A[0,:].T + assert_array_equal(A[0,:].A.T, B[:-1,0].A) def test_slice_assignment(self): - B = self.spmatrix((4,3)) - B[0,0] = 5 - B[1,2] = 3 - B[2,1] = 7 + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + B = self.spmatrix((4,3)) + B[0,0] = 5 + B[1,2] = 3 + B[2,1] = 7 - expected = array([[10,0,0], - [0,0,6], - [0,14,0], - [0,0,0]]) + expected = array([[10,0,0], + [0,0,6], + [0,14,0], + [0,0,0]]) - B[:,:] = B+B - assert_array_equal(B.todense(),expected) + B[:,:] = B+B + assert_array_equal(B.todense(),expected) - block = [[1,0],[0,4]] - B[:2,:2] = csc_matrix(array(block)) - assert_array_equal(B.todense()[:2,:2],block) + block = [[1,0],[0,4]] + B[:2,:2] = csc_matrix(array(block)) + assert_array_equal(B.todense()[:2,:2],block) def test_set_slice(self): - A = self.spmatrix((5,10)) - B = matrix(zeros((5,10), float)) - - s_ = np.s_ - slices = [s_[:2], s_[1:2], s_[3:], s_[3::2], - s_[8:3:-1], s_[4::-2], s_[:5:-1], - 0, 1, s_[:], s_[1:5], -1, -2, -5, - array(-1), np.int8(-3)] - - for j, a in enumerate(slices): - A[a] = j - B[a] = j - assert_array_equal(A.todense(), B, repr(a)) - - for i, a in enumerate(slices): - for j, b in enumerate(slices): - A[a,b] = 10*i + 1000*(j+1) - B[a,b] = 10*i + 1000*(j+1) - assert_array_equal(A.todense(), B, repr((a, b))) - - A[0, 1:10:2] = xrange(1,10,2) - B[0, 1:10:2] = xrange(1,10,2) - assert_array_equal(A.todense(), B) - A[1:5:2,0] = np.array(range(1,5,2))[:,None] - B[1:5:2,0] = np.array(range(1,5,2))[:,None] - assert_array_equal(A.todense(), B) - - # The next commands should raise exceptions - assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100))) - assert_raises(ValueError, A.__setitem__, (0, 0), arange(100)) - assert_raises(ValueError, A.__setitem__, (0, slice(None)), - list(range(100))) - assert_raises(ValueError, A.__setitem__, (slice(None), 1), - list(range(100))) - assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy()) - assert_raises(ValueError, A.__setitem__, - ([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4]) - assert_raises(ValueError, A.__setitem__, - ([[1, 2, 3], [0, 3, 4], [4, 1, 3]], - [[1, 2, 4], [0, 1, 3]]), [2, 3, 4]) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + A = self.spmatrix((5,10)) + B = matrix(zeros((5,10), float)) + + s_ = np.s_ + slices = [s_[:2], s_[1:2], s_[3:], s_[3::2], + s_[8:3:-1], s_[4::-2], s_[:5:-1], + 0, 1, s_[:], s_[1:5], -1, -2, -5, + array(-1), np.int8(-3)] + + for j, a in enumerate(slices): + A[a] = j + B[a] = j + assert_array_equal(A.todense(), B, repr(a)) + + for i, a in enumerate(slices): + for j, b in enumerate(slices): + A[a,b] = 10*i + 1000*(j+1) + B[a,b] = 10*i + 1000*(j+1) + assert_array_equal(A.todense(), B, repr((a, b))) + + A[0, 1:10:2] = xrange(1,10,2) + B[0, 1:10:2] = xrange(1,10,2) + assert_array_equal(A.todense(), B) + A[1:5:2,0] = 
np.array(range(1,5,2))[:,None] + B[1:5:2,0] = np.array(range(1,5,2))[:,None] + assert_array_equal(A.todense(), B) + + # The next commands should raise exceptions + assert_raises(ValueError, A.__setitem__, (0, 0), list(range(100))) + assert_raises(ValueError, A.__setitem__, (0, 0), arange(100)) + assert_raises(ValueError, A.__setitem__, (0, slice(None)), + list(range(100))) + assert_raises(ValueError, A.__setitem__, (slice(None), 1), + list(range(100))) + assert_raises(ValueError, A.__setitem__, (slice(None), 1), A.copy()) + assert_raises(ValueError, A.__setitem__, + ([[1, 2, 3], [0, 3, 4]], [1, 2, 3]), [1, 2, 3, 4]) + assert_raises(ValueError, A.__setitem__, + ([[1, 2, 3], [0, 3, 4], [4, 1, 3]], + [[1, 2, 4], [0, 1, 3]]), [2, 3, 4]) class _TestFancyIndexing: @@ -1839,9 +1864,11 @@ class _TestFancyIndexing: class _TestFancyIndexingAssign: def test_bad_index_assign(self): - A = self.spmatrix(np.zeros([5, 5])) - assert_raises((IndexError, ValueError, TypeError), A.__setitem__, "foo", 2) - assert_raises((IndexError, ValueError, TypeError), A.__setitem__, (2, "foo"), 5) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + A = self.spmatrix(np.zeros([5, 5])) + assert_raises((IndexError, ValueError, TypeError), A.__setitem__, "foo", 2) + assert_raises((IndexError, ValueError, TypeError), A.__setitem__, (2, "foo"), 5) def test_fancy_indexing_set(self): n, m = (5, 10) @@ -1853,50 +1880,54 @@ class _TestFancyIndexingAssign: B[i, j] = 1 assert_array_almost_equal(A.todense(), B) # [1:2,1:2] - for i, j in [((2, 3, 4), slice(None, 10, 4)), - (np.arange(3), slice(5, -2)), - (slice(2, 5), slice(5, -2))]: - _test_set_slice(i, j) - for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]: - _test_set_slice(i, j) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + for i, j in [((2, 3, 4), slice(None, 10, 4)), + (np.arange(3), slice(5, -2)), + (slice(2, 5), slice(5, -2))]: + _test_set_slice(i, j) + for i, j in [(np.arange(3), np.arange(3)), ((0, 3, 4), (1, 2, 4))]: + _test_set_slice(i, j) def test_sequence_assignment(self): - A = self.spmatrix((4,3)) - B = self.spmatrix(eye(3,4)) - - i0 = [0,1,2] - i1 = (0,1,2) - i2 = array(i0) - - A[0,i0] = B[i0,0].T - A[1,i1] = B[i1,1].T - A[2,i2] = B[i2,2].T - assert_array_equal(A.todense(),B.T.todense()) - - # column slice - A = self.spmatrix((2,3)) - A[1,1:3] = [10,20] - assert_array_equal(A.todense(), [[0,0,0],[0,10,20]]) - - # row slice - A = self.spmatrix((3,2)) - A[1:3,1] = [[10],[20]] - assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]]) - - # both slices - A = self.spmatrix((3,3)) - B = asmatrix(np.zeros((3,3))) - for C in [A, B]: - C[[0,1,2], [0,1,2]] = [4,5,6] - assert_array_equal(A.toarray(), B) - - # both slices (2) - A = self.spmatrix((4, 3)) - A[(1, 2, 3), (0, 1, 2)] = [1, 2, 3] - assert_almost_equal(A.sum(), 6) - B = asmatrix(np.zeros((4, 3))) - B[(1, 2, 3), (0, 1, 2)] = [1, 2, 3] - assert_array_equal(A.todense(), B) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + A = self.spmatrix((4,3)) + B = self.spmatrix(eye(3,4)) + + i0 = [0,1,2] + i1 = (0,1,2) + i2 = array(i0) + + A[0,i0] = B[i0,0].T + A[1,i1] = B[i1,1].T + A[2,i2] = B[i2,2].T + assert_array_equal(A.todense(),B.T.todense()) + + # column slice + A = self.spmatrix((2,3)) + A[1,1:3] = [10,20] + assert_array_equal(A.todense(), [[0,0,0],[0,10,20]]) + + # row slice + A = self.spmatrix((3,2)) + A[1:3,1] = [[10],[20]] + 
assert_array_equal(A.todense(), [[0,0],[0,10],[0,20]]) + + # both slices + A = self.spmatrix((3,3)) + B = asmatrix(np.zeros((3,3))) + for C in [A, B]: + C[[0,1,2], [0,1,2]] = [4,5,6] + assert_array_equal(A.toarray(), B) + + # both slices (2) + A = self.spmatrix((4, 3)) + A[(1, 2, 3), (0, 1, 2)] = [1, 2, 3] + assert_almost_equal(A.sum(), 6) + B = asmatrix(np.zeros((4, 3))) + B[(1, 2, 3), (0, 1, 2)] = [1, 2, 3] + assert_array_equal(A.todense(), B) class _TestFancyMultidim: @@ -1975,10 +2006,12 @@ class _TestFancyMultidimAssign: B[i, j] = 1 assert_array_almost_equal(A.todense(), B) # [[[1, 2], [1, 2]], [1, 2]] - for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]), - (np.array([0, 4]), [[0, 3], [1, 2]]), - ([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]: - _test_set_slice(i, j) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + for i, j in [(np.array([[1, 2], [1, 3]]), [1, 3]), + (np.array([0, 4]), [[0, 3], [1, 2]]), + ([[1, 2, 3], [0, 2, 4]], [[0, 4, 3], [4, 1, 2]])]: + _test_set_slice(i, j) def test_fancy_assign_list(self): np.random.seed(1234) @@ -2484,12 +2517,14 @@ class TestDOK(sparse_test_class(slicing=False, assert_array_equal(D.A, E.A) def test_add_nonzero(self): - A = self.spmatrix((3,2)) - A[0,1] = -10 - A[2,0] = 20 - A = A + 10 - B = matrix([[10, 0], [10, 10], [30, 10]]) - assert_array_equal(A.todense(), B) + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + A = self.spmatrix((3,2)) + A[0,1] = -10 + A[2,0] = 20 + A = A + 10 + B = matrix([[10, 0], [10, 10], [30, 10]]) + assert_array_equal(A.todense(), B) def test_convert(self): # Test provided by Andrew Straw. Fails in SciPy <= r1477. commit 5fc9e3e111652ed387e2d4564d845977164a9346 Author: Blake Griffith Date: Fri Aug 16 11:46:14 2013 -0500 TST: Small test optimizations. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 28046f6..0c54ac3 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -138,11 +138,10 @@ class _TestCommon: assert_array_equal(dat == 0, (datsp == 0).todense()) assert_array_equal(dat == 1, (datsp == 1).todense()) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." + fails = not (self.__class__ == TestBSR or self.__class__ == TestCSC or + self.__class__ == TestCSR) for dtype in self.checked_dtypes: - fails = not (self.__class__ == TestBSR or - self.__class__ == TestCSC or - self.__class__ == TestCSR) - msg = "Bool comparisons only implemented for BSR, CSC, and CSR." with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) @@ -175,11 +174,10 @@ class _TestCommon: assert_array_equal(0 != dat, (0 != datsp).todense()) assert_array_equal(1 != dat, (1 != datsp).todense()) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." + fails = not (self.__class__ == TestBSR or self.__class__ == TestCSC or + self.__class__ == TestCSR) for dtype in self.checked_dtypes: - fails = not (self.__class__ == TestBSR or - self.__class__ == TestCSC or - self.__class__ == TestCSR) - msg = "Bool comparisons only implemented for BSR, CSC, and CSR." 
with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) @@ -241,18 +239,17 @@ class _TestCommon: # dense rhs fails assert_array_equal(dat < datsp2, datsp < dat2) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." + fails = not (self.__class__ == TestBSR or self.__class__ == TestCSC or + self.__class__ == TestCSR) for dtype in self.checked_dtypes: - fails = not (self.__class__ == TestBSR or - self.__class__ == TestCSC or - self.__class__ == TestCSR) - msg = "Bool comparisons only implemented for BSR, CSC, and CSR." with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) yield dec.skipif(fails, msg)(check), dtype + msg = "Dense rhs is not supported for inequalities." for dtype in self.checked_dtypes: - msg = "Dense rhs is not supported for inequalities." with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) @@ -313,18 +310,17 @@ class _TestCommon: # dense rhs fails assert_array_equal(dat > datsp2, datsp > dat2) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." + fails = not (self.__class__ == TestBSR or self.__class__ == TestCSC or + self.__class__ == TestCSR) for dtype in self.checked_dtypes: - fails = not (self.__class__ == TestBSR or - self.__class__ == TestCSC or - self.__class__ == TestCSR) - msg = "Bool comparisons only implemented for BSR, CSC, and CSR." with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) yield dec.skipif(fails, msg)(check), dtype + msg = "Dense rhs is not supported for inequalities." for dtype in self.checked_dtypes: - msg = "Dense rhs is not supported for inequalities." with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) @@ -383,18 +379,17 @@ class _TestCommon: # dense rhs fails assert_array_equal(dat <= datsp2, datsp <= dat2) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." + fails = not (self.__class__ == TestBSR or self.__class__ == TestCSC or + self.__class__ == TestCSR) for dtype in self.checked_dtypes: - fails = not (self.__class__ == TestBSR or - self.__class__ == TestCSC or - self.__class__ == TestCSR) - msg = "Bool comparisons only implemented for BSR, CSC, and CSR." with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) yield dec.skipif(fails, msg)(check), dtype + msg = "Dense rhs is not supported for inequalities." for dtype in self.checked_dtypes: - msg = "Dense rhs is not supported for inequalities." with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) @@ -454,18 +449,17 @@ class _TestCommon: # dense rhs fails assert_array_equal(dat >= datsp2, datsp >= dat2) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." 
+ fails = not (self.__class__ == TestBSR or self.__class__ == TestCSC or + self.__class__ == TestCSR) for dtype in self.checked_dtypes: - fails = not (self.__class__ == TestBSR or - self.__class__ == TestCSC or - self.__class__ == TestCSR) - msg = "Bool comparisons only implemented for BSR, CSC, and CSR." with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) yield dec.skipif(fails, msg)(check), dtype + msg = "Dense rhs is not supported for inequalities." for dtype in self.checked_dtypes: - msg = "Dense rhs is not supported for inequalities." with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) warnings.simplefilter("ignore", category=SparseEfficiencyWarning) commit 6fc83c8bb82f82c876db9b6cc84678a6a65e9faf Author: Blake Griffith Date: Fri Aug 16 11:44:09 2013 -0500 TST: Silence some warnings. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index c76d455..28046f6 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -143,7 +143,10 @@ class _TestCommon: self.__class__ == TestCSC or self.__class__ == TestCSR) msg = "Bool comparisons only implemented for BSR, CSC, and CSR." - yield dec.skipif(fails, msg)(check), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.skipif(fails, msg)(check), dtype def test_ne(self): def check(dtype): @@ -177,7 +180,10 @@ class _TestCommon: self.__class__ == TestCSC or self.__class__ == TestCSR) msg = "Bool comparisons only implemented for BSR, CSC, and CSR." - yield dec.skipif(fails, msg)(check), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.skipif(fails, msg)(check), dtype def test_lt(self): def check(dtype): @@ -240,11 +246,17 @@ class _TestCommon: self.__class__ == TestCSC or self.__class__ == TestCSR) msg = "Bool comparisons only implemented for BSR, CSC, and CSR." - yield dec.skipif(fails, msg)(check), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.skipif(fails, msg)(check), dtype for dtype in self.checked_dtypes: msg = "Dense rhs is not supported for inequalities." - yield dec.knownfailureif(True, msg)(check_fail), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.knownfailureif(True, msg)(check_fail), dtype def test_gt(self): def check(dtype): @@ -306,11 +318,17 @@ class _TestCommon: self.__class__ == TestCSC or self.__class__ == TestCSR) msg = "Bool comparisons only implemented for BSR, CSC, and CSR." - yield dec.skipif(fails, msg)(check), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.skipif(fails, msg)(check), dtype for dtype in self.checked_dtypes: msg = "Dense rhs is not supported for inequalities." 
- yield dec.knownfailureif(True, msg)(check_fail), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.knownfailureif(True, msg)(check_fail), dtype def test_le(self): def check(dtype): @@ -370,11 +388,17 @@ class _TestCommon: self.__class__ == TestCSC or self.__class__ == TestCSR) msg = "Bool comparisons only implemented for BSR, CSC, and CSR." - yield dec.skipif(fails, msg)(check), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.skipif(fails, msg)(check), dtype for dtype in self.checked_dtypes: msg = "Dense rhs is not supported for inequalities." - yield dec.knownfailureif(True, msg)(check_fail), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.knownfailureif(True, msg)(check_fail), dtype def test_ge(self): def check(dtype): @@ -435,11 +459,17 @@ class _TestCommon: self.__class__ == TestCSC or self.__class__ == TestCSR) msg = "Bool comparisons only implemented for BSR, CSC, and CSR." - yield dec.skipif(fails, msg)(check), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.skipif(fails, msg)(check), dtype for dtype in self.checked_dtypes: msg = "Dense rhs is not supported for inequalities." - yield dec.knownfailureif(True, msg)(check_fail), dtype + with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=np.ComplexWarning) + warnings.simplefilter("ignore", category=SparseEfficiencyWarning) + yield dec.knownfailureif(True, msg)(check_fail), dtype def test_empty(self): # create empty matrices commit c859927048d2a1f6401088f6585ec7962d9d9514 Author: Blake Griffith Date: Mon Aug 12 14:55:19 2013 -0500 WIP, TST: Add knownfailures to tests for size == 0 cases. Also removed tests that previously checked the size 0 workaround. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index e8a1717..3a66d16 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1473,6 +1473,10 @@ class _TestSlicing: 0, 1, s_[:], s_[1:5], -1, -2, -5, array(-1), np.int8(-3)] + # These slices would return a size zero spares matrix which is + # currently unsupported. + bad_slices = [s_[:5:-1]] + def check_1(a): x = A[a] y = B[a] @@ -1484,8 +1488,15 @@ class _TestSlicing: else: assert_array_equal(x.todense(), y, repr(a)) + fail = False + msg = "This slice returns a size 0 sparse matrix which is currently " + "unsupported." 
for j, a in enumerate(slices): - yield check_1, a + if a in bad_slices: + fail = True + else: + fail = False + yield dec.knownfailureif(fail, msg)(check_1), a def check_2(a, b): # Indexing np.matrix with 0-d arrays seems to be broken, @@ -1511,9 +1522,14 @@ class _TestSlicing: else: assert_array_equal(x.todense(), y, repr((a, b))) + fail = False for i, a in enumerate(slices): for j, b in enumerate(slices): - yield check_2, a, b + if (a in bad_slices) or (b in bad_slices): + fail = True + else: + fail = False + yield dec.knownfailureif(fail, msg)(check_2), a, b class _TestSlicingAssign: @@ -1797,24 +1813,6 @@ class _TestFancyIndexing: assert_raises(IndexError, A.__getitem__, Ysp) assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1)) - def test_fancy_indexing_empty_index(self): - def check(): - B = asmatrix(arange(50).reshape(5,10)) - A = self.spmatrix(B) - expected_empty_result = self.spmatrix(np.array([[0]])) - - assert_equal(A[1:1], expected_empty_result) - assert_equal(A[1:2:-1], expected_empty_result) - - msg = "LIL can actually be 0x0 but converting it to other formats " - "causes problems, so we mark it as known fail." - - fails = False - if self.__class__ == TestLIL: - fails = True - - yield dec.skipif(fails, msg)(check) - class _TestFancyIndexingAssign: def test_bad_index_assign(self): commit c1fee97ba776477eff82c1195132a97b9381fcf8 Author: Blake Griffith Date: Mon Aug 12 14:26:58 2013 -0500 WIP: Disallow indexes which return size 0 sparse matrices. Also add errors in this case. diff --git a/scipy/sparse/coo.py b/scipy/sparse/coo.py index 9321054..ed4e0b8 100644 --- a/scipy/sparse/coo.py +++ b/scipy/sparse/coo.py @@ -181,8 +181,6 @@ class coo_matrix(_data_matrix, _minmax_mixin): if np.rank(M) != 2: raise TypeError('expected rank <= 2 array or matrix') - elif M.size == 0: - self.shape = (1,1) else: self.shape = M.shape diff --git a/scipy/sparse/csr.py b/scipy/sparse/csr.py index 54e813c..72497fe 100644 --- a/scipy/sparse/csr.py +++ b/scipy/sparse/csr.py @@ -273,6 +273,11 @@ class csr_matrix(_cs_matrix, IndexMixin): # If all else fails, try elementwise row, col = self._index_to_arrays(row, col) + + if row.size == 0 or col.size == 0: + raise ValueError("Slice returns a size 0 matrix which is not " + "supported by sparse matrices.") + return self.__class__([[self._get_single_element(iii, jjj) for iii, jjj in zip(ii, jj)] for ii, jj in zip(row.tolist(), col.tolist())]) @@ -353,9 +358,9 @@ class csr_matrix(_cs_matrix, IndexMixin): row_indices = abs(row_indices[::-1]) shape = (1, int(np.ceil(float(stop - start) / stride))) - # Catch empty slice. if 0 in shape: - shape = (1,1) + raise ValueError("Slice returns a size 0 matrix which is not " + "supported by sparse matrices.") row_slice = csr_matrix((row_data, row_indices, row_indptr), shape=shape) commit 0e523ee8d2f2becfd3c3ab51b1c43731c958044c Author: Blake Griffith Date: Sun Aug 11 18:55:55 2013 -0500 WIP: Add check for size zero slice result. diff --git a/scipy/sparse/csr.py b/scipy/sparse/csr.py index fdf8b55..54e813c 100644 --- a/scipy/sparse/csr.py +++ b/scipy/sparse/csr.py @@ -352,7 +352,10 @@ class csr_matrix(_cs_matrix, IndexMixin): row_data = row_data[::-1] row_indices = abs(row_indices[::-1]) - shape = (1, np.ceil(float(stop - start) / stride)) + shape = (1, int(np.ceil(float(stop - start) / stride))) + # Catch empty slice. 
+ if 0 in shape: + shape = (1,1) row_slice = csr_matrix((row_data, row_indices, row_indptr), shape=shape) commit d9ae272a3c0569a5195882b073651d31633a7acc Merge: bcba015 065f621 Author: Blake Griffith Date: Sun Aug 11 12:05:16 2013 -0700 Merge pull request #1 from pv/pr-2689 Pr 2689 commit bcba015a5ef90296dd30ef821ed7197f89d2e6cc Author: Blake Griffith Date: Sun Aug 11 01:31:31 2013 -0500 TST: Add tests for empty fancy indexing. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 49d7ac6..e425042 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1802,6 +1802,24 @@ class _TestFancyIndexing: assert_raises(IndexError, A.__getitem__, Ysp) assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1)) + def test_fancy_indexing_empty_index(self): + def check(): + B = asmatrix(arange(50).reshape(5,10)) + A = self.spmatrix(B) + expected_empty_result = self.spmatrix(np.array([[0]])) + + assert_equal(A[1:1], expected_empty_result) + assert_equal(A[1:2:-1], expected_empty_result) + + msg = "LIL can actually be 0x0 but converting it to other formats " + "causes problems, so we mark it as known fail." + + fails = False + if self.__class__ == TestLIL: + fails = True + + yield dec.skipif(fails, msg)(check) + class _TestFancyIndexingAssign: def test_bad_index_assign(self): commit 32ca14cfb5afefe1f022c8f91d15984236c8882a Author: Blake Griffith Date: Sun Aug 11 01:29:47 2013 -0500 WIP: Change CSR index checking to allow slices like `1:1`. diff --git a/scipy/sparse/csr.py b/scipy/sparse/csr.py index feda469..fdf8b55 100644 --- a/scipy/sparse/csr.py +++ b/scipy/sparse/csr.py @@ -388,9 +388,9 @@ class csr_matrix(_cs_matrix, IndexMixin): raise TypeError('expected slice or scalar') def check_bounds(i0, i1, num): - if not (0 <= i0 < num) or not (0 < i1 <= num) or not (i0 < i1): + if not (0 <= i0 < num) or not (0 < i1 <= num) or not (i0 <= i1): raise IndexError( - "index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<%d" % + "index out of bounds: 0<=%d<%d, 0<=%d<%d, %d<=%d" % (i0, num, i1, num, i0, i1)) i0, i1 = process_slice(row_slice, M) @@ -398,6 +398,9 @@ class csr_matrix(_cs_matrix, IndexMixin): check_bounds(i0, i1, M) check_bounds(j0, j1, N) + if i0 == i1 or j0 == j1: + return self.__class__((1, 1), dtype=self.dtype) + indptr, indices, data = get_csr_submatrix(M, N, self.indptr, self.indices, self.data, int(i0), int(i1), int(j0), int(j1)) commit ab1733d6c681e709b210637f7fe0b2410d236cfc Author: Blake Griffith Date: Sat Aug 10 18:11:29 2013 -0500 WIP: Remove unused method. Refactor sort in nonzero. Adresses @pv's comments: https://github.com/scipy/scipy/pull/2689#discussion-diff-5697521 https://github.com/scipy/scipy/pull/2689#discussion-diff-5697523 https://github.com/scipy/scipy/pull/2689#discussion-diff-5697535 diff --git a/scipy/sparse/csc.py b/scipy/sparse/csc.py index 55b84af..1181225 100644 --- a/scipy/sparse/csc.py +++ b/scipy/sparse/csc.py @@ -155,41 +155,17 @@ class csc_matrix(_cs_matrix, IndexMixin): """CSC can't use _cs_matrix's .nonzero method because it returns the indices sorted for self transposed. 
""" - # Get row and col indices + # Get row and col indices, from _cs_matrix.tocoo major_dim, minor_dim = self._swap(self.shape) minor_indices = self.indices major_indices = np.empty(len(minor_indices), dtype=np.intc) sparsetools.expandptr(major_dim,self.indptr, major_indices) row, col = self._swap((major_indices, minor_indices)) - row_out = row.copy() - col_out = col.copy() - # Sort them to be in C-style order ind = np.lexsort((col, row)) - for i, j in enumerate(ind): - row_out[i] = row[j] - col_out[i] = col[j] - - return row_out, col_out - - def _bl_to_tl_sort(self, row, col): - """ Sort indices so they are returned properly when the matrix is - transposed. From bottom left to top right. - """ - if isinstance(row, np.ndarray): - row = row.tolist() - if isinstance(col, np.ndarray): - col = col.tolist() - rc_pairs = [(r, c) for r, c in zip(row, col)] - rc_pairs = sorted(rc_pairs, key=lambda i: i[1]) - rc_pairs = sorted(rc_pairs, key=lambda i: i[0]) - - row = [] - col = [] - for r, c in rc_pairs: - row.append(r) - col.append(c) + row = row[ind] + col = col[ind] return row, col commit 4179d294be2f9de687196614a2c0a3ebda245f13 Author: Blake Griffith Date: Fri Aug 9 19:36:14 2013 -0500 WIP: Give CSC its own .nonzero method. This assures that the indices returned in nonzero are ordered properly. diff --git a/scipy/sparse/csc.py b/scipy/sparse/csc.py index a62cc0b..55b84af 100644 --- a/scipy/sparse/csc.py +++ b/scipy/sparse/csc.py @@ -12,6 +12,7 @@ from scipy.lib.six.moves import xrange from .base import isspmatrix from .sparsetools import csc_tocsr +from . import sparsetools from .sputils import upcast, isintlike, IndexMixin from .compressed import _cs_matrix @@ -148,9 +149,30 @@ class csc_matrix(_cs_matrix, IndexMixin): return self.T[col, row].T # Things that return a sequence of values. else: - self._bl_to_tl_sort(row, col) return self.T[col, row] + def nonzero(self): + """CSC can't use _cs_matrix's .nonzero method because it + returns the indices sorted for self transposed. + """ + # Get row and col indices + major_dim, minor_dim = self._swap(self.shape) + minor_indices = self.indices + major_indices = np.empty(len(minor_indices), dtype=np.intc) + sparsetools.expandptr(major_dim,self.indptr, major_indices) + row, col = self._swap((major_indices, minor_indices)) + + row_out = row.copy() + col_out = col.copy() + + # Sort them to be in C-style order + ind = np.lexsort((col, row)) + for i, j in enumerate(ind): + row_out[i] = row[j] + col_out[i] = col[j] + + return row_out, col_out + def _bl_to_tl_sort(self, row, col): """ Sort indices so they are returned properly when the matrix is transposed. From bottom left to top right. commit b93af943e9783b6a072d05fffca192343f97a79e Author: Blake Griffith Date: Fri Aug 9 13:58:56 2013 -0500 DOC: Change comments show test_spares.py is now test_base.py. Unrelated to current PR. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 42911c6..49d7ac6 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -16,7 +16,7 @@ Build sparse: Run tests if scipy is installed: python -c 'import scipy;scipy.sparse.test()' Run tests if sparse is not installed: - python tests/test_sparse.py + python tests/test_base.py """ import warnings commit 80c406aee3f968b8c0decbc73ea9c451a245ddf8 Author: Blake Griffith Date: Fri Aug 9 13:56:53 2013 -0500 WIP: Refactored CSC __getitem__ to use changes in _unpack_index. 
This introduces a failing test due to a bug in CSC's .nonzero() where the indices are not sorted C-style as described here: http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#boolean This addresses @pv comments: https://github.com/scipy/scipy/pull/2689#discussion-diff-5670362 https://github.com/scipy/scipy/pull/2689#discussion-diff-5670402 diff --git a/scipy/sparse/csc.py b/scipy/sparse/csc.py index ff766b5..a62cc0b 100644 --- a/scipy/sparse/csc.py +++ b/scipy/sparse/csc.py @@ -142,29 +142,24 @@ class csc_matrix(_cs_matrix, IndexMixin): def __getitem__(self, key): """Use CSR to implement fancy indexing.""" row, col = self._unpack_index(key) - # [1, ?], [1:2, ?], [?, 1], [?, 1:2] - if isinstance(key, tuple) and (isintlike(row) or isinstance(row, slice) - or isintlike(col) or isinstance(col, slice)): + # Things that return submatrices. row or col is a int or slice. + if (isinstance(row, slice) or isinstance(col, slice) or + isintlike(row) or isintlike(col)): return self.T[col, row].T - # ndarrays or something else. Dense result. - elif isinstance(key, tuple): - return self.T[col, row] - # [bool ndarray] - elif isinstance(key, np.ndarray) and key.dtype.kind == 'b': - return self[row, col] - # [bool spmatrix] - elif isspmatrix(key) and key.dtype.kind == 'b': - row, col = self._bl_to_tl_sort(row, col) - return self.T[col, row] - # [i] or [1:2] + # Things that return a sequence of values. else: - return self.T[:, key].T + self._bl_to_tl_sort(row, col) + return self.T[col, row] def _bl_to_tl_sort(self, row, col): """ Sort indices so they are returned properly when the matrix is transposed. From bottom left to top right. """ - rc_pairs = [(r, c) for r, c in zip(row.tolist(), col.tolist())] + if isinstance(row, np.ndarray): + row = row.tolist() + if isinstance(col, np.ndarray): + col = col.tolist() + rc_pairs = [(r, c) for r, c in zip(row, col)] rc_pairs = sorted(rc_pairs, key=lambda i: i[1]) rc_pairs = sorted(rc_pairs, key=lambda i: i[0]) commit 62008ad8e0e399f2ea318c4057fdcc45749b14d1 Author: Blake Griffith Date: Fri Aug 9 12:17:54 2013 -0500 WIP: Refactor _unpack_index. Addresses comments by @pv: https://github.com/scipy/scipy/pull/2689#discussion-diff-5670335 https://github.com/scipy/scipy/pull/2689#discussion-diff-5670515 https://github.com/scipy/scipy/pull/2689#discussion-diff-5670621 diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index 613dc67..9316075 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -162,14 +162,13 @@ class IndexMixin(object): """ Parse index. Always return a tuple of the form (row, col). Where row/col is a integer, slice, or array of integers. """ - # First check for single boolean index. + # First, check if indexing with single boolean matrix. from .base import spmatrix # This feels dirty but... if (isinstance(index, (spmatrix, np.ndarray)) and - (index.shape == self.shape) and - index.dtype.kind == 'b'): + (index.ndim == 2) and index.dtype.kind == 'b'): return index.nonzero() - # First, parse the tuple or object + # Next, parse the tuple or object if isinstance(index, tuple): if len(index) == 2: row, col = index @@ -185,40 +184,23 @@ class IndexMixin(object): return row, col def _check_boolean(self, row, col): - from .base import isspmatrix # This feels dirty but... + from .base import isspmatrix # ew... + # Supporting sparse boolean indexing with both row and col does + # not work because spmatrix.ndim is always 2. 
if isspmatrix(row) or isspmatrix(col): raise IndexError("Indexing with sparse matrices is not supported" " except boolean indexing where matrix and index are equal" " shapes.") - if (isinstance(row, np.ndarray) and row.dtype.kind == 'b'): - # Check for equal shapes. - if (row.shape == self.shape): - if col != slice(None): - raise IndexError('invalid index shape') - else: - return row.nonzero() - + if isinstance(row, np.ndarray) and row.dtype.kind == 'b': row = self._boolean_index_to_array(row) - if len(row) == 2: - if isinstance(col, slice): - col = row[1] - else: - raise ValueError('too many indices for array') - row = row[0] - if isinstance(col, np.ndarray) and col.dtype.kind == 'b': - col = self._boolean_index_to_array(col)[0] + col = self._boolean_index_to_array(col) return row, col def _boolean_index_to_array(self, i): - if i.ndim < 2: - i = i.nonzero() - elif (i.shape[0] > self.shape[0] or i.shape[1] > self.shape[1] or - i.ndim > 2): + if i.ndim > 1: raise IndexError('invalid index shape') - else: - i = i.nonzero() - return i + return i.nonzero()[0] def _index_to_arrays(self, i, j): i, j = self._check_boolean(i, j) commit 6161d3cbdbfcc252b221475c8dbdbc2d427242b6 Author: Blake Griffith Date: Thu Aug 8 11:58:16 2013 -0500 WIP: Remove superfluous check and warning. Check and warning are unneeded now that fancy indexing works. diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index fc29a6c..86c264d 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -700,9 +700,6 @@ class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): def _set_one(self, row, col, val): """Set one value at a time.""" - if not (isscalarlike(row) and isscalarlike(col)): - raise NotImplementedError("Fancy indexing in assignment not " - "supported for csr matrices.") M, N = self.shape if (row < 0): row += M commit 672bc5314e8eed86e7cfa3af13c368b2796eb5e7 Author: Blake Griffith Date: Wed Aug 7 22:44:51 2013 -0500 WIP: Remove _check_boolean from csc.py and csr.py. diff --git a/scipy/sparse/csc.py b/scipy/sparse/csc.py index bd40dcf..ff766b5 100644 --- a/scipy/sparse/csc.py +++ b/scipy/sparse/csc.py @@ -140,26 +140,21 @@ class csc_matrix(_cs_matrix, IndexMixin): return A def __getitem__(self, key): - # use CSR to implement fancy indexing - # [?, ?] - if isinstance(key, tuple): # [?, ?] - row, col = self._unpack_index(key) - # [1, ?], [1:2, ?], [?, 1], [?, 1:2] - if (isintlike(row) or isinstance(row, slice) or isintlike(col) or - isinstance(col, slice)): - return self.T[col, row].T - # ndarrays or something else. Dense result. - else: - return self.T[col, row] + """Use CSR to implement fancy indexing.""" + row, col = self._unpack_index(key) + # [1, ?], [1:2, ?], [?, 1], [?, 1:2] + if isinstance(key, tuple) and (isintlike(row) or isinstance(row, slice) + or isintlike(col) or isinstance(col, slice)): + return self.T[col, row].T + # ndarrays or something else. Dense result.
+ elif isinstance(key, tuple): + return self.T[col, row] # [bool ndarray] elif isinstance(key, np.ndarray) and key.dtype.kind == 'b': - row, col = self._check_boolean(key, slice(None)) return self[row, col] # [bool spmatrix] elif isspmatrix(key) and key.dtype.kind == 'b': - row, col = self._check_boolean(key, slice(None)) row, col = self._bl_to_tl_sort(row, col) - return self.T[col, row] # [i] or [1:2] else: diff --git a/scipy/sparse/csr.py b/scipy/sparse/csr.py index 535659d..feda469 100644 --- a/scipy/sparse/csr.py +++ b/scipy/sparse/csr.py @@ -225,31 +225,30 @@ class csr_matrix(_cs_matrix, IndexMixin): row, col = self._unpack_index(key) - # Convert Boolean data type, otherwise asindices() sees 0s and 1s - row, col = self._check_boolean(row, col) - # First attempt to use original row optimized methods + # [1, ?] if isintlike(row): - # [1,??] + # [i, j] if isintlike(col): - return self._get_single_element(row, col) # [i,j] + return self._get_single_element(row, col) + # [i, 1:2] elif isinstance(col, slice): - return self._get_row_slice(row, col) # [i,1:2] + return self._get_row_slice(row, col) + # [i, [1, 2]] elif issequence(col): - P = extractor(col,self.shape[1]).T # [i,[1,2]] - return self[row,:] * P + P = extractor(col,self.shape[1]).T + return self[row, :] * P elif isinstance(row, slice): # [1:2,??] - if ((isintlike(col) and - row.step in (1, None)) or - (isinstance(col, slice) and + if ((isintlike(col) and row.step in (1, None)) or + (isinstance(col, slice) and col.step in (1, None) and row.step in (1, None))): # col is int or slice with step 1, row is slice with step 1. return self._get_submatrix(row, col) elif issequence(col): P = extractor(col,self.shape[1]).T # [1:2,[1,2]] - # row is slice, col is slice or sequence. + # row is slice, col is sequence. return self[row,:]*P elif issequence(row): # [[1,2],??] commit 90b450bc115dcfc96fb3d8ed65ccdde1b0459add Author: Blake Griffith Date: Wed Aug 7 22:41:06 2013 -0500 WIP: Modify _unpack_index to use _check_boolean. diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index 32d8123..613dc67 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -159,17 +159,56 @@ class IndexMixin(object): return np.arange(start, stop, step) def _unpack_index(self, index): - """ Parse index. + """ Parse index. Always return a tuple of the form (row, col). + Where row/col is a integer, slice, or array of integers. """ + # First check for single boolean index. + from .base import spmatrix # This feels dirty but... + if (isinstance(index, (spmatrix, np.ndarray)) and + (index.shape == self.shape) and + index.dtype.kind == 'b'): + return index.nonzero() + + # First, parse the tuple or object if isinstance(index, tuple): if len(index) == 2: - return index + row, col = index elif len(index) == 1: - return index[0], slice(None) + row, col = index[0], slice(None) else: raise IndexError('invalid number of indices') else: - return index, slice(None) + row, col = index, slice(None) + + # Next, check for validity, or transform the index as needed. + row, col = self._check_boolean(row, col) + return row, col + + def _check_boolean(self, row, col): + from .base import isspmatrix # This feels dirty but... + if isspmatrix(row) or isspmatrix(col): + raise IndexError("Indexing with sparse matrices is not supported" + " except boolean indexing where matrix and index are equal" + " shapes.") + if (isinstance(row, np.ndarray) and row.dtype.kind == 'b'): + # Check for equal shapes. 
+ if (row.shape == self.shape): + if col != slice(None): + raise IndexError('invalid index shape') + else: + return row.nonzero() + + row = self._boolean_index_to_array(row) + if len(row) == 2: + if isinstance(col, slice): + col = row[1] + else: + raise ValueError('too many indices for array') + row = row[0] + + if isinstance(col, np.ndarray) and col.dtype.kind == 'b': + col = self._boolean_index_to_array(col)[0] + return row, col def _boolean_index_to_array(self, i): if i.ndim < 2: @@ -181,21 +220,6 @@ class IndexMixin(object): i = i.nonzero() return i - def _check_boolean(self, i, j): - from .base import isspmatrix # This feels dirty but... - if ((isinstance(i, np.ndarray) or isspmatrix(i)) and - i.dtype.kind == 'b'): - i = self._boolean_index_to_array(i) - if len(i) == 2: - if isinstance(j, slice): - j = i[1] - else: - raise ValueError('too many indices for array') - i = i[0] - if isinstance(j, np.ndarray) and j.dtype.kind == 'b': - j = self._boolean_index_to_array(j)[0] - return i, j - def _index_to_arrays(self, i, j): i, j = self._check_boolean(i, j) commit 9ef0ad125685c8a9fed56a4d553f750ee588aeb6 Author: Blake Griffith Date: Wed Aug 7 13:01:03 2013 -0500 STY: Whitespace fixes. Move and add comments. diff --git a/scipy/sparse/csc.py b/scipy/sparse/csc.py index 7f10a8d..bd40dcf 100644 --- a/scipy/sparse/csc.py +++ b/scipy/sparse/csc.py @@ -141,25 +141,29 @@ class csc_matrix(_cs_matrix, IndexMixin): def __getitem__(self, key): # use CSR to implement fancy indexing + # [?, ?] if isinstance(key, tuple): # [?, ?] row, col = self._unpack_index(key) - + # [1, ?], [1:2, ?], [?, 1], [?, 1:2] if (isintlike(row) or isinstance(row, slice) or isintlike(col) or - isinstance(col,slice)): - return self.T[col,row].T # [1, ?], [1:2, ?], [?, 1], [?, 1:2] + isinstance(col, slice)): + return self.T[col, row].T + # ndarrays or something else. Dense result. else: - # ndarrays or something else. Dense result. return self.T[col, row] + # [bool ndarray] elif isinstance(key, np.ndarray) and key.dtype.kind == 'b': row, col = self._check_boolean(key, slice(None)) return self[row, col] + # [bool spmatrix] elif isspmatrix(key) and key.dtype.kind == 'b': row, col = self._check_boolean(key, slice(None)) row, col = self._bl_to_tl_sort(row, col) return self.T[col, row] + # [i] or [1:2] else: - return self.T[:,key].T # [i] or [1:2] + return self.T[:, key].T def _bl_to_tl_sort(self, row, col): """ Sort indices so they are returned properly when the matrix is commit 9e48f6a1aab6c4b05241013ca34606ed43dff447 Author: Blake Griffith Date: Wed Aug 7 12:24:13 2013 -0500 WIP: Add comments. I'm trying to understand how CSC's __getitem__ works. Comments document its code paths. diff --git a/scipy/sparse/csc.py b/scipy/sparse/csc.py index dac5ae2..7f10a8d 100644 --- a/scipy/sparse/csc.py +++ b/scipy/sparse/csc.py @@ -141,13 +141,14 @@ class csc_matrix(_cs_matrix, IndexMixin): def __getitem__(self, key): # use CSR to implement fancy indexing - if isinstance(key, tuple): + if isinstance(key, tuple): # [?, ?] row, col = self._unpack_index(key) if (isintlike(row) or isinstance(row, slice) or isintlike(col) or isinstance(col,slice)): - return self.T[col,row].T + return self.T[col,row].T # [1, ?], [1:2, ?], [?, 1], [?, 1:2] else: + # ndarrays or something else. Dense result. 
return self.T[col, row] elif isinstance(key, np.ndarray) and key.dtype.kind == 'b': row, col = self._check_boolean(key, slice(None)) commit 464d69e37fdbb5f05aaaf6468d2b4af5eb8fbaba Author: Blake Griffith Date: Tue Aug 6 16:34:06 2013 -0500 WIP: Fix csc sparse boolean indexing sort issues. diff --git a/scipy/sparse/csc.py b/scipy/sparse/csc.py index 4a45239..dac5ae2 100644 --- a/scipy/sparse/csc.py +++ b/scipy/sparse/csc.py @@ -10,6 +10,7 @@ from warnings import warn import numpy as np from scipy.lib.six.moves import xrange +from .base import isspmatrix from .sparsetools import csc_tocsr from .sputils import upcast, isintlike, IndexMixin @@ -147,13 +148,34 @@ class csc_matrix(_cs_matrix, IndexMixin): isinstance(col,slice)): return self.T[col,row].T else: - return self.T[col,row] + return self.T[col, row] elif isinstance(key, np.ndarray) and key.dtype.kind == 'b': row, col = self._check_boolean(key, slice(None)) return self[row, col] + elif isspmatrix(key) and key.dtype.kind == 'b': + row, col = self._check_boolean(key, slice(None)) + row, col = self._bl_to_tl_sort(row, col) + + return self.T[col, row] else: return self.T[:,key].T # [i] or [1:2] + def _bl_to_tl_sort(self, row, col): + """ Sort indices so they are returned properly when the matrix is + transposed. From bottom left to top right. + """ + rc_pairs = [(r, c) for r, c in zip(row.tolist(), col.tolist())] + rc_pairs = sorted(rc_pairs, key=lambda i: i[1]) + rc_pairs = sorted(rc_pairs, key=lambda i: i[0]) + + row = [] + col = [] + for r, c in rc_pairs: + row.append(r) + col.append(c) + + return row, col + def getrow(self, i): """Returns a copy of row i of the matrix, as a (1 x n) CSR matrix (row vector). commit fb5c7654b7a79aa93e38b96ac757c390129d7a86 Author: Blake Griffith Date: Tue Aug 6 10:11:03 2013 -0500 WIP: Tests for indexing with sparse boolean matrices. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index edda6b5..42911c6 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1773,6 +1773,35 @@ class _TestFancyIndexing: assert_raises(IndexError, A.__getitem__, Y) assert_raises((IndexError, ValueError), A.__getitem__, (X, 1)) + def test_fancy_indexing_sparse_boolean(self): + random.seed(1234) # make runs repeatable + + # CSR matrix returns matrix in some cases + def todense(a): + if isinstance(a, (np.matrix, np.ndarray)): + return a + return a.todense() + + B = asmatrix(arange(50).reshape(5,10)) + A = self.spmatrix(B) + + X = np.array(np.random.randint(0, 2, size=(5, 10)), dtype=bool) + + Xsp = csr_matrix(X) + + assert_equal(todense(A[Xsp]), B[X]) + assert_equal(todense(A[A > 9]), B[B > 9]) + + Z = np.array(np.random.randint(0, 2, size=(5, 11)), dtype=bool) + Y = np.array(np.random.randint(0, 2, size=(6, 10)), dtype=bool) + + Zsp = csr_matrix(Z) + Ysp = csr_matrix(Y) + + assert_raises(IndexError, A.__getitem__, Zsp) + assert_raises(IndexError, A.__getitem__, Ysp) + assert_raises((IndexError, ValueError), A.__getitem__, (Xsp, 1)) + class _TestFancyIndexingAssign: def test_bad_index_assign(self): @@ -2663,6 +2692,10 @@ class TestLIL(sparse_test_class(minmax=False)): a *= 2. a[0, :] = 0 + @dec.knownfailureif(True, "Sparse boolean indexing unimplemented for LIL") + def test_fancy_indexing_sparse_boolean(self): + pass + class TestCOO(sparse_test_class(getset=False, slicing=False, slicing_assign=False, commit c957b4b1b08dfebdcbd4ab6e907164d340cf8212 Author: Blake Griffith Date: Tue Aug 6 10:14:04 2013 -0500 WIP: Support for indexing with sparse boolean matrices. 
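The new test_fancy_indexing_sparse_boolean above and the _unpack_index/_check_boolean changes in this series let a sparse matrix be indexed by a boolean mask given either as a dense ndarray or as another sparse boolean matrix of the same shape. A minimal sketch of that behaviour, assuming a SciPy build that includes these changes; the variable names and the todense helper are illustrative (mirroring the test), not part of the patch::

    import numpy as np
    from numpy.testing import assert_equal
    from scipy.sparse import csr_matrix

    B = np.asmatrix(np.arange(50).reshape(5, 10))
    A = csr_matrix(B)

    X = np.asarray(B > 9)    # dense boolean mask with the same shape as A
    Xsp = csr_matrix(X)      # the same mask as a sparse boolean matrix

    def todense(a):
        # A[...] may come back sparse or already dense depending on the format.
        if isinstance(a, (np.matrix, np.ndarray)):
            return a
        return a.todense()

    # Both mask types select the same elements as the dense equivalent B[X].
    assert_equal(todense(A[X]), B[X])
    assert_equal(todense(A[Xsp]), B[X])
    assert_equal(todense(A[A > 9]), B[B > 9])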
diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index d42bca0..32d8123 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -182,7 +182,9 @@ class IndexMixin(object): return i def _check_boolean(self, i, j): - if isinstance(i, np.ndarray) and i.dtype.kind == 'b': + from .base import isspmatrix # This feels dirty but... + if ((isinstance(i, np.ndarray) or isspmatrix(i)) and + i.dtype.kind == 'b'): i = self._boolean_index_to_array(i) if len(i) == 2: if isinstance(j, slice): commit 2fff9cbeb86ac9800a04f3121ba7fd99f0728fab Author: Blake Griffith Date: Mon Aug 5 21:24:14 2013 -0500 STY: Address comments. diff --git a/scipy/sparse/coo.py b/scipy/sparse/coo.py index 4b3ae78..9321054 100644 --- a/scipy/sparse/coo.py +++ b/scipy/sparse/coo.py @@ -181,10 +181,8 @@ class coo_matrix(_data_matrix, _minmax_mixin): if np.rank(M) != 2: raise TypeError('expected rank <= 2 array or matrix') - elif M.size == 0: self.shape = (1,1) - else: self.shape = M.shape commit c23a0488c44d22d7b9f9f04c406ec986af11ce12 Author: Blake Griffith Date: Sun Aug 4 22:25:48 2013 -0500 WIP: Remove error, from DOK test. DOK raises a TypeError in this case, but the test expected a IndexError. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index c223ca0..edda6b5 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1284,7 +1284,7 @@ class _TestGetSet: assert_equal(A[i,j], D[i,j]) for ij in [(0,3),(-1,3),(4,0),(4,3),(4,-1), (1, 2, 3)]: - assert_raises(IndexError, A.__getitem__, ij) + assert_raises((IndexError, TypeError), A.__getitem__, ij) def test_setelement(self): A = self.spmatrix((3,4)) commit 9b20eb066213729ffe39b9d0b311ccbee461e83d Author: Blake Griffith Date: Sun Aug 4 22:08:17 2013 -0500 PEP8 diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 286ed99..c223ca0 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1639,7 +1639,7 @@ class _TestFancyIndexing: A = self.spmatrix(np.zeros([5, 5])) assert_raises((IndexError, ValueError, TypeError), A.__getitem__, "foo") assert_raises((IndexError, ValueError, TypeError), A.__getitem__, (2, "foo")) - assert_raises((IndexError, ValueError), A.__getitem__, + assert_raises((IndexError, ValueError), A.__getitem__, ([1, 2, 3], [1, 2, 3, 4])) def test_fancy_indexing(self): @@ -1747,7 +1747,7 @@ class _TestFancyIndexing: def todense(a): if isinstance(a, (np.matrix, np.ndarray)): return a - return a.todense() + return a.todense() B = asmatrix(arange(50).reshape(5,10)) A = self.spmatrix(B) @@ -1759,7 +1759,7 @@ class _TestFancyIndexing: assert_equal(todense(A[I]), B[I]) assert_equal(todense(A[:,J]), B[:, J]) assert_equal(todense(A[X]), B[X]) - assert_equal(todense(A[B > 9]), B[B>9]) + assert_equal(todense(A[B > 9]), B[B > 9]) I = np.array([True, False, True, True, False]) J = np.array([False, True, True, False, True]) @@ -2296,6 +2296,7 @@ class TestCSR(sparse_test_class()): SIJ = SIJ.todense() assert_equal(SIJ, D[I,J]) + class TestCSC(sparse_test_class()): spmatrix = csc_matrix checked_dtypes = [np.bool_, np.int_, np.float_, np.complex_] commit 19a50bcafb6d6d47e33af36c2cd4375961f65b85 Author: Blake Griffith Date: Sun Aug 4 22:07:51 2013 -0500 WIP: Correct slicing test to skip np.array(-1) indexing bug. 
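The DOK change above (commit c23a048) works because assert_raises accepts a tuple of exception classes, so one test can tolerate the different exception types the formats raise for the same bad index. A small sketch of the idea, not taken from the patch; the out-of-bounds key mirrors the ones in the hunk above::

    from numpy.testing import assert_raises
    from scipy.sparse import csr_matrix, dok_matrix

    # An out-of-bounds key raises IndexError for some formats and TypeError
    # for others (the commit above notes DOK raised TypeError where the test
    # expected IndexError); a tuple of exception types accepts either.
    for spmatrix in (csr_matrix, dok_matrix):
        A = spmatrix((3, 4))
        assert_raises((IndexError, TypeError), A.__getitem__, (4, 3))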
diff --git a/scipy/sparse/tests/test_base.py index 35edb35..286ed99 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1486,13 +1486,31 @@ class _TestSlicing: for i, a in enumerate(slices): for j, b in enumerate(slices): - x = A[a,b] - y = B[a,b] - - if b == np.array(-1) and isinstance(b, np.ndarray): - # Bug in np.matrix - # https://github.com/numpy/numpy/issues/3110 - y = B[a,-1] + # if, bad case. + # Note that this bug was causing np.array(-1) to change + # values, so checking b == -1 was not enough. + # Bug in np.matrix + # https://github.com/numpy/numpy/issues/3110 + msg = "Indexing with np.array(-1) is problematic." + if isinstance(b, np.ndarray): + if b != -1: + x = A[a, -1] + y = B[a, -1] + yield dec.skipif( + True, msg)( + assert_array_equal)( + x.todense(), y) + elif isinstance(a, np.ndarray): + if a != -1: + x = A[-1, b] + y = B[-1, b] + yield dec.skipif(True, msg)( + assert_array_equal)( + x.todense(), y) + # else, good case + else: + x = A[a, b] + y = B[a, b] if y.shape == (): assert_equal(x, y, repr((a, b))) commit 852e9cb2321a1799f9b9ac8e64f73ccab8d6b6d6 Author: Blake Griffith Date: Sun Aug 4 22:03:47 2013 -0500 WIP: Remove slicing with step != 1 bug. Bug only occurred when col index was nonzero too. diff --git a/scipy/sparse/csr.py b/scipy/sparse/csr.py index ef8367a..535659d 100644 --- a/scipy/sparse/csr.py +++ b/scipy/sparse/csr.py @@ -240,12 +240,16 @@ class csr_matrix(_cs_matrix, IndexMixin): return self[row,:] * P elif isinstance(row, slice): # [1:2,??] - if isintlike(col) or (isinstance(col, slice) and - col.step in (1, None) and - row.step in (1, None)): - return self._get_submatrix(row, col) # [1:2,j] + if ((isintlike(col) and + row.step in (1, None)) or + (isinstance(col, slice) and + col.step in (1, None) and + row.step in (1, None))): + # col is int or slice with step 1, row is slice with step 1. + return self._get_submatrix(row, col) elif issequence(col): P = extractor(col,self.shape[1]).T # [1:2,[1,2]] + # row is slice, col is slice or sequence. return self[row,:]*P elif issequence(row): # [[1,2],??] commit c99ee15969ccc4ad296e0d9f58ff9e48bb628863 Author: Blake Griffith Date: Sun Aug 4 17:51:03 2013 -0500 WIP: Change COO empty matrix creation from size 0 dense matrix. coo_matrix now makes a 1x1 empty sparse matrix when given a size 0 dense matrix. This is to resolve an issue where indexing would return an empty matrix. diff --git a/scipy/sparse/coo.py b/scipy/sparse/coo.py index ac78456..4b3ae78 100644 --- a/scipy/sparse/coo.py +++ b/scipy/sparse/coo.py @@ -182,7 +182,12 @@ class coo_matrix(_data_matrix, _minmax_mixin): if np.rank(M) != 2: raise TypeError('expected rank <= 2 array or matrix') - self.shape = M.shape + elif M.size == 0: + self.shape = (1,1) + + else: + self.shape = M.shape + self.row, self.col = M.nonzero() self.data = M[self.row, self.col] commit d5ce19fbf82de0ce87d59eeec4381f8e2e4e8e0c Author: Blake Griffith Date: Fri Aug 2 16:33:50 2013 -0500 WIP: Comments. diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index 6c87065..d42bca0 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -152,6 +152,9 @@ class IndexMixin(object): This class simply exists to hold the methods necessary for fancy indexing. """ def _slicetoarange(self, j, shape): + """ Given a slice object, use numpy arange to change it to a 1D + array.
+ """ start, stop, step = j.indices(shape) return np.arange(start, stop, step) commit 09a51b53017b5f8f71c8457ac6c49678b914f788 Author: Blake Griffith Date: Tue Jul 30 16:46:33 2013 -0500 refactor diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index 3fea121..fc29a6c 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -579,6 +579,25 @@ class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): else: raise IndexError("invalid index") + def __setitem__(self, index, x): + # Process arrays from IndexMixin + i, j = self._unpack_index(index) + i, j = self._index_to_arrays(i, j) + + if isspmatrix(x): + x = x.toarray() + + # Make x and i into the same shape + x = np.asarray(x, dtype=self.dtype) + x, _ = np.broadcast_arrays(x, i) + + if x.shape != i.shape: + raise ValueError("shape mismatch in assignment") + + # Set values + for ii, jj, xx in zip(i.ravel(), j.ravel(), x.ravel()): + self._set_one(ii, jj, xx) + def _get_single_element(self,row,col): M, N = self.shape if (row < 0): @@ -679,25 +698,6 @@ class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): return self.__class__((data, indices, indptr), shape=shape) - def __setitem__(self, index, x): - # Process arrays from IndexMixin - i, j = self._unpack_index(index) - i, j = self._index_to_arrays(i, j) - - if isspmatrix(x): - x = x.toarray() - - # Make x and i into the same shape - x = np.asarray(x, dtype=self.dtype) - x, _ = np.broadcast_arrays(x, i) - - if x.shape != i.shape: - raise ValueError("shape mismatch in assignment") - - # Set values - for ii, jj, xx in zip(i.ravel(), j.ravel(), x.ravel()): - self._set_one(ii, jj, xx) - def _set_one(self, row, col, val): """Set one value at a time.""" if not (isscalarlike(row) and isscalarlike(col)): commit 1db6755c8ede72a88d357c92e6df9984148d26f1 Author: Blake Griffith Date: Tue Jul 30 11:06:26 2013 -0500 WIP: Change name. _check_Boolean -> _check_boolean. 
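The docstring added to _slicetoarange above describes the helper the fancy-indexing code builds on: a slice plus an axis length is expanded into the explicit 1-D index array via slice.indices and np.arange. A standalone sketch of that idea; the free function here is illustrative rather than the actual mixin method::

    import numpy as np

    def slice_to_arange(j, length):
        # Expand a slice over an axis of the given length into the 1-D
        # array of indices it selects (what IndexMixin._slicetoarange does).
        start, stop, step = j.indices(length)
        return np.arange(start, stop, step)

    print(slice_to_arange(slice(1, None, 2), 10))     # [1 3 5 7 9]
    print(slice_to_arange(slice(None, None, -1), 5))  # [4 3 2 1 0]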
diff --git a/scipy/sparse/csc.py b/scipy/sparse/csc.py index 6627123..4a45239 100644 --- a/scipy/sparse/csc.py +++ b/scipy/sparse/csc.py @@ -149,7 +149,7 @@ class csc_matrix(_cs_matrix, IndexMixin): else: return self.T[col,row] elif isinstance(key, np.ndarray) and key.dtype.kind == 'b': - row, col = self._check_Boolean(key, slice(None)) + row, col = self._check_boolean(key, slice(None)) return self[row, col] else: return self.T[:,key].T # [i] or [1:2] diff --git a/scipy/sparse/csr.py b/scipy/sparse/csr.py index 6e6a792..ef8367a 100644 --- a/scipy/sparse/csr.py +++ b/scipy/sparse/csr.py @@ -226,7 +226,7 @@ class csr_matrix(_cs_matrix, IndexMixin): row, col = self._unpack_index(key) # Convert Boolean data type, otherwise asindices() sees 0s and 1s - row, col = self._check_Boolean(row, col) + row, col = self._check_boolean(row, col) # First attempt to use original row optimized methods if isintlike(row): diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index dd22996..6c87065 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -178,7 +178,7 @@ class IndexMixin(object): i = i.nonzero() return i - def _check_Boolean(self, i, j): + def _check_boolean(self, i, j): if isinstance(i, np.ndarray) and i.dtype.kind == 'b': i = self._boolean_index_to_array(i) if len(i) == 2: @@ -192,7 +192,7 @@ class IndexMixin(object): return i, j def _index_to_arrays(self, i, j): - i, j = self._check_Boolean(i, j) + i, j = self._check_boolean(i, j) i_slice = isinstance(i, slice) if i_slice: commit f495abb2786700a08ef65c279d4f4f91d4f1d056 Author: Blake Griffith Date: Sat Jun 29 17:42:09 2013 -0500 PEP8 & STY diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index c01a4f1..3fea121 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -698,9 +698,8 @@ class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): for ii, jj, xx in zip(i.ravel(), j.ravel(), x.ravel()): self._set_one(ii, jj, xx) - def _set_one(self, row, col, val): - """ Set one value at a time """ + """Set one value at a time.""" if not (isscalarlike(row) and isscalarlike(col)): raise NotImplementedError("Fancy indexing in assignment not " "supported for csr matrices.") @@ -709,28 +708,27 @@ class _cs_matrix(_data_matrix, _minmax_mixin, IndexMixin): row += M if (col < 0): col += N - if not (0<=row Date: Sun Aug 4 16:32:56 2013 -0500 DOC: Add scipy.sparse release notes. diff --git a/doc/release/0.13.0-notes.rst b/doc/release/0.13.0-notes.rst index 45a776f..a0b312c 100644 --- a/doc/release/0.13.0-notes.rst +++ b/doc/release/0.13.0-notes.rst @@ -60,10 +60,26 @@ The BLAS functions ``symm``, ``syrk``, ``syr2k``, ``hemm``, ``herk`` and ``scipy.sparse`` improvements ----------------------------- -Boolean sparse matrices -^^^^^^^^^^^^^^^^^^^^^^^ +Boolean comparisons and sparse matrices +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -All sparse matrix types now support boolean data. +All sparse matrix types now support boolean data, and boolean operations. +So if we have two sparse matrices `A` and `B`, they can be compared in all the +expected ways: `A < B`, `A >= B`, `A != B`, etc. And return sparse matrices. +Comparisons with dense matrices and scalars are also supported. + +CSR and CSC fancy indexing +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Compressed sparse row and column sparse matrix types now support fancy indexing +with boolean matrices, slices, and lists. So where A is a (CSC or CSR) sparse +matrix, you can do things like:: + + >>> A[A > 0.5] = 1 # Since Boolean sparse matrices work! 
+ + >>> A[:2, :3] = 2 + + >>> A[[1,2], 2] = 3 ``scipy.special`` improvements commit e70d982e57adafad3a26c931ac296907fc2ae412 Author: Blake Griffith Date: Thu Jun 27 23:08:02 2013 -0500 BUG: More general boolean comparisons for complex_ops.h to fix compiler errors. diff --git a/scipy/sparse/sparsetools/complex_ops.h b/scipy/sparse/sparsetools/complex_ops.h index 154305e..a28d84f 100644 --- a/scipy/sparse/sparsetools/complex_ops.h +++ b/scipy/sparse/sparsetools/complex_ops.h @@ -14,10 +14,12 @@ class complex_wrapper : public npy_type { friend std::ostream& operator<<(std::ostream&, const complex_wrapper& ); public: + /* Constructor */ complex_wrapper( const c_type r = c_type(0), const c_type i = c_type(0) ){ npy_type::real = r; npy_type::imag = i; } + /* Conversion */ operator bool() const { if (npy_type::real == 0 && npy_type::imag == 0) { return false; @@ -25,6 +27,7 @@ class complex_wrapper : public npy_type { return true; } } + /* Operators */ complex_wrapper operator-() const { return complex_wrapper(-npy_type::real,-npy_type::imag); } @@ -45,6 +48,7 @@ class complex_wrapper : public npy_type { result.imag = (npy_type::imag * B.real - npy_type::real * B.imag) * denom; return result; } + /* in-place operators */ complex_wrapper& operator+=(const complex_wrapper & B){ npy_type::real += B.real; npy_type::imag += B.imag; @@ -103,36 +107,42 @@ class complex_wrapper : public npy_type { return npy_type::real >= B.real; } } - bool operator==(const c_type& B) const{ - return npy_type::real == B && npy_type::imag == c_type(0); + template + bool operator==(const T& B) const{ + return npy_type::real == B && npy_type::imag == T(0); } - bool operator!=(const c_type& B) const{ - return npy_type::real != B || npy_type::imag != c_type(0); + template + bool operator!=(const T& B) const{ + return npy_type::real != B || npy_type::imag != T(0); } - bool operator<(const c_type& B) const{ + template + bool operator<(const T& B) const{ if (npy_type::real == B) { - return npy_type::imag < c_type(0); + return npy_type::imag < T(0); } else { return npy_type::real < B; } } - bool operator>(const c_type& B) const{ + template + bool operator>(const T& B) const{ if (npy_type::real == B) { - return npy_type::imag > c_type(0); + return npy_type::imag > T(0); } else { return npy_type::real > B; } } - bool operator<=(const c_type& B) const{ + template + bool operator<=(const T& B) const{ if (npy_type::real == B) { - return npy_type::imag <= c_type(0); + return npy_type::imag <= T(0); } else { return npy_type::real <= B; } } - bool operator>=(const c_type& B) const{ + template + bool operator>=(const T& B) const{ if (npy_type::real == B) { - return npy_type::imag >= c_type(0); + return npy_type::imag >= T(0); } else { return npy_type::real >= B; } commit 8d329c55ad165fc6e8f0ea6d65261edebe816ee6 Author: Blake Griffith Date: Fri Jun 28 15:28:20 2013 -0500 STY: Added parenthese for clarity as suggested by PV. diff --git a/scipy/sparse/sparsetools/bool_ops.h b/scipy/sparse/sparsetools/bool_ops.h index 17d7149..69dcb01 100644 --- a/scipy/sparse/sparsetools/bool_ops.h +++ b/scipy/sparse/sparsetools/bool_ops.h @@ -30,15 +30,15 @@ class npy_bool_wrapper { return (*this); } npy_bool_wrapper operator+(const npy_bool_wrapper& x) { - return x || value ? 1 : 0; + return (x || value) ? 1 : 0; } /* inplace operators */ npy_bool_wrapper operator+=(const npy_bool_wrapper& x) { - value = x || value ? 1 : 0; + value = (x || value) ? 1 : 0; return (*this); } npy_bool_wrapper operator*=(const npy_bool_wrapper& x) { - value = value && x ? 
1 : 0; + value = (value && x) ? 1 : 0; return (*this); } /* constructors */ @@ -47,7 +47,7 @@ class npy_bool_wrapper { } template npy_bool_wrapper(T x) { - value = x ? 1 : 0; + value = (x) ? 1 : 0; } }; commit 2b543852506e4b61ef9e0f09afeb231f4b996829 Author: Blake Griffith Date: Fri Jun 28 15:24:28 2013 -0500 BUG: Readded 2 line I accidentally deleted. diff --git a/scipy/sparse/sparsetools/bool_ops.h b/scipy/sparse/sparsetools/bool_ops.h index 0bf462c..17d7149 100644 --- a/scipy/sparse/sparsetools/bool_ops.h +++ b/scipy/sparse/sparsetools/bool_ops.h @@ -1,3 +1,5 @@ +#ifndef BOOL_OPS_H +#define BOOL_OPS_H /* * Functions to handle arithmetic operations on NumPy Bool values. */ commit 1eb15a942910167581d957e648fbf24b400eba1b Author: Blake Griffith Date: Fri Jun 28 15:11:51 2013 -0500 ENH: Added macro to check at compile time that np_bool_wrapper is the correct size. diff --git a/scipy/sparse/sparsetools/bool_ops.h b/scipy/sparse/sparsetools/bool_ops.h index eb844d8..0bf462c 100644 --- a/scipy/sparse/sparsetools/bool_ops.h +++ b/scipy/sparse/sparsetools/bool_ops.h @@ -1,11 +1,15 @@ -#ifndef BOOL_OPS_H -#define BOOL_OPS_H - /* * Functions to handle arithmetic operations on NumPy Bool values. */ - #include +#include + +/* + * A compiler time (ct) assert macro from + * http://www.pixelbeat.org/programming/gcc/static_assert.html + * This is used to assure that npy_bool_wrapper is the right size. + */ +#define ct_assert(e) extern char (*ct_assert(void)) [sizeof(char[1 - 2*!(e)])] class npy_bool_wrapper { public: @@ -45,4 +49,6 @@ class npy_bool_wrapper { } }; +ct_assert(sizeof(char) == sizeof(npy_bool_wrapper)); + #endif commit f21a40350be37fb20d3cbef12e28b0033cafa7fa Author: Blake Griffith Date: Thu Jun 27 14:08:02 2013 -0500 TST: Added bool rollover test. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 270d051..ec72795 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -89,6 +89,18 @@ class _TestCommon: msg = "Cannot create a rank <= 2 DOK matrix." yield dec.skipif(fails, msg)(check), dtype + def test_bool_rollover(self): + """bool's underlying dtype is 1 byte, check that it does not + rollover True -> False at 256. + """ + dat = np.matrix([[True, False]]) + datsp = self.spmatrix(dat) + + for _ in range(10): + datsp = datsp + datsp + dat = dat + dat + assert_array_equal(dat, datsp.todense()) + def test_eq(self): def check(dtype): dat = self.dat_dtypes[dtype] commit 22e71a2a9cfe229a9c9e8495df1973be3887a932 Author: Blake Griffith Date: Wed Jun 26 19:42:05 2013 -0500 STY: Homogenized spacings. 
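test_bool_rollover above spells out why bool_ops.h defines '+' on npy_bool_wrapper as logical OR rather than byte arithmetic: boolean data is stored in a single byte, and plain integer addition in that byte would wrap a True entry back to False once 256 additions accumulate. A pure-NumPy sketch of the failure mode being guarded against (illustrative only, not code from the patch)::

    import numpy as np

    raw = np.array([1, 0], dtype=np.int8)   # "bool" stored as a raw byte
    for _ in range(8):                      # doubling 8 times accumulates 256 copies
        raw = raw + raw                     # int8 arithmetic wraps silently
    print(raw)                              # [0 0]: the True entry rolled over to False

    logical = np.array([True, False])
    for _ in range(8):
        logical = logical + logical         # '+' on NumPy bools is logical OR
    print(logical)                          # [ True False]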
diff --git a/scipy/sparse/sparsetools/sparsetools.i b/scipy/sparse/sparsetools/sparsetools.i index d4509e6..efa74ff 100644 --- a/scipy/sparse/sparsetools/sparsetools.i +++ b/scipy/sparse/sparsetools/sparsetools.i @@ -175,7 +175,7 @@ DECLARE_DATA_TYPE( npy_clongdouble_wrapper ) %define INSTANTIATE_ALL( f_name ) /* 32-bit indices */ -%template(f_name) f_name; +%template(f_name) f_name; %template(f_name) f_name; %template(f_name) f_name; %template(f_name) f_name; commit be0b21bec036fe46be1f0dcb56d2e18296acce46 Author: Blake Griffith Date: Wed Jun 26 19:41:25 2013 -0500 ENH: new bool_ops.h diff --git a/scipy/sparse/sparsetools/bool_ops.h b/scipy/sparse/sparsetools/bool_ops.h index faccdfe..eb844d8 100644 --- a/scipy/sparse/sparsetools/bool_ops.h +++ b/scipy/sparse/sparsetools/bool_ops.h @@ -7,6 +7,42 @@ #include -typedef npy_int8 npy_bool_wrapper; +class npy_bool_wrapper { + public: + char value; + + /* operators */ + operator char() const { + if(value != 0) { + return 1; + } else { + return 0; + } + } + npy_bool_wrapper& operator=(const npy_bool_wrapper& x) { + value = x; + return (*this); + } + npy_bool_wrapper operator+(const npy_bool_wrapper& x) { + return x || value ? 1 : 0; + } + /* inplace operators */ + npy_bool_wrapper operator+=(const npy_bool_wrapper& x) { + value = x || value ? 1 : 0; + return (*this); + } + npy_bool_wrapper operator*=(const npy_bool_wrapper& x) { + value = value && x ? 1 : 0; + return (*this); + } + /* constructors */ + npy_bool_wrapper() { + value = 0; + } + template + npy_bool_wrapper(T x) { + value = x ? 1 : 0; + } +}; #endif commit 34ecef7e27dc9e67ce4e73cacfc137f57285f160 Author: Blake Griffith Date: Tue Jun 25 14:44:51 2013 -0500 STY: Changed idiom for filling matrices as suggested by pv. diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index e73cfab..3b581c3 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -234,7 +234,9 @@ class _cs_matrix(_data_matrix, _minmax_mixin): if 0 < other: warn("Comparing a sparse matrix with a scalar greater than " "zero using < is inefficient, try using > instead.", SparseEfficiencyWarning) - other_arr = self.__class__(np.ones(self.shape)) * other + other_arr = np.empty(self.shape) + other_arr.fill(other) + other_arr = self.__class__(other_arr) return self._binopt(other_arr, '_lt_') else: other_arr = self.copy() @@ -260,7 +262,9 @@ class _cs_matrix(_data_matrix, _minmax_mixin): if 0 > other: warn("Comparing a sparse matrix with a scalar less than zero " "using > is inefficient, try using < instead.", SparseEfficiencyWarning) - other_arr = self.__class__(np.ones(self.shape)) * other + other_arr = np.empty(self.shape) + other_arr.fill(other) + other_arr = self.__class__(other_arr) return self._binopt(other_arr, '_gt_') else: other_arr = self.copy() @@ -288,7 +292,9 @@ class _cs_matrix(_data_matrix, _minmax_mixin): elif 0 <= other: warn("Comparing a sparse matrix with a scalar less than zero " "using <= is inefficient, try using < instead.", SparseEfficiencyWarning) - other_arr = self.__class__(np.ones(self.shape)) * other + other_arr = np.empty(self.shape) + other_arr.fill(other) + other_arr = self.__class__(other_arr) return self._binopt(other_arr, '_le_') else: # Casting as other's type avoids corner case like @@ -322,7 +328,9 @@ class _cs_matrix(_data_matrix, _minmax_mixin): elif 0 >= other: warn("Comparing a sparse matrix with a scalar greater than zero" " using >= is inefficient, try using < instead.", SparseEfficiencyWarning) - other_arr = self.__class__(np.ones(self.shape)) * 
other + other_arr = np.empty(self.shape) + other_arr.fill(other) + other_arr = self.__class__(other_arr) return self._binopt(other_arr, '_ge_') else: other_arr = self.astype(type(other)).copy() commit 322e41d037c920ad162a90450c54f2a45e6710fa Author: Blake Griffith Date: Tue Jun 25 09:33:59 2013 -0500 TST: Test for sparse inequalities with scalars on LHS. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index ddbf6f8..270d051 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -145,6 +145,8 @@ class _TestCommon: # sparse/scalar assert_array_equal(dat != 0, (datsp != 0).todense()) assert_array_equal(dat != 1, (datsp != 1).todense()) + assert_array_equal(0 != dat, (0 != datsp).todense()) + assert_array_equal(1 != dat, (1 != datsp).todense()) for dtype in self.checked_dtypes: fails = not (self.__class__ == TestBSR or @@ -186,11 +188,17 @@ class _TestCommon: assert_array_equal(dat < dat2, datsp < dat2) assert_array_equal(datcomplex < dat2, datspcomplex < dat2) # sparse/scalar - assert_array_equal(dat < 2, (datsp < 2).todense()) - assert_array_equal(dat < 1, (datsp < 1).todense()) - assert_array_equal(dat < 0, (datsp < 0).todense()) - assert_array_equal(dat < -1, (datsp < -1).todense()) - assert_array_equal(dat < -2, (datsp < -2).todense()) + assert_array_equal((datsp < 2).todense(), dat < 2) + assert_array_equal((datsp < 1).todense(), dat < 1) + assert_array_equal((datsp < 0).todense(), dat < 0) + assert_array_equal((datsp < -1).todense(), dat < -1) + assert_array_equal((datsp < -2).todense(), dat < -2) + + assert_array_equal((2 < datsp).todense(), 2 < dat) + assert_array_equal((1 < datsp).todense(), 1 < dat) + assert_array_equal((0 < datsp).todense(), 0 < dat) + assert_array_equal((-1 < datsp).todense(), -1 < dat) + assert_array_equal((-2 < datsp).todense(), -2 < dat) def check_fail(dtype): # data @@ -246,11 +254,17 @@ class _TestCommon: assert_array_equal(dat > dat2, datsp > dat2) assert_array_equal(datcomplex > dat2, datspcomplex > dat2) # sparse/scalar - assert_array_equal(dat > 2, (datsp > 2).todense()) - assert_array_equal(dat > 1, (datsp > 1).todense()) - assert_array_equal(dat > 0, (datsp > 0).todense()) - assert_array_equal(dat > -1, (datsp > -1).todense()) - assert_array_equal(dat > -2, (datsp > -2).todense()) + assert_array_equal((datsp > 2).todense(), dat > 2) + assert_array_equal((datsp > 1).todense(), dat > 1) + assert_array_equal((datsp > 0).todense(), dat > 0) + assert_array_equal((datsp > -1).todense(), dat > -1) + assert_array_equal((datsp > -2).todense(), dat > -2) + + assert_array_equal((2 > datsp).todense(), 2 > dat) + assert_array_equal((1 > datsp).todense(), 1 > dat) + assert_array_equal((0 > datsp).todense(), 0 > dat) + assert_array_equal((-1 > datsp).todense(), -1 > dat) + assert_array_equal((-2 > datsp).todense(), -2 > dat) def check_fail(dtype): # data @@ -306,10 +320,15 @@ class _TestCommon: assert_array_equal(datsp <= dat2, dat <= dat2) assert_array_equal(datspcomplex <= dat2, datcomplex <= dat2) # sparse/scalar - assert_array_equal(dat <= 2, (datsp <= 2).todense()) - assert_array_equal(dat <= 1, (datsp <= 1).todense()) - assert_array_equal(dat <= -1, (datsp <= -1).todense()) - assert_array_equal(dat <= -2, (datsp <= -2).todense()) + assert_array_equal((datsp <= 2).todense(), dat <= 2) + assert_array_equal((datsp <= 1).todense(), dat <= 1) + assert_array_equal((datsp <= -1).todense(), dat <= -1) + assert_array_equal((datsp <= -2).todense(), dat <= -2) + + assert_array_equal((2 <= datsp).todense(), 2 <= 
dat) + assert_array_equal((1 <= datsp).todense(), 1 <= dat) + assert_array_equal((-1 <= datsp).todense(), -1 <= dat) + assert_array_equal((-2 <= datsp).todense(), -2 <= dat) def check_fail(dtype): # data @@ -366,10 +385,15 @@ class _TestCommon: assert_array_equal(datsp >= dat2, dat >= dat2) assert_array_equal(datspcomplex >= dat2, datcomplex >= dat2) # sparse/scalar - assert_array_equal(dat >= 2, (datsp >= 2).todense()) - assert_array_equal(dat >= 1, (datsp >= 1).todense()) - assert_array_equal(dat >= -1, (datsp >= -1).todense()) - assert_array_equal(dat >= -2, (datsp >= -2).todense()) + assert_array_equal((datsp >= 2).todense(), dat >= 2) + assert_array_equal((datsp >= 1).todense(), dat >= 1) + assert_array_equal((datsp >= -1).todense(), dat >= -1) + assert_array_equal((datsp >= -2).todense(), dat >= -2) + + assert_array_equal((2 >= datsp).todense(), 2 >= dat) + assert_array_equal((1 >= datsp).todense(), 1 >= dat) + assert_array_equal((-1 >= datsp).todense(), -1 >= dat) + assert_array_equal((-2 >= datsp).todense(), -2 >= dat) def check_fail(dtype): # data commit 6411fea4af40416dc0c931730641fb44d65cb97f Author: Blake Griffith Date: Tue Jun 18 17:52:48 2013 -0500 TST: Add tests for inequalities. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 0df4714..ddbf6f8 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -89,7 +89,6 @@ class _TestCommon: msg = "Cannot create a rank <= 2 DOK matrix." yield dec.skipif(fails, msg)(check), dtype - def test_eq(self): def check(dtype): dat = self.dat_dtypes[dtype] @@ -119,7 +118,7 @@ class _TestCommon: fails = not (self.__class__ == TestBSR or self.__class__ == TestCSC or self.__class__ == TestCSR) - msg = "Bool comparisons only implemented for CSC and CSR." + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." yield dec.skipif(fails, msg)(check), dtype def test_ne(self): @@ -151,9 +150,249 @@ class _TestCommon: fails = not (self.__class__ == TestBSR or self.__class__ == TestCSC or self.__class__ == TestCSR) - msg = "Bool comparisons only implemented for CSC and CSR." + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." 
+ yield dec.skipif(fails, msg)(check), dtype + + def test_lt(self): + def check(dtype): + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datcomplex = dat.copy() + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spmatrix(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal(dat < dat2, (datsp < datsp2).todense()) + assert_array_equal(datcomplex < dat2, (datspcomplex < datsp2).todense()) + # mix sparse types + assert_array_equal(dat < dat2, (datbsr < datsp2).todense()) + assert_array_equal(dat < dat2, (datcsc < datsp2).todense()) + assert_array_equal(dat < dat2, (datcsr < datsp2).todense()) + assert_array_equal(dat < dat2, (datlil < datsp2).todense()) + + assert_array_equal(dat2 < dat, (datsp2 < datbsr).todense()) + assert_array_equal(dat2 < dat, (datsp2 < datcsc).todense()) + assert_array_equal(dat2 < dat, (datsp2 < datcsr).todense()) + assert_array_equal(dat2 < dat, (datsp2 < datlil).todense()) + # sparse/dense + assert_array_equal(dat < dat2, datsp < dat2) + assert_array_equal(datcomplex < dat2, datspcomplex < dat2) + # sparse/scalar + assert_array_equal(dat < 2, (datsp < 2).todense()) + assert_array_equal(dat < 1, (datsp < 1).todense()) + assert_array_equal(dat < 0, (datsp < 0).todense()) + assert_array_equal(dat < -1, (datsp < -1).todense()) + assert_array_equal(dat < -2, (datsp < -2).todense()) + + def check_fail(dtype): + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + + # dense rhs fails + assert_array_equal(dat < datsp2, datsp < dat2) + + for dtype in self.checked_dtypes: + fails = not (self.__class__ == TestBSR or + self.__class__ == TestCSC or + self.__class__ == TestCSR) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." yield dec.skipif(fails, msg)(check), dtype + for dtype in self.checked_dtypes: + msg = "Dense rhs is not supported for inequalities." 
+ yield dec.knownfailureif(True, msg)(check_fail), dtype + + def test_gt(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datcomplex = dat.copy() + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spmatrix(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal(dat > dat2, (datsp > datsp2).todense()) + assert_array_equal(datcomplex > dat2, (datspcomplex > datsp2).todense()) + # mix sparse types + assert_array_equal(dat > dat2, (datbsr > datsp2).todense()) + assert_array_equal(dat > dat2, (datcsc > datsp2).todense()) + assert_array_equal(dat > dat2, (datcsr > datsp2).todense()) + assert_array_equal(dat > dat2, (datlil > datsp2).todense()) + + assert_array_equal(dat2 > dat, (datsp2 > datbsr).todense()) + assert_array_equal(dat2 > dat, (datsp2 > datcsc).todense()) + assert_array_equal(dat2 > dat, (datsp2 > datcsr).todense()) + assert_array_equal(dat2 > dat, (datsp2 > datlil).todense()) + # sparse/dense + assert_array_equal(dat > dat2, datsp > dat2) + assert_array_equal(datcomplex > dat2, datspcomplex > dat2) + # sparse/scalar + assert_array_equal(dat > 2, (datsp > 2).todense()) + assert_array_equal(dat > 1, (datsp > 1).todense()) + assert_array_equal(dat > 0, (datsp > 0).todense()) + assert_array_equal(dat > -1, (datsp > -1).todense()) + assert_array_equal(dat > -2, (datsp > -2).todense()) + + def check_fail(dtype): + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + + # dense rhs fails + assert_array_equal(dat > datsp2, datsp > dat2) + + for dtype in self.checked_dtypes: + fails = not (self.__class__ == TestBSR or + self.__class__ == TestCSC or + self.__class__ == TestCSR) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." + yield dec.skipif(fails, msg)(check), dtype + + for dtype in self.checked_dtypes: + msg = "Dense rhs is not supported for inequalities." 
+ yield dec.knownfailureif(True, msg)(check_fail), dtype + + def test_le(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datcomplex = dat.copy() + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spmatrix(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal(dat <= dat2, (datsp <= datsp2).todense()) + assert_array_equal(datcomplex <= dat2, (datspcomplex <= datsp2).todense()) + # mix sparse types + assert_array_equal((datbsr <= datsp2).todense(), dat <= dat2) + assert_array_equal((datcsc <= datsp2).todense(), dat <= dat2) + assert_array_equal((datcsr <= datsp2).todense(), dat <= dat2) + assert_array_equal((datlil <= datsp2).todense(), dat <= dat2) + + assert_array_equal((datsp2 <= datbsr).todense(), dat2 <= dat) + assert_array_equal((datsp2 <= datcsc).todense(), dat2 <= dat) + assert_array_equal((datsp2 <= datcsr).todense(), dat2 <= dat) + assert_array_equal((datsp2 <= datlil).todense(), dat2 <= dat) + # sparse/dense + assert_array_equal(datsp <= dat2, dat <= dat2) + assert_array_equal(datspcomplex <= dat2, datcomplex <= dat2) + # sparse/scalar + assert_array_equal(dat <= 2, (datsp <= 2).todense()) + assert_array_equal(dat <= 1, (datsp <= 1).todense()) + assert_array_equal(dat <= -1, (datsp <= -1).todense()) + assert_array_equal(dat <= -2, (datsp <= -2).todense()) + + def check_fail(dtype): + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + + # dense rhs fails + assert_array_equal(dat <= datsp2, datsp <= dat2) + + for dtype in self.checked_dtypes: + fails = not (self.__class__ == TestBSR or + self.__class__ == TestCSC or + self.__class__ == TestCSR) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." + yield dec.skipif(fails, msg)(check), dtype + + for dtype in self.checked_dtypes: + msg = "Dense rhs is not supported for inequalities." 
+ yield dec.knownfailureif(True, msg)(check_fail), dtype + + def test_ge(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datcomplex = dat.copy() + datcomplex[:,0] = 1 + 1j + datspcomplex = self.spmatrix(datcomplex) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal(dat >= dat2, (datsp >= datsp2).todense()) + assert_array_equal(datcomplex >= dat2, (datspcomplex >= datsp2).todense()) + # mix sparse types + # mix sparse types + assert_array_equal((datbsr >= datsp2).todense(), dat >= dat2) + assert_array_equal((datcsc >= datsp2).todense(), dat >= dat2) + assert_array_equal((datcsr >= datsp2).todense(), dat >= dat2) + assert_array_equal((datlil >= datsp2).todense(), dat >= dat2) + + assert_array_equal((datsp2 >= datbsr).todense(), dat2 >= dat) + assert_array_equal((datsp2 >= datcsc).todense(), dat2 >= dat) + assert_array_equal((datsp2 >= datcsr).todense(), dat2 >= dat) + assert_array_equal((datsp2 >= datlil).todense(), dat2 >= dat) + # sparse/dense + assert_array_equal(datsp >= dat2, dat >= dat2) + assert_array_equal(datspcomplex >= dat2, datcomplex >= dat2) + # sparse/scalar + assert_array_equal(dat >= 2, (datsp >= 2).todense()) + assert_array_equal(dat >= 1, (datsp >= 1).todense()) + assert_array_equal(dat >= -1, (datsp >= -1).todense()) + assert_array_equal(dat >= -2, (datsp >= -2).todense()) + + def check_fail(dtype): + # data + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + + # dense rhs fails + assert_array_equal(dat >= datsp2, datsp >= dat2) + + for dtype in self.checked_dtypes: + fails = not (self.__class__ == TestBSR or + self.__class__ == TestCSC or + self.__class__ == TestCSR) + msg = "Bool comparisons only implemented for BSR, CSC, and CSR." + yield dec.skipif(fails, msg)(check), dtype + + for dtype in self.checked_dtypes: + msg = "Dense rhs is not supported for inequalities." + yield dec.knownfailureif(True, msg)(check_fail), dtype + def test_empty(self): # create empty matrices assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3))) @@ -980,6 +1219,7 @@ class _TestInplaceArithmetic: if not np.can_cast(dtype, np.int_): yield check, dtype + class _TestGetSet: def test_getelement(self): D = array([[1,0,0], commit 75768bcbff117b986ce7f4ad91827532a71f699c Author: Blake Griffith Date: Tue Jun 18 17:52:13 2013 -0500 ENH: Add inequalities to compressed.py, bsr.py. 
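A contract worth stating explicitly, since both the tests above and the compressed.py changes that follow rely on it: comparing a compressed sparse matrix against a scalar, with the scalar on either side of the operator, yields a sparse boolean matrix whose dense form matches the plain NumPy comparison. A minimal usage sketch, assuming a SciPy build with these patches applied (csr_matrix is just one of the supported formats); the mostly-True cases may emit an efficiency warning:

    import numpy as np
    from scipy.sparse import csr_matrix

    dat = np.array([[0, 1, 2],
                    [3, 0, 5]])
    datsp = csr_matrix(dat)

    # A scalar comparison yields a sparse boolean matrix whose dense
    # form matches the plain NumPy comparison ...
    assert np.array_equal((datsp < 2).todense(), dat < 2)
    # ... and the scalar may sit on either side of the operator.
    assert np.array_equal((2 > datsp).todense(), 2 > dat)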
diff --git a/scipy/sparse/bsr.py b/scipy/sparse/bsr.py index 6d8ff64..80948b2 100644 --- a/scipy/sparse/bsr.py +++ b/scipy/sparse/bsr.py @@ -517,7 +517,8 @@ class bsr_matrix(_cs_matrix, _minmax_mixin): indptr = np.empty_like(self.indptr) indices = np.empty(max_bnnz, dtype=np.intc) - if op == '_ne_': + bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] + if op in bool_ops: data = np.empty(R*C*max_bnnz, dtype=np.bool_) else: data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype)) diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index 4b25e09..e73cfab 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -177,7 +177,7 @@ class _cs_matrix(_data_matrix, _minmax_mixin): res = self._binopt(other_arr,'_ne_') if other == 0: warn("Comparing a sparse matrix with 0 using == is inefficient" - ", try using != instead.") + ", try using != instead.", SparseEfficiencyWarning) all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) return all_true - res else: @@ -189,7 +189,7 @@ class _cs_matrix(_data_matrix, _minmax_mixin): # Sparse other. elif isspmatrix(other): warn("Comparing sparse matrices using == is inefficient, try using" - " != instead.") + " != instead.", SparseEfficiencyWarning) #TODO sparse broadcasting if self.shape != other.shape: return False @@ -206,7 +206,7 @@ class _cs_matrix(_data_matrix, _minmax_mixin): if isscalarlike(other): if other != 0: warn("Comparing a sparse matrix with a nonzero scalar using !=" - " is inefficient, try using == instead.") + " is inefficient, try using == instead.", SparseEfficiencyWarning) all_true = self.__class__(np.ones(self.shape), dtype=np.bool_) res = (self == other) return all_true - res @@ -228,6 +228,124 @@ class _cs_matrix(_data_matrix, _minmax_mixin): else: return True + def __lt__(self, other): + # Scalar other. + if isscalarlike(other): + if 0 < other: + warn("Comparing a sparse matrix with a scalar greater than " + "zero using < is inefficient, try using > instead.", SparseEfficiencyWarning) + other_arr = self.__class__(np.ones(self.shape)) * other + return self._binopt(other_arr, '_lt_') + else: + other_arr = self.copy() + other_arr.data[:] = other + return self._binopt(other_arr, '_lt_') + # Dense other. + elif isdense(other): + return self.todense() < other + # Sparse other. + elif isspmatrix(other): + #TODO sparse broadcasting + if self.shape != other.shape: + raise ValueError("inconsistent shapes") + elif self.format != other.format: + other = other.asformat(self.format) + return self._binopt(other, '_lt_') + else: + raise ValueError("Operands could not be compared.") + + def __gt__(self, other): + # Scalar other. + if isscalarlike(other): + if 0 > other: + warn("Comparing a sparse matrix with a scalar less than zero " + "using > is inefficient, try using < instead.", SparseEfficiencyWarning) + other_arr = self.__class__(np.ones(self.shape)) * other + return self._binopt(other_arr, '_gt_') + else: + other_arr = self.copy() + other_arr.data[:] = other + return self._binopt(other_arr, '_gt_') + # Dense other. + elif isdense(other): + return self.todense() > other + # Sparse other. + elif isspmatrix(other): + #TODO sparse broadcasting + if self.shape != other.shape: + raise ValueError("inconsistent shapes") + elif self.format != other.format: + other = other.asformat(self.format) + return self._binopt(other, '_gt_') + else: + raise ValueError("Operands could not be compared.") + + def __le__(self,other): + # Scalar other. 
+ if isscalarlike(other): + if 0 == other: + raise NotImplementedError(" >= and <= don't work with 0.") + elif 0 <= other: + warn("Comparing a sparse matrix with a scalar less than zero " + "using <= is inefficient, try using < instead.", SparseEfficiencyWarning) + other_arr = self.__class__(np.ones(self.shape)) * other + return self._binopt(other_arr, '_le_') + else: + # Casting as other's type avoids corner case like + # ``spmatrix(True) < -2'' from being True. + other_arr = self.astype(type(other)).copy() + other_arr.data[:] = other + return self._binopt(other_arr, '_le_') + # Dense other. + elif isdense(other): + return self.todense() <= other + # Sparse other. + elif isspmatrix(other): + #TODO sparse broadcasting + if self.shape != other.shape: + raise ValueError("inconsistent shapes") + elif self.format != other.format: + other = other.asformat(self.format) + warn("Comparing sparse matrices using >= and <= is inefficient, " + "using <, >, or !=, instead.", SparseEfficiencyWarning) + all_true = self.__class__(np.ones(self.shape)) + res = self._binopt(other, '_gt_') + return all_true - res + else: + raise ValueError("Operands could not be compared.") + + def __ge__(self,other): + # Scalar other. + if isscalarlike(other): + if 0 == other: + raise NotImplementedError(" >= and <= don't work with 0.") + elif 0 >= other: + warn("Comparing a sparse matrix with a scalar greater than zero" + " using >= is inefficient, try using < instead.", SparseEfficiencyWarning) + other_arr = self.__class__(np.ones(self.shape)) * other + return self._binopt(other_arr, '_ge_') + else: + other_arr = self.astype(type(other)).copy() + other_arr.data[:] = other + return self._binopt(other_arr, '_ge_') + # Dense other. + elif isdense(other): + return self.todense() >= other + # Sparse other. 
+ elif isspmatrix(other): + #TODO sparse broadcasting + if self.shape != other.shape: + raise ValueError("inconsistent shapes") + elif self.format != other.format: + other = other.asformat(self.format) + warn("Comparing sparse matrices using >= and <= is inefficient, " + "try using <, >, or !=, instead.", SparseEfficiencyWarning) + all_true = self.__class__(np.ones(self.shape)) + res = self._binopt(other, '_lt_') + return all_true - res + else: + raise ValueError("Operands could not be compared.") + ################################# # Arithmatic operator overrides # ################################# @@ -763,7 +881,8 @@ class _cs_matrix(_data_matrix, _minmax_mixin): indptr = np.empty_like(self.indptr) indices = np.empty(maxnnz, dtype=np.intc) - if op == '_ne_': + bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_'] + if op in bool_ops: data = np.empty(maxnnz, dtype=np.bool_) else: data = np.empty(maxnnz, dtype=upcast(self.dtype,other.dtype)) commit a41762d590dca70945807cc656ee1847a713f558 Author: Blake Griffith Date: Wed Jun 19 13:48:52 2013 -0500 ENH: Add inequalities to base.py diff --git a/scipy/sparse/base.py b/scipy/sparse/base.py index 246e174..9f7ebe5 100644 --- a/scipy/sparse/base.py +++ b/scipy/sparse/base.py @@ -233,6 +233,18 @@ class spmatrix(object): def __ne__(self, other): return self.tocsr().__ne__(other) + def __lt__(self,other): + return self.tocsr().__lt__(other) + + def __gt__(self,other): + return self.tocsr().__gt__(other) + + def __le__(self,other): + return self.tocsr().__le__(other) + + def __ge__(self,other): + return self.tocsr().__ge__(other) + def __abs__(self): return abs(self.tocsr()) commit 38c94a8fec355434608e13eb4d3ebe3d3a5c700b Author: Blake Griffith Date: Wed Jun 19 21:33:38 2013 -0500 ENH: Regenerated SWIG wrappers. diff --git a/scipy/sparse/sparsetools/bsr.py b/scipy/sparse/sparsetools/bsr.py index 4514888..05c51c0 100644 Binary files a/scipy/sparse/sparsetools/bsr.py and b/scipy/sparse/sparsetools/bsr.py differ diff --git a/scipy/sparse/sparsetools/bsr_wrap.cxx b/scipy/sparse/sparsetools/bsr_wrap.cxx index c8a3708..739c17e 100644 Binary files a/scipy/sparse/sparsetools/bsr_wrap.cxx and b/scipy/sparse/sparsetools/bsr_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/csc.py b/scipy/sparse/sparsetools/csc.py index 90b2617..9853872 100644 Binary files a/scipy/sparse/sparsetools/csc.py and b/scipy/sparse/sparsetools/csc.py differ diff --git a/scipy/sparse/sparsetools/csc_wrap.cxx b/scipy/sparse/sparsetools/csc_wrap.cxx index 87a5167..da13d1b 100644 Binary files a/scipy/sparse/sparsetools/csc_wrap.cxx and b/scipy/sparse/sparsetools/csc_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/csr.py b/scipy/sparse/sparsetools/csr.py index 23b5ce7..25d9cb9 100644 Binary files a/scipy/sparse/sparsetools/csr.py and b/scipy/sparse/sparsetools/csr.py differ diff --git a/scipy/sparse/sparsetools/csr_wrap.cxx b/scipy/sparse/sparsetools/csr_wrap.cxx index a4184ee..660e8a9 100644 Binary files a/scipy/sparse/sparsetools/csr_wrap.cxx and b/scipy/sparse/sparsetools/csr_wrap.cxx differ commit 0a7722e37ccf737b4fca47669b9f2efe7505fc88 Author: Blake Griffith Date: Tue Jun 18 12:08:15 2013 -0500 ENH: Add inequality routines to compressed types in sparsetools. 
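The __le__ and __ge__ overrides above lean on a simple identity: a non-strict comparison is the elementwise complement of the opposite strict one, which the sparse code spells as an all-true matrix minus the '_gt_' (or '_lt_') result. A dense NumPy sketch of that identity, illustrative only, with XOR against an all-true array standing in for the boolean subtraction:

    import numpy as np

    A = np.array([[0, 1, 2],
                  [3, 0, 5]])
    B = np.array([[1, 1, 0],
                  [3, 2, 4]])

    # A non-strict comparison is the complement of the opposite strict
    # one; XOR with an all-true array plays the role of "all_true - res".
    all_true = np.ones(A.shape, dtype=bool)
    assert np.array_equal(A <= B, all_true ^ (A > B))
    assert np.array_equal(A >= B, all_true ^ (A < B))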
diff --git a/scipy/sparse/sparsetools/bsr.h b/scipy/sparse/sparsetools/bsr.h index ea87cbd..5a58e2e 100644 --- a/scipy/sparse/sparsetools/bsr.h +++ b/scipy/sparse/sparsetools/bsr.h @@ -601,6 +601,42 @@ void bsr_ne_bsr(const I n_row, const I n_col, const I R, const I C, bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::not_equal_to()); } +template +void bsr_lt_bsr(const I n_row, const I n_col, const I R, const I C, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T2 Cx[]) +{ + bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::less()); +} + +template +void bsr_gt_bsr(const I n_row, const I n_col, const I R, const I C, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T2 Cx[]) +{ + bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::greater()); +} + +template +void bsr_le_bsr(const I n_row, const I n_col, const I R, const I C, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T2 Cx[]) +{ + bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::less_equal()); +} + +template +void bsr_ge_bsr(const I n_row, const I n_col, const I R, const I C, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T2 Cx[]) +{ + bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::greater_equal()); +} + template void bsr_elmul_bsr(const I n_row, const I n_col, const I R, const I C, const I Ap[], const I Aj[], const T Ax[], diff --git a/scipy/sparse/sparsetools/bsr.i b/scipy/sparse/sparsetools/bsr.i index 5da7cf9..3123ba3 100644 --- a/scipy/sparse/sparsetools/bsr.i +++ b/scipy/sparse/sparsetools/bsr.i @@ -23,3 +23,7 @@ INSTANTIATE_ALL(bsr_minus_bsr) INSTANTIATE_ALL(bsr_sort_indices) INSTANTIATE_BOOL_OUT(bsr_ne_bsr) +INSTANTIATE_BOOL_OUT(bsr_lt_bsr) +INSTANTIATE_BOOL_OUT(bsr_gt_bsr) +INSTANTIATE_BOOL_OUT(bsr_le_bsr) +INSTANTIATE_BOOL_OUT(bsr_ge_bsr) diff --git a/scipy/sparse/sparsetools/csc.h b/scipy/sparse/sparsetools/csc.h index 05e6684..8cb5da8 100644 --- a/scipy/sparse/sparsetools/csc.h +++ b/scipy/sparse/sparsetools/csc.h @@ -135,18 +135,50 @@ void csc_matmat_pass2(const I n_row, T Cx[]) { csr_matmat_pass2(n_col, n_row, Bp, Bi, Bx, Ap, Ai, Ax, Cp, Ci, Cx); } +template +void csc_ne_csc(const I n_row, const I n_col, + const I Ap[], const I Ai[], const T Ax[], + const I Bp[], const I Bi[], const T Bx[], + I Cp[], I Ci[], T2 Cx[]) +{ + csr_ne_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); +} +template +void csc_lt_csc(const I n_row, const I n_col, + const I Ap[], const I Ai[], const T Ax[], + const I Bp[], const I Bi[], const T Bx[], + I Cp[], I Ci[], T2 Cx[]) +{ + csr_lt_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); +} +template +void csc_gt_csc(const I n_row, const I n_col, + const I Ap[], const I Ai[], const T Ax[], + const I Bp[], const I Bi[], const T Bx[], + I Cp[], I Ci[], T2 Cx[]) +{ + csr_gt_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); +} template -void csc_ne_csc(const I n_row, const I n_col, +void csc_le_csc(const I n_row, const I n_col, const I Ap[], const I Ai[], const T Ax[], const I Bp[], const I Bi[], const T Bx[], I Cp[], I Ci[], T2 Cx[]) { - csr_ne_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); + csr_le_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); } +template +void csc_ge_csc(const I n_row, const I n_col, + const I Ap[], const I Ai[], const T Ax[], + const I Bp[], const I Bi[], const T 
Bx[], + I Cp[], I Ci[], T2 Cx[]) +{ + csr_ge_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); +} template void csc_elmul_csc(const I n_row, const I n_col, diff --git a/scipy/sparse/sparsetools/csc.i b/scipy/sparse/sparsetools/csc.i index 8c3cf1c..9706505 100644 --- a/scipy/sparse/sparsetools/csc.i +++ b/scipy/sparse/sparsetools/csc.i @@ -22,3 +22,7 @@ INSTANTIATE_ALL(csc_plus_csc) INSTANTIATE_ALL(csc_minus_csc) INSTANTIATE_BOOL_OUT(csc_ne_csc) +INSTANTIATE_BOOL_OUT(csc_lt_csc) +INSTANTIATE_BOOL_OUT(csc_gt_csc) +INSTANTIATE_BOOL_OUT(csc_le_csc) +INSTANTIATE_BOOL_OUT(csc_ge_csc) diff --git a/scipy/sparse/sparsetools/csr.h b/scipy/sparse/sparsetools/csr.h index 277c8e7..a401699 100644 --- a/scipy/sparse/sparsetools/csr.h +++ b/scipy/sparse/sparsetools/csr.h @@ -865,6 +865,42 @@ void csr_ne_csr(const I n_row, const I n_col, csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::not_equal_to()); } +template +void csr_lt_csr(const I n_row, const I n_col, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T2 Cx[]) +{ + csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::less()); +} + +template +void csr_gt_csr(const I n_row, const I n_col, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T2 Cx[]) +{ + csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::greater()); +} + +template +void csr_le_csr(const I n_row, const I n_col, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T2 Cx[]) +{ + csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::less_equal()); +} + +template +void csr_ge_csr(const I n_row, const I n_col, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T2 Cx[]) +{ + csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::greater_equal()); +} + template void csr_elmul_csr(const I n_row, const I n_col, const I Ap[], const I Aj[], const T Ax[], diff --git a/scipy/sparse/sparsetools/csr.i b/scipy/sparse/sparsetools/csr.i index 81b83eb..294df9b 100644 --- a/scipy/sparse/sparsetools/csr.i +++ b/scipy/sparse/sparsetools/csr.i @@ -33,3 +33,7 @@ INSTANTIATE_ALL(get_csr_submatrix) INSTANTIATE_ALL(csr_sample_values) INSTANTIATE_BOOL_OUT(csr_ne_csr) +INSTANTIATE_BOOL_OUT(csr_lt_csr) +INSTANTIATE_BOOL_OUT(csr_gt_csr) +INSTANTIATE_BOOL_OUT(csr_le_csr) +INSTANTIATE_BOOL_OUT(csr_ge_csr) commit e036c5bd6ab383c54420baac36141d9bbf7e96e8 Author: Blake Griffith Date: Tue Jun 18 12:02:19 2013 -0500 ENH: Add inequality overrides to complex_ops.h. 
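The csc_lt_csc, csc_gt_csc, csc_le_csc, and csc_ge_csc wrappers just above forward to the CSR kernels with the row and column counts swapped: the index arrays of a CSC matrix, read as CSR, describe its transpose, and elementwise comparisons commute with transposition. A dense sketch of the identity being exploited, illustrative only:

    import numpy as np

    A = np.array([[0, 1],
                  [2, 3],
                  [4, 0]])
    B = np.array([[1, 1],
                  [2, 0],
                  [3, 5]])

    # Comparing the transposes (what the CSR kernel sees for CSC input)
    # and transposing the result gives the same answer as comparing the
    # originals, so the CSC kernels can reuse the CSR kernels verbatim.
    assert np.array_equal((A.T < B.T).T, A < B)
    assert np.array_equal((A.T >= B.T).T, A >= B)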
diff --git a/scipy/sparse/sparsetools/complex_ops.h b/scipy/sparse/sparsetools/complex_ops.h index 4687121..154305e 100644 --- a/scipy/sparse/sparsetools/complex_ops.h +++ b/scipy/sparse/sparsetools/complex_ops.h @@ -68,18 +68,75 @@ class complex_wrapper : public npy_type { npy_type::real = temp; return (*this); } + /* Boolean operations */ bool operator==(const complex_wrapper& B) const{ return npy_type::real == B.real && npy_type::imag == B.imag; } bool operator!=(const complex_wrapper& B) const{ return npy_type::real != B.real || npy_type::imag != B.imag; } + bool operator<(const complex_wrapper& B) const{ + if (npy_type::real == B.real){ + return npy_type::imag < B.imag; + } else { + return npy_type::real < B.real; + } + } + bool operator>(const complex_wrapper& B) const{ + if (npy_type::real == B.real){ + return npy_type::imag > B.imag; + } else { + return npy_type::real > B.real; + } + } + bool operator<=(const complex_wrapper& B) const{ + if (npy_type::real == B.real){ + return npy_type::imag <= B.imag; + } else { + return npy_type::real <= B.real; + } + } + bool operator>=(const complex_wrapper& B) const{ + if (npy_type::real == B.real){ + return npy_type::imag >= B.imag; + } else { + return npy_type::real >= B.real; + } + } bool operator==(const c_type& B) const{ return npy_type::real == B && npy_type::imag == c_type(0); } bool operator!=(const c_type& B) const{ return npy_type::real != B || npy_type::imag != c_type(0); } + bool operator<(const c_type& B) const{ + if (npy_type::real == B) { + return npy_type::imag < c_type(0); + } else { + return npy_type::real < B; + } + } + bool operator>(const c_type& B) const{ + if (npy_type::real == B) { + return npy_type::imag > c_type(0); + } else { + return npy_type::real > B; + } + } + bool operator<=(const c_type& B) const{ + if (npy_type::real == B) { + return npy_type::imag <= c_type(0); + } else { + return npy_type::real <= B; + } + } + bool operator>=(const c_type& B) const{ + if (npy_type::real == B) { + return npy_type::imag >= c_type(0); + } else { + return npy_type::real >= B; + } + } complex_wrapper& operator=(const complex_wrapper& B){ npy_type::real = B.real; npy_type::imag = B.imag; @@ -97,7 +154,7 @@ std::ostream& operator<<(std::ostream& out, const complex_wrapper& cw){ return out << cw.real << " " << cw.imag; } -typedef complex_wrapper npy_cfloat_wrapper; +typedef complex_wrapper npy_cfloat_wrapper; typedef complex_wrapper npy_cdouble_wrapper; typedef complex_wrapper npy_clongdouble_wrapper; commit 2d075b42d24f9d1df4376faf378f2438e2c23c48 Author: Blake Griffith Date: Sat Jun 22 22:25:26 2013 -0500 ENH: Altered _binop and boolean comparisons to use the boolean data output from sparsetools routines. diff --git a/scipy/sparse/bsr.py b/scipy/sparse/bsr.py index 5180b1d..6d8ff64 100644 --- a/scipy/sparse/bsr.py +++ b/scipy/sparse/bsr.py @@ -502,10 +502,10 @@ class bsr_matrix(_cs_matrix, _minmax_mixin): # utility functions def _binopt(self, other, op, in_shape=None, out_shape=None): - """apply the binary operation fn to two sparse matrices""" + """Apply the binary operation fn to two sparse matrices.""" - # ideally we'd take the GCDs of the blocksize dimensions - # and explode self and other to match + # Ideally we'd take the GCDs of the blocksize dimensions + # and explode self and other to match. other = self.__class__(other, blocksize=self.blocksize) # e.g. bsr_plus_bsr, etc. 
@@ -516,7 +516,11 @@ class bsr_matrix(_cs_matrix, _minmax_mixin): max_bnnz = len(self.data) + len(other.data) indptr = np.empty_like(self.indptr) indices = np.empty(max_bnnz, dtype=np.intc) - data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype)) + + if op == '_ne_': + data = np.empty(R*C*max_bnnz, dtype=np.bool_) + else: + data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype)) fn(self.shape[0]//R, self.shape[1]//C, R, C, self.indptr, self.indices, np.ravel(self.data), diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index 866a9c4..4b25e09 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -174,11 +174,11 @@ class _cs_matrix(_data_matrix, _minmax_mixin): if isscalarlike(other): other_arr = self.copy() other_arr.data[:] = other - res = self._binopt(other_arr,'_ne_').astype(bool) + res = self._binopt(other_arr,'_ne_') if other == 0: warn("Comparing a sparse matrix with 0 using == is inefficient" ", try using != instead.") - all_true = self.__class__(np.ones(self.shape).astype(bool)) + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) return all_true - res else: self_as_bool = self.astype(bool) @@ -195,8 +195,8 @@ class _cs_matrix(_data_matrix, _minmax_mixin): return False elif self.format != other.format: other = other.asformat(self.format) - res = self._binopt(other,'_ne_').astype(bool) - all_true = self.__class__(np.ones(self.shape).astype(bool)) + res = self._binopt(other,'_ne_') + all_true = self.__class__(np.ones(self.shape, dtype=np.bool_)) return all_true - res else: return False @@ -207,13 +207,13 @@ class _cs_matrix(_data_matrix, _minmax_mixin): if other != 0: warn("Comparing a sparse matrix with a nonzero scalar using !=" " is inefficient, try using == instead.") - all_true = self.__class__(np.ones(self.shape).astype(bool)) + all_true = self.__class__(np.ones(self.shape), dtype=np.bool_) res = (self == other) return all_true - res else: other_arr = self.copy() other_arr.data[:] = other - return self._binopt(other_arr,'_ne_').astype(bool) + return self._binopt(other_arr,'_ne_') # Dense other. elif isdense(other): return self.todense() != other @@ -224,7 +224,7 @@ class _cs_matrix(_data_matrix, _minmax_mixin): return True elif self.format != other.format: other = other.asformat(self.format) - return self._binopt(other,'_ne_').astype(bool) + return self._binopt(other,'_ne_') else: return True @@ -753,7 +753,7 @@ class _cs_matrix(_data_matrix, _minmax_mixin): shape=self.shape,dtype=data.dtype) def _binopt(self, other, op): - """apply the binary operation fn to two sparse matrices""" + """apply the binary operation fn to two sparse matrices.""" other = self.__class__(other) # e.g. csr_plus_csr, csr_minus_csr, etc. @@ -762,7 +762,11 @@ class _cs_matrix(_data_matrix, _minmax_mixin): maxnnz = self.nnz + other.nnz indptr = np.empty_like(self.indptr) indices = np.empty(maxnnz, dtype=np.intc) - data = np.empty(maxnnz, dtype=upcast(self.dtype,other.dtype)) + + if op == '_ne_': + data = np.empty(maxnnz, dtype=np.bool_) + else: + data = np.empty(maxnnz, dtype=upcast(self.dtype,other.dtype)) fn(self.shape[0], self.shape[1], self.indptr, self.indices, self.data, commit 859b81154e061b0650c0b4d19f35a74d8fad8b60 Author: Blake Griffith Date: Sat Jun 22 21:59:35 2013 -0500 ENH: Regenerated SWIG wrappers. 
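The complex_wrapper comparison operators added in the complex_ops.h patch above order complex values lexicographically: real parts are compared first, and the imaginary parts only break ties. A small Python mirror of that rule, for reference only; the actual implementation is the C++ shown above:

    def complex_less(a, b):
        # Mirrors complex_wrapper::operator< above: compare real parts
        # first, fall back to the imaginary parts only on a tie.
        if a.real == b.real:
            return a.imag < b.imag
        return a.real < b.real

    assert complex_less(1 + 1j, 2 + 0j)      # decided by the real parts
    assert complex_less(1 + 1j, 1 + 2j)      # real parts tie, imag decides
    assert not complex_less(2 + 0j, 1 + 9j)  # real part dominates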
diff --git a/scipy/sparse/sparsetools/bsr.py b/scipy/sparse/sparsetools/bsr.py index 1942d5a..4514888 100644 Binary files a/scipy/sparse/sparsetools/bsr.py and b/scipy/sparse/sparsetools/bsr.py differ diff --git a/scipy/sparse/sparsetools/bsr_wrap.cxx b/scipy/sparse/sparsetools/bsr_wrap.cxx index 00dc783..c8a3708 100644 Binary files a/scipy/sparse/sparsetools/bsr_wrap.cxx and b/scipy/sparse/sparsetools/bsr_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/csc.py b/scipy/sparse/sparsetools/csc.py index 3c419e1..90b2617 100644 Binary files a/scipy/sparse/sparsetools/csc.py and b/scipy/sparse/sparsetools/csc.py differ diff --git a/scipy/sparse/sparsetools/csc_wrap.cxx b/scipy/sparse/sparsetools/csc_wrap.cxx index fca7d3f..87a5167 100644 Binary files a/scipy/sparse/sparsetools/csc_wrap.cxx and b/scipy/sparse/sparsetools/csc_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/csr.py b/scipy/sparse/sparsetools/csr.py index 6b8a43f..23b5ce7 100644 Binary files a/scipy/sparse/sparsetools/csr.py and b/scipy/sparse/sparsetools/csr.py differ diff --git a/scipy/sparse/sparsetools/csr_wrap.cxx b/scipy/sparse/sparsetools/csr_wrap.cxx index 0791111..a4184ee 100644 Binary files a/scipy/sparse/sparsetools/csr_wrap.cxx and b/scipy/sparse/sparsetools/csr_wrap.cxx differ commit c14f501a407a94820dc935dbe111edb1b0773816 Author: Blake Griffith Date: Sat Jun 22 22:18:01 2013 -0500 ENH: Altered bsr, csc, and csr routines to produce boolean output. diff --git a/scipy/sparse/sparsetools/bsr.h b/scipy/sparse/sparsetools/bsr.h index 464cf2a..ea87cbd 100644 --- a/scipy/sparse/sparsetools/bsr.h +++ b/scipy/sparse/sparsetools/bsr.h @@ -8,7 +8,6 @@ #include "csr.h" #include "dense.h" - template void bsr_diagonal(const I n_brow, const I n_bcol, @@ -349,12 +348,12 @@ bool is_nonzero_block(const T block[], const I blocksize){ * C will not contain any duplicate entries or explicit zeros. 
* */ -template +template void bsr_binop_bsr_general(const I n_brow, const I n_bcol, const I R, const I C, const I Ap[], const I Aj[], const T Ax[], const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], + I Cp[], I Cj[], T2 Cx[], const bin_op& op) { //Method that works for duplicate and/or unsorted indices @@ -439,16 +438,16 @@ void bsr_binop_bsr_general(const I n_brow, const I n_bcol, * Cx will not contain any zero entries * */ -template +template void bsr_binop_bsr_canonical(const I n_brow, const I n_bcol, const I R, const I C, const I Ap[], const I Aj[], const T Ax[], const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], + I Cp[], I Cj[], T2 Cx[], const bin_op& op) { const I RC = R*C; - T * result = Cx; + T2 * result = Cx; Cp[0] = 0; I nnz = 0; @@ -568,12 +567,12 @@ void bsr_binop_bsr_canonical(const I n_brow, const I n_bcol, * Cx will not contain any zero entries * */ -template +template void bsr_binop_bsr(const I n_brow, const I n_bcol, const I R, const I C, const I Ap[], const I Aj[], const T Ax[], const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], + I Cp[], I Cj[], T2 Cx[], const bin_op& op) { assert( R > 0 && C > 0); @@ -593,11 +592,11 @@ void bsr_binop_bsr(const I n_brow, const I n_bcol, } /* element-wise binary operations */ -template +template void bsr_ne_bsr(const I n_row, const I n_col, const I R, const I C, const I Ap[], const I Aj[], const T Ax[], const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) + I Cp[], I Cj[], T2 Cx[]) { bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::not_equal_to()); } diff --git a/scipy/sparse/sparsetools/bsr.i b/scipy/sparse/sparsetools/bsr.i index e721249..5da7cf9 100644 --- a/scipy/sparse/sparsetools/bsr.i +++ b/scipy/sparse/sparsetools/bsr.i @@ -9,7 +9,6 @@ %include "bsr.h" - INSTANTIATE_ALL(bsr_diagonal) INSTANTIATE_ALL(bsr_scale_rows) INSTANTIATE_ALL(bsr_scale_columns) @@ -17,11 +16,10 @@ INSTANTIATE_ALL(bsr_transpose) INSTANTIATE_ALL(bsr_matmat_pass2) INSTANTIATE_ALL(bsr_matvec) INSTANTIATE_ALL(bsr_matvecs) -INSTANTIATE_ALL(bsr_ne_bsr) INSTANTIATE_ALL(bsr_elmul_bsr) INSTANTIATE_ALL(bsr_eldiv_bsr) INSTANTIATE_ALL(bsr_plus_bsr) INSTANTIATE_ALL(bsr_minus_bsr) INSTANTIATE_ALL(bsr_sort_indices) - +INSTANTIATE_BOOL_OUT(bsr_ne_bsr) diff --git a/scipy/sparse/sparsetools/csc.h b/scipy/sparse/sparsetools/csc.h index 0c221ba..05e6684 100644 --- a/scipy/sparse/sparsetools/csc.h +++ b/scipy/sparse/sparsetools/csc.h @@ -1,10 +1,8 @@ #ifndef __CSC_H__ #define __CSC_H__ - #include "csr.h" - /* * Compute Y += A*X for CSC matrix A and dense vectors X,Y * @@ -140,11 +138,11 @@ void csc_matmat_pass2(const I n_row, -template +template void csc_ne_csc(const I n_row, const I n_col, const I Ap[], const I Ai[], const T Ax[], const I Bp[], const I Bi[], const T Bx[], - I Cp[], I Ci[], T Cx[]) + I Cp[], I Ci[], T2 Cx[]) { csr_ne_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); } diff --git a/scipy/sparse/sparsetools/csc.i b/scipy/sparse/sparsetools/csc.i index 29f6f26..8c3cf1c 100644 --- a/scipy/sparse/sparsetools/csc.i +++ b/scipy/sparse/sparsetools/csc.i @@ -16,8 +16,9 @@ INSTANTIATE_ALL(csc_tocsr) INSTANTIATE_ALL(csc_matmat_pass2) INSTANTIATE_ALL(csc_matvec) INSTANTIATE_ALL(csc_matvecs) -INSTANTIATE_ALL(csc_ne_csc) INSTANTIATE_ALL(csc_elmul_csc) INSTANTIATE_ALL(csc_eldiv_csc) INSTANTIATE_ALL(csc_plus_csc) INSTANTIATE_ALL(csc_minus_csc) + +INSTANTIATE_BOOL_OUT(csc_ne_csc) diff --git a/scipy/sparse/sparsetools/csr.h b/scipy/sparse/sparsetools/csr.h index 6c59a25..277c8e7 100644 --- 
a/scipy/sparse/sparsetools/csr.h +++ b/scipy/sparse/sparsetools/csr.h @@ -637,11 +637,11 @@ void csr_matmat_pass2(const I n_row, * C will not contain any duplicate entries or explicit zeros. * */ -template +template void csr_binop_csr_general(const I n_row, const I n_col, const I Ap[], const I Aj[], const T Ax[], const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], + I Cp[], I Cj[], T2 Cx[], const binary_op& op) { //Method that works for duplicate and/or unsorted indices @@ -727,11 +727,11 @@ void csr_binop_csr_general(const I n_row, const I n_col, * Cx will not contain any zero entries * */ -template +template void csr_binop_csr_canonical(const I n_row, const I n_col, const I Ap[], const I Aj[], const T Ax[], const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[], + I Cp[], I Cj[], T2 Cx[], const binary_op& op) { //Method that works for canonical CSR matrices @@ -835,7 +835,7 @@ void csr_binop_csr_canonical(const I n_row, const I n_col, * Cx will not contain any zero entries * */ -template +template void csr_binop_csr(const I n_row, const I n_col, const I Ap[], @@ -846,7 +846,7 @@ void csr_binop_csr(const I n_row, const T Bx[], I Cp[], I Cj[], - T Cx[], + T2 Cx[], const binary_op& op) { if (csr_has_canonical_format(n_row,Ap,Aj) && csr_has_canonical_format(n_row,Bp,Bj)) @@ -855,14 +855,12 @@ void csr_binop_csr(const I n_row, csr_binop_csr_general(n_row, n_col, Ap, Aj, Ax, Bp, Bj, Bx, Cp, Cj, Cx, op); } - - /* element-wise binary operations*/ -template +template void csr_ne_csr(const I n_row, const I n_col, const I Ap[], const I Aj[], const T Ax[], const I Bp[], const I Bj[], const T Bx[], - I Cp[], I Cj[], T Cx[]) + I Cp[], I Cj[], T2 Cx[]) { csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::not_equal_to()); } diff --git a/scipy/sparse/sparsetools/csr.i b/scipy/sparse/sparsetools/csr.i index 9bb237b..81b83eb 100644 --- a/scipy/sparse/sparsetools/csr.i +++ b/scipy/sparse/sparsetools/csr.i @@ -9,7 +9,6 @@ %include "csr.h" - INSTANTIATE_INDEX(expandptr) INSTANTIATE_INDEX(csr_matmat_pass1) INSTANTIATE_INDEX(csr_count_blocks) @@ -23,7 +22,6 @@ INSTANTIATE_ALL(csr_tobsr) INSTANTIATE_ALL(csr_matmat_pass2) INSTANTIATE_ALL(csr_matvec) INSTANTIATE_ALL(csr_matvecs) -INSTANTIATE_ALL(csr_ne_csr) INSTANTIATE_ALL(csr_elmul_csr) INSTANTIATE_ALL(csr_eldiv_csr) INSTANTIATE_ALL(csr_plus_csr) @@ -34,3 +32,4 @@ INSTANTIATE_ALL(csr_sum_duplicates) INSTANTIATE_ALL(get_csr_submatrix) INSTANTIATE_ALL(csr_sample_values) +INSTANTIATE_BOOL_OUT(csr_ne_csr) commit b7a40138e4a3cb6d534c5097b0e33559c91e231f Author: Blake Griffith Date: Sat Jun 22 21:58:44 2013 -0500 ENH: Added macro for intantiating boolean comparisons to sparsetools.i diff --git a/scipy/sparse/sparsetools/sparsetools.i b/scipy/sparse/sparsetools/sparsetools.i index 280eea0..d4509e6 100644 --- a/scipy/sparse/sparsetools/sparsetools.i +++ b/scipy/sparse/sparsetools/sparsetools.i @@ -200,3 +200,22 @@ DECLARE_DATA_TYPE( npy_clongdouble_wrapper ) /* 64-bit indices would go here */ %enddef +%define INSTANTIATE_BOOL_OUT( f_name ) +/* 32-bit indices */ +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +%template(f_name) f_name; +/* 64-bit indices would go here */ +%enddef commit 
7e10e840e93ecec6ec9c42bbd7fd4fcf9327ae5e Author: Blake Griffith Date: Sat Jun 22 21:57:36 2013 -0500 ENH: Added bool conversion to complex_ops.h diff --git a/scipy/sparse/sparsetools/complex_ops.h b/scipy/sparse/sparsetools/complex_ops.h index 7762001..4687121 100644 --- a/scipy/sparse/sparsetools/complex_ops.h +++ b/scipy/sparse/sparsetools/complex_ops.h @@ -18,6 +18,13 @@ class complex_wrapper : public npy_type { npy_type::real = r; npy_type::imag = i; } + operator bool() const { + if (npy_type::real == 0 && npy_type::imag == 0) { + return false; + } else { + return true; + } + } complex_wrapper operator-() const { return complex_wrapper(-npy_type::real,-npy_type::imag); } commit e73a44f56ca1a2f108a06ac3c872bed236de44f4 Author: Blake Griffith Date: Thu Jun 20 22:06:23 2013 -0500 TST: Remove deprecated tests. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index d71e85c..0df4714 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -937,17 +937,20 @@ class _TestInplaceArithmetic: dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] - a = datsp.copy() - a *= 2 - b = dat.copy() - b *= 2 - assert_array_equal(b, a.todense()) - - a = datsp.copy() - a *= 17.3 - b = dat.copy() - b *= 17.3 - assert_array_equal(b, a.todense()) + # Avoid implicit casting. + if np.can_cast(type(2), dtype, casting='same_kind'): + a = datsp.copy() + a *= 2 + b = dat.copy() + b *= 2 + assert_array_equal(b, a.todense()) + + if np.can_cast(type(17.3), dtype, casting='same_kind'): + a = datsp.copy() + a *= 17.3 + b = dat.copy() + b *= 17.3 + assert_array_equal(b, a.todense()) for dtype in self.checked_dtypes: yield check, dtype @@ -957,21 +960,25 @@ class _TestInplaceArithmetic: dat = self.dat_dtypes[dtype] datsp = self.datsp_dtypes[dtype] - a = datsp.copy() - a /= 2 - b = dat.copy() - b /= 2 - assert_array_equal(b, a.todense()) + if np.can_cast(type(2), dtype, casting='same_kind'): + a = datsp.copy() + a /= 2 + b = dat.copy() + b /= 2 + assert_array_equal(b, a.todense()) - a = datsp.copy() - a /= 17.3 - b = dat.copy() - b /= 17.3 - assert_array_equal(b, a.todense()) + if np.can_cast(type(17.3), dtype, casting='same_kind'): + a = datsp.copy() + a /= 17.3 + b = dat.copy() + b /= 17.3 + assert_array_equal(b, a.todense()) for dtype in self.checked_dtypes: - yield check, dtype - + # /= should only be used with float dtypes to avoid implicit + # casting. + if not np.can_cast(dtype, np.int_): + yield check, dtype class _TestGetSet: def test_getelement(self): commit 5eb84282af40d8dd0751b4e7f2e12cd98b66329e Author: Blake Griffith Date: Sat Jun 8 17:00:24 2013 -0500 TST: Tests for the changed sum and mean methods. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index bd5cefb..b0b7fd2 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -166,28 +166,40 @@ class _TestCommon: def test_sum(self): def check(dtype): - dat = self.dat_dtypes[dtype] - datsp = self.datsp_dtypes[dtype] - + dat = np.matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]], dtype=dtype) + datsp = self.spmatrix(dat, dtype=dtype) + # Does the matrix's .sum(axis=...) method work? 
- assert_array_equal(dat.sum(), datsp.sum()) - assert_array_equal(dat.sum(axis=None), datsp.sum(axis=None)) - assert_array_equal(dat.sum(axis=0), datsp.sum(axis=0)) - assert_array_equal(dat.sum(axis=1), datsp.sum(axis=1)) + assert_array_almost_equal(dat.sum(), datsp.sum()) + assert_equal(dat.sum().dtype, datsp.sum().dtype) + assert_array_almost_equal(dat.sum(axis=None), datsp.sum(axis=None)) + assert_equal(dat.sum(axis=None).dtype, datsp.sum(axis=None).dtype) + assert_array_almost_equal(dat.sum(axis=0), datsp.sum(axis=0)) + assert_equal(dat.sum(axis=0).dtype, datsp.sum(axis=0).dtype) + assert_array_almost_equal(dat.sum(axis=1), datsp.sum(axis=1)) + assert_equal(dat.sum(axis=1).dtype, datsp.sum(axis=1).dtype) for dtype in self.checked_dtypes: yield check, dtype def test_mean(self): def check(dtype): - dat = self.dat_dtypes[dtype] - datsp = self.datsp_dtypes[dtype] + dat = np.matrix([[0, 1, 2], + [3, -4, 5], + [-6, 7, 9]], dtype=dtype) + datsp = self.spmatrix(dat, dtype=dtype) # Does the matrix's .mean(axis=...) method work? - assert_array_equal(dat.mean(), datsp.mean()) - assert_array_equal(dat.mean(axis=None), datsp.mean(axis=None)) - assert_array_equal(dat.mean(axis=0), datsp.mean(axis=0)) - assert_array_equal(dat.mean(axis=1), datsp.mean(axis=1)) + assert_array_almost_equal(dat.mean(), datsp.mean()) + assert_equal(dat.mean().dtype, datsp.mean().dtype) + assert_array_almost_equal(dat.mean(axis=None), datsp.mean(axis=None)) + assert_equal(dat.mean(axis=None).dtype, datsp.mean(axis=None).dtype) + assert_array_almost_equal(dat.mean(axis=0), datsp.mean(axis=0)) + assert_equal(dat.mean(axis=0).dtype, datsp.mean(axis=0).dtype) + assert_array_almost_equal(dat.mean(axis=1), datsp.mean(axis=1)) + assert_equal(dat.mean(axis=1).dtype, datsp.mean(axis=1).dtype) for dtype in self.checked_dtypes: yield check, dtype @@ -388,7 +400,7 @@ class _TestCommon: assert_array_equal(dat*17.3,(datsp*17.3).todense()) for dtype in self.checked_dtypes: - fails = ((dtype == np.typeDict['int']) and + fails = ((dtype == np.typeDict['int']) and (self.__class__ == TestLIL or self.__class__ == TestDOK)) msg = "LIL and DOK type's __mul__ method has problems with int data." @@ -403,7 +415,7 @@ class _TestCommon: assert_array_equal(17.3*dat,(17.3*datsp).todense()) for dtype in self.checked_dtypes: - fails = ((dtype == np.typeDict['int']) and + fails = ((dtype == np.typeDict['int']) and (self.__class__ == TestLIL or self.__class__ == TestDOK)) msg = "LIL and DOK type's __rmul__ method has problems with int data." 
@@ -1759,7 +1771,7 @@ def sparse_test_class(getset=True, slicing=True, slicing_assign=True, class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csr_matrix - checked_dtypes = [np.bool_, np.int_, np.float_] + checked_dtypes = [np.bool_, np.int_, np.float_, np.complex_] def test_constructor1(self): b = matrix([[0,4,0], @@ -1898,7 +1910,7 @@ class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, class TestCSC(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csc_matrix - checked_dtypes = [np.bool_, np.int_, np.float_] + checked_dtypes = [np.bool_, np.int_, np.float_, np.complex_] def test_constructor1(self): b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') @@ -2025,7 +2037,7 @@ class TestDOK(sparse_test_class(slicing=False, fancy_assign=False, minmax=False)): spmatrix = dok_matrix - checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.int_, np.float_, np.complex_] def test_mult(self): A = dok_matrix((10,10)) @@ -2170,7 +2182,7 @@ class TestDOK(sparse_test_class(slicing=False, class TestLIL(sparse_test_class(minmax=False)): spmatrix = lil_matrix - checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.int_, np.float_, np.complex_] def test_dot(self): A = matrix(zeros((10,10))) @@ -2283,7 +2295,7 @@ class TestCOO(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spmatrix = coo_matrix - checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.int_, np.float_, np.complex_] def test_constructor1(self): # unsorted triplet format @@ -2344,7 +2356,7 @@ class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=Fals fancy_indexing=False, fancy_assign=False, minmax=False)): spmatrix = dia_matrix - checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.int_, np.float_, np.complex_] def test_constructor1(self): D = matrix([[1, 0, 3, 0], @@ -2364,7 +2376,7 @@ class TestBSR(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spmatrix = bsr_matrix - checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.int_, np.float_, np.complex_] def test_constructor1(self): # check native BSR format constructor commit 27923d625d174c1b231c1f9e2de9a8bcb1375b03 Author: Blake Griffith Date: Fri Jun 7 17:23:41 2013 -0500 BUG: Change Spares' sum and mean methods to match Numpy's buhavior. diff --git a/scipy/sparse/base.py b/scipy/sparse/base.py index 5784527..303e253 100644 --- a/scipy/sparse/base.py +++ b/scipy/sparse/base.py @@ -572,15 +572,28 @@ class spmatrix(object): # For some sparse matrix formats more efficient methods are # possible -- these should override this function. m, n = self.shape + + # Mimic numpy's casting. + if np.issubdtype(self.dtype, np.float_): + res_dtype = np.float_ + elif (np.issubdtype(self.dtype, np.int_) or + np.issubdtype(self.dtype, np.bool_)): + res_dtype = np.int_ + elif np.issubdtype(self.dtype, np.complex_): + res_dtype = np.complex_ + else: + res_dtype = self.dtype + + # Calculate the sum. 
if axis == 0: # sum over columns - return np.asmatrix(np.ones((1, m), dtype=self.dtype)) * self + return np.asmatrix(np.ones((1, m), dtype=res_dtype)) * self elif axis == 1: # sum over rows - return self * np.asmatrix(np.ones((n, 1), dtype=self.dtype)) + return self * np.asmatrix(np.ones((n, 1), dtype=res_dtype)) elif axis is None: # sum over rows and columns - return (self * np.asmatrix(np.ones((n, 1), dtype=self.dtype))).sum() + return (self * np.asmatrix(np.ones((n, 1), dtype=res_dtype))).sum() else: raise ValueError("axis out of bounds") @@ -588,12 +601,23 @@ class spmatrix(object): """Average the matrix over the given axis. If the axis is None, average over both rows and columns, returning a scalar. """ + # Mimic numpy's casting. + if (np.issubdtype(self.dtype, np.float_) or + np.issubdtype(self.dtype, np.int_) or + np.issubdtype(self.dtype, np.bool_)): + res_dtype = np.float_ + elif np.issubdtype(self.dtype, np.complex_): + res_dtype = np.complex_ + else: + res_dtype = self.dtype + + # Calculate the mean. if axis == 0: - mean = self.sum(0) + mean = self.astype(res_dtype).sum(0) mean *= 1.0 / self.shape[0] return mean elif axis == 1: - mean = self.sum(1) + mean = self.astype(res_dtype).sum(1) mean *= 1.0 / self.shape[1] return mean elif axis is None: diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index beba9af..bd5cefb 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -165,18 +165,32 @@ class _TestCommon: assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1]) def test_sum(self): - # Does the matrix's .sum(axis=...) method work? - assert_array_equal(self.dat.sum(), self.datsp.sum()) - assert_array_equal(self.dat.sum(axis=None), self.datsp.sum(axis=None)) - assert_almost_equal(self.dat.sum(axis=0), self.datsp.sum(axis=0)) - assert_almost_equal(self.dat.sum(axis=1), self.datsp.sum(axis=1)) + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + # Does the matrix's .sum(axis=...) method work? + assert_array_equal(dat.sum(), datsp.sum()) + assert_array_equal(dat.sum(axis=None), datsp.sum(axis=None)) + assert_array_equal(dat.sum(axis=0), datsp.sum(axis=0)) + assert_array_equal(dat.sum(axis=1), datsp.sum(axis=1)) + + for dtype in self.checked_dtypes: + yield check, dtype def test_mean(self): - # Does the matrix's .mean(axis=...) method work? - assert_array_equal(self.dat.mean(), self.datsp.mean()) - assert_array_equal(self.dat.mean(axis=None), self.datsp.mean(axis=None)) - assert_almost_equal(self.dat.mean(axis=0), self.datsp.mean(axis=0)) - assert_almost_equal(self.dat.mean(axis=1), self.datsp.mean(axis=1)) + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + + # Does the matrix's .mean(axis=...) method work? + assert_array_equal(dat.mean(), datsp.mean()) + assert_array_equal(dat.mean(axis=None), datsp.mean(axis=None)) + assert_array_equal(dat.mean(axis=0), datsp.mean(axis=0)) + assert_array_equal(dat.mean(axis=1), datsp.mean(axis=1)) + + for dtype in self.checked_dtypes: + yield check, dtype def test_expm(self): M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) commit ed4256ecde8bc7436990bb27b6387ab8695ea8c5 Author: Blake Griffith Date: Tue Jun 18 10:41:13 2013 -0500 TST: Tests for __bool__. 
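The test added below pins down the truth-value rules adopted for sparse matrices elsewhere in this series: only a 1x1 matrix has a defined truth value (True exactly when it stores a nonzero), and anything larger raises ValueError just as NumPy arrays do. A usage sketch of that behaviour, illustrative only:

    import numpy as np
    from scipy.sparse import csr_matrix

    assert bool(csr_matrix([[1]]))      # 1x1 with a stored nonzero -> True
    assert not bool(csr_matrix([[0]]))  # 1x1 all-zero -> False

    try:
        bool(csr_matrix(np.eye(3)))     # more than one element -> ambiguous
    except ValueError:
        pass                            # same error NumPy raises for arrays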
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 470d6a2..2b513de 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -77,6 +77,19 @@ class _TestCommon: assert_equal(self.datsp.todense(), self.datsp_dtypes[np.float64].todense()) + def test_bool(self): + def check(dtype): + datsp = self.datsp_dtypes[dtype] + + assert_raises(ValueError, bool, datsp) + assert_(self.spmatrix([1])) + assert_(not self.spmatrix([0])) + for dtype in self.checked_dtypes: + fails = self.__class__ == TestDOK + msg = "Cannot create a rank <= 2 DOK matrix." + yield dec.skipif(fails, msg)(check), dtype + + def test_eq(self): def check(dtype): dat = self.dat_dtypes[dtype] @@ -106,7 +119,7 @@ class _TestCommon: fails = not (self.__class__ == TestBSR or self.__class__ == TestCSC or self.__class__ == TestCSR) - msg = "Bool cjmparisons only implemented for CSC and CSR." + msg = "Bool comparisons only implemented for CSC and CSR." yield dec.skipif(fails, msg)(check), dtype def test_ne(self): commit 28af49a7e7c1eea691d52ea3c7ac6d3020397b78 Author: Blake Griffith Date: Mon Jun 17 16:41:51 2013 -0500 TST: Fixed an unrelated error that occurred due to a change in sparse. diff --git a/scipy/io/matlab/tests/test_mio.py b/scipy/io/matlab/tests/test_mio.py index eabac2d..a4e48f0 100644 --- a/scipy/io/matlab/tests/test_mio.py +++ b/scipy/io/matlab/tests/test_mio.py @@ -108,8 +108,7 @@ case_table4.append( case_table4.append( {'name': 'multi', 'classes': {'theta': 'double', 'a': 'double'}, - 'expected': {'theta': theta, - 'a': A}, + 'expected': {'theta': theta, 'a': A}, }) case_table4.append( {'name': 'minus', @@ -381,9 +380,10 @@ def test_whos(): # generator for round trip tests def test_round_trip(): for case in case_table4 + case_table5_rt: + case_table4_names = [case['name'] for case in case_table4] name = case['name'] + '_round_trip' expected = case['expected'] - for format in (case in case_table4 and ['4', '5'] or ['5']): + for format in (['4', '5'] if case['name'] in case_table4_names else ['5']): yield _rt_check_case, name, expected, format commit e4525f2a305078662f8b730a1fc9ccbe51174a32 Author: Blake Griffith Date: Mon Jun 17 16:46:58 2013 -0500 API: Change sparse matrix __bool__ & __nonzero__ to raise a ValueError the same way NumPy does. diff --git a/scipy/sparse/base.py b/scipy/sparse/base.py index d4da8b3..584a837 100644 --- a/scipy/sparse/base.py +++ b/scipy/sparse/base.py @@ -176,12 +176,13 @@ class spmatrix(object): return out - if sys.version_info[0] >= 3: - def __bool__(self): # Simple -- other ideas? - return self.getnnz() > 0 - else: - def __nonzero__(self): # Simple -- other ideas? - return self.getnnz() > 0 + def __bool__(self): # Simple -- other ideas? + if self.shape == (1, 1): + return True if self.nnz == 1 else False + else: + raise ValueError("The truth value of an array with more than one " + "element is ambiguous. Use a.any() or a.all().") + __nonzero__ = __bool__ # What should len(sparse) return? For consistency with dense matrices, # perhaps it should be the number of rows? But for some uses the number of commit 42420f14c14a622149024a6514b8b7f10620d5bc Author: Blake Griffith Date: Mon Jun 17 23:26:27 2013 -0500 BUG: Homogenize nnz type to be int for all sparse matrix types.
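The patch below wraps each format's nnz computation in int(), so .nnz comes back as a plain Python int for every format rather than whatever scalar falls out of the underlying index arrays. A small sketch of the distinction, illustrative only; the indptr values and the np.intc dtype here are made up for the example:

    import numpy as np

    indptr = np.array([0, 2, 5], dtype=np.intc)   # hypothetical CSR indptr

    raw = indptr[-1]       # a NumPy integer scalar
    nnz = int(indptr[-1])  # what getnnz() now returns

    assert isinstance(raw, np.integer)
    assert type(nnz) is int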
diff --git a/scipy/sparse/bsr.py b/scipy/sparse/bsr.py index a1319da..5180b1d 100644 --- a/scipy/sparse/bsr.py +++ b/scipy/sparse/bsr.py @@ -264,7 +264,7 @@ class bsr_matrix(_cs_matrix, _minmax_mixin): def getnnz(self): R,C = self.blocksize - return self.indptr[-1] * R * C + return int(self.indptr[-1] * R * C) nnz = property(fget=getnnz) def __repr__(self): diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index b00a273..e8de617 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -84,7 +84,7 @@ class _cs_matrix(_data_matrix, _minmax_mixin): self.check_format(full_check=False) def getnnz(self): - return self.indptr[-1] + return int(self.indptr[-1]) nnz = property(fget=getnnz) def _set_self(self, other, copy=False): diff --git a/scipy/sparse/coo.py b/scipy/sparse/coo.py index 323dc50..ac78456 100644 --- a/scipy/sparse/coo.py +++ b/scipy/sparse/coo.py @@ -199,7 +199,7 @@ class coo_matrix(_data_matrix, _minmax_mixin): if np.rank(self.data) != 1 or np.rank(self.row) != 1 or np.rank(self.col) != 1: raise ValueError('row, column, and data arrays must have rank 1') - return nnz + return int(nnz) nnz = property(fget=getnnz) def _check(self): diff --git a/scipy/sparse/dia.py b/scipy/sparse/dia.py index 93b0db4..4b3c06a 100644 --- a/scipy/sparse/dia.py +++ b/scipy/sparse/dia.py @@ -160,7 +160,7 @@ class dia_matrix(_data_matrix): nnz += min(M,N-k) else: nnz += min(M+k,N) - return nnz + return int(nnz) nnz = property(fget=getnnz) commit 18fd520dd9c3c613041dde8296f19f5708ae4169 Author: Blake Griffith Date: Wed Jun 12 15:22:50 2013 -0500 TST: Added tests for == & != operators for BSR, CSC, and CSR. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index beba9af..470d6a2 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -77,6 +77,70 @@ class _TestCommon: assert_equal(self.datsp.todense(), self.datsp_dtypes[np.float64].todense()) + def test_eq(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datbsr = bsr_matrix(dat) + datcsr = csr_matrix(dat) + datcsc = csc_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal(dat == dat2, (datsp == datsp2).todense()) + # mix sparse types + assert_array_equal(dat == dat2, (datbsr == datsp2).todense()) + assert_array_equal(dat == dat2, (datcsr == datsp2).todense()) + assert_array_equal(dat == dat2, (datcsc == datsp2).todense()) + assert_array_equal(dat == dat2, (datlil == datsp2).todense()) + # sparse/dense + assert_array_equal(dat == datsp2, datsp2 == dat) + # sparse/scalar + assert_array_equal(dat == 0, (datsp == 0).todense()) + assert_array_equal(dat == 1, (datsp == 1).todense()) + + for dtype in self.checked_dtypes: + fails = not (self.__class__ == TestBSR or + self.__class__ == TestCSC or + self.__class__ == TestCSR) + msg = "Bool cjmparisons only implemented for CSC and CSR." 
+ yield dec.skipif(fails, msg)(check), dtype + + def test_ne(self): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + dat2 = dat.copy() + dat2[:,0] = 0 + datsp2 = self.spmatrix(dat2) + datbsr = bsr_matrix(dat) + datcsc = csc_matrix(dat) + datcsr = csr_matrix(dat) + datlil = lil_matrix(dat) + + # sparse/sparse + assert_array_equal(dat != dat2, (datsp != datsp2).todense()) + # mix sparse types + assert_array_equal(dat != dat2, (datbsr != datsp2).todense()) + assert_array_equal(dat != dat2, (datcsc != datsp2).todense()) + assert_array_equal(dat != dat2, (datcsr != datsp2).todense()) + assert_array_equal(dat != dat2, (datlil != datsp2).todense()) + # sparse/dense + assert_array_equal(dat != datsp2, datsp2 != dat) + # sparse/scalar + assert_array_equal(dat != 0, (datsp != 0).todense()) + assert_array_equal(dat != 1, (datsp != 1).todense()) + + for dtype in self.checked_dtypes: + fails = not (self.__class__ == TestBSR or + self.__class__ == TestCSC or + self.__class__ == TestCSR) + msg = "Bool comparisons only implemented for CSC and CSR." + yield dec.skipif(fails, msg)(check), dtype + def test_empty(self): # create empty matrices assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3))) @@ -374,7 +438,7 @@ class _TestCommon: assert_array_equal(dat*17.3,(datsp*17.3).todense()) for dtype in self.checked_dtypes: - fails = ((dtype == np.typeDict['int']) and + fails = ((dtype == np.typeDict['int']) and (self.__class__ == TestLIL or self.__class__ == TestDOK)) msg = "LIL and DOK type's __mul__ method has problems with int data." @@ -389,7 +453,7 @@ class _TestCommon: assert_array_equal(17.3*dat,(17.3*datsp).todense()) for dtype in self.checked_dtypes: - fails = ((dtype == np.typeDict['int']) and + fails = ((dtype == np.typeDict['int']) and (self.__class__ == TestLIL or self.__class__ == TestDOK)) msg = "LIL and DOK type's __rmul__ method has problems with int data." commit a6983fce44ca8e729ae6d4e0e1587fa02b1deee1 Author: Blake Griffith Date: Wed Jun 12 14:59:17 2013 -0500 ENH: Add == & != functionality for BSR, CSC, and CSR sparse matrices. diff --git a/scipy/sparse/base.py b/scipy/sparse/base.py index 5784527..d4da8b3 100644 --- a/scipy/sparse/base.py +++ b/scipy/sparse/base.py @@ -226,6 +226,12 @@ class spmatrix(object): def dot(self, other): return self * other + def __eq__(self, other): + return self.tocsr().__eq__(other) + + def __ne__(self, other): + return self.tocsr().__ne__(other) + def __abs__(self): return abs(self.tocsr()) diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index b00a273..c0cc2db 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -165,6 +165,73 @@ class _cs_matrix(_data_matrix, _minmax_mixin): # assert(self.has_sorted_indices()) # TODO check for duplicates? + ####################### + # Boolean comparisons # + ####################### + + def __eq__(self, other): + # Scalar other. + if isscalarlike(other): + other_arr = self.copy() + other_arr.data[:] = other + res = self._binopt(other_arr,'_ne_').astype(bool) + if other == 0: + warn("Comparing a sparse matrix with 0 using == is inefficient" + ", try using != instead.") + all_true = self.__class__(np.ones(self.shape).astype(bool)) + return all_true - res + else: + self_as_bool = self.astype(bool) + return self_as_bool - res + # Dense other. + elif isdense(other): + return self.todense() == other + # Sparse other. 
+ elif isspmatrix(other): + warn("Comparing sparse matrices using == is inefficient, try using" + " != instead.") + #TODO sparse broadcasting + if self.shape != other.shape: + return False + elif self.format != other.format: + other = other.asformat(self.format) + res = self._binopt(other,'_ne_').astype(bool) + all_true = self.__class__(np.ones(self.shape).astype(bool)) + return all_true - res + else: + return False + + def __ne__(self, other): + # Scalar other. + if isscalarlike(other): + if other != 0: + warn("Comparing a sparse matrix with a nonzero scalar using !=" + " is inefficient, try using == instead.") + all_true = self.__class__(np.ones(self.shape).astype(bool)) + res = (self == other) + return all_true - res + else: + other_arr = self.copy() + other_arr.data[:] = other + return self._binopt(other_arr,'_ne_').astype(bool) + # Dense other. + elif isdense(other): + return self.todense() != other + # Sparse other. + elif isspmatrix(other): + #TODO sparse broadcasting + if self.shape != other.shape: + return True + elif self.format != other.format: + other = other.asformat(self.format) + return self._binopt(other,'_ne_').astype(bool) + else: + return True + + ################################# + # Arithmatic operator overrides # + ################################# + def __add__(self,other): # First check if argument is a scalar if isscalarlike(other): commit acb3be63c55ca64c4555f9725174b082071b43a7 Author: Blake Griffith Date: Tue Jun 11 22:08:24 2013 -0500 ENH: Regenerated SWIG wrapper to accomodate changes with BSR, CSC, and CSR. diff --git a/scipy/sparse/sparsetools/bsr.py b/scipy/sparse/sparsetools/bsr.py index 27b7a6f..1942d5a 100644 Binary files a/scipy/sparse/sparsetools/bsr.py and b/scipy/sparse/sparsetools/bsr.py differ diff --git a/scipy/sparse/sparsetools/bsr_wrap.cxx b/scipy/sparse/sparsetools/bsr_wrap.cxx index d1bd5ae..00dc783 100644 Binary files a/scipy/sparse/sparsetools/bsr_wrap.cxx and b/scipy/sparse/sparsetools/bsr_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/csc.py b/scipy/sparse/sparsetools/csc.py index fdb034f..3c419e1 100644 Binary files a/scipy/sparse/sparsetools/csc.py and b/scipy/sparse/sparsetools/csc.py differ diff --git a/scipy/sparse/sparsetools/csc_wrap.cxx b/scipy/sparse/sparsetools/csc_wrap.cxx index 4a0f49b..fca7d3f 100644 Binary files a/scipy/sparse/sparsetools/csc_wrap.cxx and b/scipy/sparse/sparsetools/csc_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/csr.py b/scipy/sparse/sparsetools/csr.py index b15e2db..6b8a43f 100644 Binary files a/scipy/sparse/sparsetools/csr.py and b/scipy/sparse/sparsetools/csr.py differ diff --git a/scipy/sparse/sparsetools/csr_wrap.cxx b/scipy/sparse/sparsetools/csr_wrap.cxx index 62b2267..0791111 100644 Binary files a/scipy/sparse/sparsetools/csr_wrap.cxx and b/scipy/sparse/sparsetools/csr_wrap.cxx differ commit 1d820d0b1c1d864f3be459458d29f5f277e83985 Author: Blake Griffith Date: Tue Jun 11 20:41:54 2013 -0500 ENH: Added != binop to BSR, CSC, and CSR files in sparsetools/ and instantiate them. 
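A hypothetical usage sketch (not from the patch) of the comparison semantics wired up above and backed by the C++ kernels added below: != on two sparse matrices yields a sparse boolean mask, while == is supported but warns that it is the inefficient direction.

import numpy as np
from scipy.sparse import csr_matrix

a = csr_matrix(np.array([[1, 0, 2], [0, 3, 0]]))
b = csr_matrix(np.array([[1, 0, 0], [0, 3, 4]]))

# element-wise inequality via the new _ne_ binop; True only where the two
# matrices differ, here at (0, 2) and (1, 2)
mask = (a != b)
assert mask.dtype == np.bool_
assert np.array_equal(mask.todense(),
                      np.array([[False, False, True],
                                [False, False, True]]))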
diff --git a/scipy/sparse/sparsetools/bsr.h b/scipy/sparse/sparsetools/bsr.h index 36eb05f..464cf2a 100644 --- a/scipy/sparse/sparsetools/bsr.h +++ b/scipy/sparse/sparsetools/bsr.h @@ -594,6 +594,15 @@ void bsr_binop_bsr(const I n_brow, const I n_bcol, /* element-wise binary operations */ template +void bsr_ne_bsr(const I n_row, const I n_col, const I R, const I C, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T Cx[]) +{ + bsr_binop_bsr(n_row,n_col,R,C,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::not_equal_to()); +} + +template void bsr_elmul_bsr(const I n_row, const I n_col, const I R, const I C, const I Ap[], const I Aj[], const T Ax[], const I Bp[], const I Bj[], const T Bx[], diff --git a/scipy/sparse/sparsetools/bsr.i b/scipy/sparse/sparsetools/bsr.i index 6628ed7..e721249 100644 --- a/scipy/sparse/sparsetools/bsr.i +++ b/scipy/sparse/sparsetools/bsr.i @@ -17,6 +17,7 @@ INSTANTIATE_ALL(bsr_transpose) INSTANTIATE_ALL(bsr_matmat_pass2) INSTANTIATE_ALL(bsr_matvec) INSTANTIATE_ALL(bsr_matvecs) +INSTANTIATE_ALL(bsr_ne_bsr) INSTANTIATE_ALL(bsr_elmul_bsr) INSTANTIATE_ALL(bsr_eldiv_bsr) INSTANTIATE_ALL(bsr_plus_bsr) diff --git a/scipy/sparse/sparsetools/csc.h b/scipy/sparse/sparsetools/csc.h index aac9b0a..0c221ba 100644 --- a/scipy/sparse/sparsetools/csc.h +++ b/scipy/sparse/sparsetools/csc.h @@ -140,6 +140,15 @@ void csc_matmat_pass2(const I n_row, +template +void csc_ne_csc(const I n_row, const I n_col, + const I Ap[], const I Ai[], const T Ax[], + const I Bp[], const I Bi[], const T Bx[], + I Cp[], I Ci[], T Cx[]) +{ + csr_ne_csr(n_col, n_row, Ap, Ai, Ax, Bp, Bi, Bx, Cp, Ci, Cx); +} + template void csc_elmul_csc(const I n_row, const I n_col, diff --git a/scipy/sparse/sparsetools/csc.i b/scipy/sparse/sparsetools/csc.i index d4d1772..29f6f26 100644 --- a/scipy/sparse/sparsetools/csc.i +++ b/scipy/sparse/sparsetools/csc.i @@ -16,9 +16,8 @@ INSTANTIATE_ALL(csc_tocsr) INSTANTIATE_ALL(csc_matmat_pass2) INSTANTIATE_ALL(csc_matvec) INSTANTIATE_ALL(csc_matvecs) +INSTANTIATE_ALL(csc_ne_csc) INSTANTIATE_ALL(csc_elmul_csc) INSTANTIATE_ALL(csc_eldiv_csc) INSTANTIATE_ALL(csc_plus_csc) INSTANTIATE_ALL(csc_minus_csc) - - diff --git a/scipy/sparse/sparsetools/csr.h b/scipy/sparse/sparsetools/csr.h index 168993d..6c59a25 100644 --- a/scipy/sparse/sparsetools/csr.h +++ b/scipy/sparse/sparsetools/csr.h @@ -859,6 +859,15 @@ void csr_binop_csr(const I n_row, /* element-wise binary operations*/ template +void csr_ne_csr(const I n_row, const I n_col, + const I Ap[], const I Aj[], const T Ax[], + const I Bp[], const I Bj[], const T Bx[], + I Cp[], I Cj[], T Cx[]) +{ + csr_binop_csr(n_row,n_col,Ap,Aj,Ax,Bp,Bj,Bx,Cp,Cj,Cx,std::not_equal_to()); +} + +template void csr_elmul_csr(const I n_row, const I n_col, const I Ap[], const I Aj[], const T Ax[], const I Bp[], const I Bj[], const T Bx[], diff --git a/scipy/sparse/sparsetools/csr.i b/scipy/sparse/sparsetools/csr.i index db661de..9bb237b 100644 --- a/scipy/sparse/sparsetools/csr.i +++ b/scipy/sparse/sparsetools/csr.i @@ -23,6 +23,7 @@ INSTANTIATE_ALL(csr_tobsr) INSTANTIATE_ALL(csr_matmat_pass2) INSTANTIATE_ALL(csr_matvec) INSTANTIATE_ALL(csr_matvecs) +INSTANTIATE_ALL(csr_ne_csr) INSTANTIATE_ALL(csr_elmul_csr) INSTANTIATE_ALL(csr_eldiv_csr) INSTANTIATE_ALL(csr_plus_csr) commit 5ebbf8b469a2d779008273ebd27f7ae0588793e7 Author: Blake Griffith Date: Fri Jun 7 17:34:54 2013 -0500 STY: Changed yield-check idiom to pass dtype instead of arrays. 
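The diff below standardizes on a nose-style yield idiom where only the dtype travels through the yield and the data is looked up inside the check. Sketched standalone with hypothetical fixtures (not part of the patch):

import numpy as np
from numpy.testing import assert_array_equal
from scipy.sparse import csr_matrix

dat_dtypes = {dt: np.eye(3).astype(dt) for dt in (np.int_, np.float_)}
datsp_dtypes = {dt: csr_matrix(dat_dtypes[dt]) for dt in dat_dtypes}

def check(dtype):
    # the arrays are fetched by dtype here instead of being passed as yield args
    dat, datsp = dat_dtypes[dtype], datsp_dtypes[dtype]
    assert_array_equal(dat * 2, (datsp * 2).todense())

def test_mul_scalar():
    for dtype in dat_dtypes:
        yield check, dtype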
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index c856f94..beba9af 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -366,7 +366,10 @@ class _TestCommon: assert_(B is C) def test_mul_scalar(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + assert_array_equal(dat*2,(datsp*2).todense()) assert_array_equal(dat*17.3,(datsp*17.3).todense()) @@ -375,12 +378,13 @@ class _TestCommon: (self.__class__ == TestLIL or self.__class__ == TestDOK)) msg = "LIL and DOK type's __mul__ method has problems with int data." - yield (dec.knownfailureif(fails, msg)(check), - self.dat_dtypes[dtype], - self.datsp_dtypes[dtype]) + yield dec.knownfailureif(fails, msg)(check), dtype def test_rmul_scalar(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + assert_array_equal(2*dat,(2*datsp).todense()) assert_array_equal(17.3*dat,(17.3*datsp).todense()) @@ -389,12 +393,13 @@ class _TestCommon: (self.__class__ == TestLIL or self.__class__ == TestDOK)) msg = "LIL and DOK type's __rmul__ method has problems with int data." - yield (dec.knownfailureif(fails, msg)(check), - self.dat_dtypes[dtype], - self.datsp_dtypes[dtype]) + yield dec.knownfailureif(fails, msg)(check), dtype def test_add(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + a = dat.copy() a[0,2] = 2.0 b = datsp @@ -402,10 +407,13 @@ class _TestCommon: assert_array_equal(c, b.todense() + a) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype def test_radd(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + a = self.dat.copy() a[0,2] = 2.0 b = self.datsp @@ -413,10 +421,13 @@ class _TestCommon: assert_array_equal(c, a + b.todense()) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype def test_sub(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + assert_array_equal((datsp - datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) @@ -424,10 +435,13 @@ class _TestCommon: assert_array_equal((A - datsp).todense(),A.todense() - dat) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype def test_rsub(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + assert_array_equal((dat - datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) assert_array_equal((datsp - dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) @@ -438,10 +452,13 @@ class _TestCommon: assert_array_equal(datsp - A.todense(),dat - A.todense()) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype def test_add0(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + # Adding 0 to a sparse matrix assert_array_equal((datsp + 0).todense(), dat) # use sum (which takes 0 as a starting value) @@ -450,7 +467,7 @@ class _TestCommon: assert_almost_equal(sumS.todense(), sumD) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype 
def test_elementwise_multiply(self): # real/real @@ -701,7 +718,10 @@ class _TestCommon: assert_equal(fn(blocksize=(X,Y)).todense(), A) def test_transpose(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + a = datsp.transpose() b = dat.transpose() assert_array_equal(a.todense(), b) @@ -710,10 +730,13 @@ class _TestCommon: assert_array_equal(self.spmatrix((3,4)).T.todense(), zeros((4,3))) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype def test_add_dense(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + # adding a dense matrix to a sparse matrix sum1 = dat + datsp assert_array_equal(sum1, dat + dat) @@ -721,11 +744,14 @@ class _TestCommon: assert_array_equal(sum2, dat + dat) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype def test_sub_dense(self): # subtracting a dense matrix to/from a sparse matrix - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + # Behavior is different for bool. if dat.dtype == bool: sum1 = dat - datsp @@ -741,7 +767,7 @@ class _TestCommon: assert_array_equal(sum2, dat + dat) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype def test_copy(self): # Check whether the copy=True and copy=False keywords work @@ -804,7 +830,10 @@ class _TestCommon: class _TestInplaceArithmetic: def test_imul_scalar(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + a = datsp.copy() a *= 2 b = dat.copy() @@ -818,10 +847,13 @@ class _TestInplaceArithmetic: assert_array_equal(b, a.todense()) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype def test_idiv_scalar(self): - def check(dat, datsp): + def check(dtype): + dat = self.dat_dtypes[dtype] + datsp = self.datsp_dtypes[dtype] + a = datsp.copy() a /= 2 b = dat.copy() @@ -835,7 +867,7 @@ class _TestInplaceArithmetic: assert_array_equal(b, a.todense()) for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + yield check, dtype class _TestGetSet: commit 5c470e6d8e596c596278a555b40f12d954e150aa Author: Blake Griffith Date: Fri Jun 7 15:51:40 2013 -0500 STY: checked_dtypes is now simpler list of numpy types. 
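For reference, a quick check (hypothetical, assuming the era's NumPy where np.typeDict is still available) that the explicit scalar types introduced by the diff below resolve to the same dtypes as the old np.typeDict lookups:

import numpy as np

assert np.dtype(np.float_) == np.dtype(np.typeDict['float'])
assert np.dtype(np.bool_) == np.dtype(np.typeDict['bool'])
# np.int_ likewise corresponds to np.typeDict['int'], the platform default integer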
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index a96ad4c..c856f94 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1713,7 +1713,7 @@ def sparse_test_class(getset=True, slicing=True, slicing_assign=True, class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csr_matrix - checked_dtypes = [np.typeDict[x] for x in ['bool', 'int', 'float']] + checked_dtypes = [np.bool_, np.int_, np.float_] def test_constructor1(self): b = matrix([[0,4,0], @@ -1852,7 +1852,7 @@ class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, class TestCSC(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csc_matrix - checked_dtypes = [np.typeDict[x] for x in ['bool', 'int', 'float']] + checked_dtypes = [np.bool_, np.int_, np.float_] def test_constructor1(self): b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') commit 91ace7fa53cf262a611059d52b5a5e0b51ddd3b0 Author: Blake Griffith Date: Fri Jun 7 15:45:29 2013 -0500 REV: Reverting tests for bool and int data with the sum and mean methods. To be submitted in a seperate PR. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index ba9d0ed..a96ad4c 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -165,24 +165,18 @@ class _TestCommon: assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1]) def test_sum(self): - def check(dat, datsp): - # Does the matrix's .sum(axis=...) method work? - assert_array_equal(dat.sum(), datsp.sum()) - assert_array_equal(dat.sum(axis=None), datsp.sum(axis=None)) - assert_almost_equal(dat.sum(axis=0), datsp.sum(axis=0)) - assert_almost_equal(dat.sum(axis=1), datsp.sum(axis=1)) - for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + # Does the matrix's .sum(axis=...) method work? + assert_array_equal(self.dat.sum(), self.datsp.sum()) + assert_array_equal(self.dat.sum(axis=None), self.datsp.sum(axis=None)) + assert_almost_equal(self.dat.sum(axis=0), self.datsp.sum(axis=0)) + assert_almost_equal(self.dat.sum(axis=1), self.datsp.sum(axis=1)) def test_mean(self): - def check(dat, datsp): - # Does the matrix's .mean(axis=...) method work? - assert_array_equal(dat.mean(), datsp.mean()) - assert_array_equal(dat.mean(axis=None), datsp.mean(axis=None)) - assert_almost_equal(dat.mean(axis=0), datsp.mean(axis=0)) - assert_almost_equal(dat.mean(axis=1), datsp.mean(axis=1)) - for dtype in self.checked_dtypes: - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + # Does the matrix's .mean(axis=...) method work? + assert_array_equal(self.dat.mean(), self.datsp.mean()) + assert_array_equal(self.dat.mean(axis=None), self.datsp.mean(axis=None)) + assert_almost_equal(self.dat.mean(axis=0), self.datsp.mean(axis=0)) + assert_almost_equal(self.dat.mean(axis=1), self.datsp.mean(axis=1)) def test_expm(self): M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) commit 4700a009c107de610cd96d204fb8b9fe229e7a1f Author: Blake Griffith Date: Fri Jun 7 14:57:42 2013 -0500 STY: self.supported_dtypes -> self.checked_dtype, to avoind name collision. 
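The rename below exists because the test classes were shadowing a module-level name. A minimal sketch of the collision (hypothetical class, assuming supported_dtypes is importable from scipy.sparse.sputils as it was at the time):

from scipy.sparse.sputils import supported_dtypes  # module-level list

class _TestCommonSketch(object):
    # before the rename this attribute was also named `supported_dtypes`,
    # so a bare `supported_dtypes` inside a test could refer to either the
    # full module-level list or the per-format subset set by subclasses
    checked_dtypes = supported_dtypes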
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index fd89a05..ba9d0ed 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -56,7 +56,7 @@ warnings.simplefilter('ignore', ComplexWarning) # TODO test has_sorted_indices class _TestCommon: """test common functionality shared by all sparse formats""" - supported_dtypes = supported_dtypes + checked_dtypes = supported_dtypes def __init__(self): # Cannonical data. @@ -67,7 +67,7 @@ class _TestCommon: # dtype. self.dat_dtypes = {} self.datsp_dtypes = {} - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: self.dat_dtypes[dtype] = self.dat.astype(dtype) self.datsp_dtypes[dtype] = self.spmatrix(self.dat.astype(dtype)) @@ -171,7 +171,7 @@ class _TestCommon: assert_array_equal(dat.sum(axis=None), datsp.sum(axis=None)) assert_almost_equal(dat.sum(axis=0), datsp.sum(axis=0)) assert_almost_equal(dat.sum(axis=1), datsp.sum(axis=1)) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_mean(self): @@ -181,7 +181,7 @@ class _TestCommon: assert_array_equal(dat.mean(axis=None), datsp.mean(axis=None)) assert_almost_equal(dat.mean(axis=0), datsp.mean(axis=0)) assert_almost_equal(dat.mean(axis=1), datsp.mean(axis=1)) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_expm(self): @@ -353,8 +353,6 @@ class _TestCommon: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) - # Note this is global supported_dtypes imported from - # sputils, not self.suppoted_dtypes. for x in supported_dtypes: assert_equal(S.astype(x).dtype, D.astype(x).dtype) # correct type assert_equal(S.astype(x).toarray(), D.astype(x)) # correct values @@ -378,7 +376,7 @@ class _TestCommon: assert_array_equal(dat*2,(datsp*2).todense()) assert_array_equal(dat*17.3,(datsp*17.3).todense()) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: fails = ((dtype == np.typeDict['int']) and (self.__class__ == TestLIL or self.__class__ == TestDOK)) @@ -392,7 +390,7 @@ class _TestCommon: assert_array_equal(2*dat,(2*datsp).todense()) assert_array_equal(17.3*dat,(17.3*datsp).todense()) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: fails = ((dtype == np.typeDict['int']) and (self.__class__ == TestLIL or self.__class__ == TestDOK)) @@ -409,7 +407,7 @@ class _TestCommon: c = b + a assert_array_equal(c, b.todense() + a) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_radd(self): @@ -420,7 +418,7 @@ class _TestCommon: c = a + b assert_array_equal(c, a + b.todense()) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_sub(self): @@ -431,7 +429,7 @@ class _TestCommon: assert_array_equal((datsp - A).todense(),dat - A.todense()) assert_array_equal((A - datsp).todense(),A.todense() - dat) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_rsub(self): @@ -445,7 +443,7 @@ class _TestCommon: assert_array_equal(A.todense() - datsp,A.todense() - dat) assert_array_equal(datsp - A.todense(),dat - A.todense()) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, 
self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_add0(self): @@ -457,7 +455,7 @@ class _TestCommon: sumD = sum([k * dat for k in range(1, 3)]) assert_almost_equal(sumS.todense(), sumD) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_elementwise_multiply(self): @@ -717,7 +715,7 @@ class _TestCommon: assert_array_equal(self.spmatrix((3,4)).T.todense(), zeros((4,3))) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_add_dense(self): @@ -728,7 +726,7 @@ class _TestCommon: sum2 = datsp + dat assert_array_equal(sum2, dat + dat) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_sub_dense(self): @@ -748,7 +746,7 @@ class _TestCommon: sum2 = (datsp + datsp + datsp) - dat assert_array_equal(sum2, dat + dat) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_copy(self): @@ -825,7 +823,7 @@ class _TestInplaceArithmetic: b *= 17.3 assert_array_equal(b, a.todense()) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_idiv_scalar(self): @@ -842,7 +840,7 @@ class _TestInplaceArithmetic: b /= 17.3 assert_array_equal(b, a.todense()) - for dtype in self.supported_dtypes: + for dtype in self.checked_dtypes: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] @@ -1549,8 +1547,6 @@ class _TestArithmetic: assert_array_equal((self.__Asp+self.__Bsp).todense(),self.__A+self.__B) # check conversions - # Note this is global supported_dtypes imported from sputils, - # not self.suppoted_dtypes. for x in supported_dtypes: A = self.__A.astype(x) Asp = self.spmatrix(A) @@ -1585,8 +1581,6 @@ class _TestArithmetic: # basic tests assert_array_equal((self.__Asp*self.__Bsp.T).todense(),self.__A*self.__B.T) - # Note this is global supported_dtypes imported from sputils, - # not self.suppoted_dtypes. 
for x in supported_dtypes: A = self.__A.astype(x) Asp = self.spmatrix(A) @@ -1725,7 +1719,7 @@ def sparse_test_class(getset=True, slicing=True, slicing_assign=True, class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csr_matrix - supported_dtypes = [np.typeDict[x] for x in ['bool', 'int', 'float']] + checked_dtypes = [np.typeDict[x] for x in ['bool', 'int', 'float']] def test_constructor1(self): b = matrix([[0,4,0], @@ -1864,7 +1858,7 @@ class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, class TestCSC(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csc_matrix - supported_dtypes = [np.typeDict[x] for x in ['bool', 'int', 'float']] + checked_dtypes = [np.typeDict[x] for x in ['bool', 'int', 'float']] def test_constructor1(self): b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') @@ -1991,7 +1985,7 @@ class TestDOK(sparse_test_class(slicing=False, fancy_assign=False, minmax=False)): spmatrix = dok_matrix - supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_mult(self): A = dok_matrix((10,10)) @@ -2136,7 +2130,7 @@ class TestDOK(sparse_test_class(slicing=False, class TestLIL(sparse_test_class(minmax=False)): spmatrix = lil_matrix - supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_dot(self): A = matrix(zeros((10,10))) @@ -2249,7 +2243,7 @@ class TestCOO(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spmatrix = coo_matrix - supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_constructor1(self): # unsorted triplet format @@ -2310,7 +2304,7 @@ class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=Fals fancy_indexing=False, fancy_assign=False, minmax=False)): spmatrix = dia_matrix - supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_constructor1(self): D = matrix([[1, 0, 3, 0], @@ -2330,7 +2324,7 @@ class TestBSR(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spmatrix = bsr_matrix - supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] + checked_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_constructor1(self): # check native BSR format constructor commit 6180ff5601f69bf32b5b6d0ac57cb848e53b2d7a Author: Blake Griffith Date: Fri Jun 7 14:49:43 2013 -0500 STY: Changes to mul & rmul tests as suggested by @pv. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index d81a14b..fd89a05 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -379,15 +379,13 @@ class _TestCommon: assert_array_equal(dat*17.3,(datsp*17.3).todense()) for dtype in self.supported_dtypes: - if (dtype == np.typeDict['int']) and ( - (self.__class__.__name__ == "TestLIL") or - (self.__class__.__name__ == "TestDOK")): - yield dec.knownfailureif( - True, - "LIL and DOK type's __mul__ method has problems with int data." 
- )(check) - continue - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + fails = ((dtype == np.typeDict['int']) and + (self.__class__ == TestLIL or + self.__class__ == TestDOK)) + msg = "LIL and DOK type's __mul__ method has problems with int data." + yield (dec.knownfailureif(fails, msg)(check), + self.dat_dtypes[dtype], + self.datsp_dtypes[dtype]) def test_rmul_scalar(self): def check(dat, datsp): @@ -395,15 +393,13 @@ class _TestCommon: assert_array_equal(17.3*dat,(17.3*datsp).todense()) for dtype in self.supported_dtypes: - if (dtype == np.typeDict['int']) and ( - (self.__class__.__name__ == "TestLIL") or - (self.__class__.__name__ == "TestDOK")): - yield dec.knownfailureif( - True, - "LIL and DOK type's __rmul__ method has problems with int data." - )(check) - continue - yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] + fails = ((dtype == np.typeDict['int']) and + (self.__class__ == TestLIL or + self.__class__ == TestDOK)) + msg = "LIL and DOK type's __rmul__ method has problems with int data." + yield (dec.knownfailureif(fails, msg)(check), + self.dat_dtypes[dtype], + self.datsp_dtypes[dtype]) def test_add(self): def check(dat, datsp): commit 40f06da6739cfcc18ccef9c1362686d169ddeb83 Author: Blake Griffith Date: Fri Jun 7 01:20:08 2013 -0500 PEP8: pep8'd my previous commits. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index e3d8d51..d81a14b 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -203,7 +203,7 @@ class _TestCommon: sM = self.spmatrix(M, shape=(3,3), dtype=dtype) sMinv = inv(sM) assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3)) - for dtype in [ float, bool ]: + for dtype in [float, bool]: yield check, dtype def test_from_array(self): @@ -353,7 +353,7 @@ class _TestCommon: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) - # Note this is global supported_dtypes imported from + # Note this is global supported_dtypes imported from # sputils, not self.suppoted_dtypes. for x in supported_dtypes: assert_equal(S.astype(x).dtype, D.astype(x).dtype) # correct type @@ -380,10 +380,10 @@ class _TestCommon: for dtype in self.supported_dtypes: if (dtype == np.typeDict['int']) and ( - (self.__class__.__name__ == "TestLIL") or + (self.__class__.__name__ == "TestLIL") or (self.__class__.__name__ == "TestDOK")): yield dec.knownfailureif( - True, + True, "LIL and DOK type's __mul__ method has problems with int data." )(check) continue @@ -393,13 +393,13 @@ class _TestCommon: def check(dat, datsp): assert_array_equal(2*dat,(2*datsp).todense()) assert_array_equal(17.3*dat,(17.3*datsp).todense()) - + for dtype in self.supported_dtypes: if (dtype == np.typeDict['int']) and ( - (self.__class__.__name__ == "TestLIL") or + (self.__class__.__name__ == "TestLIL") or (self.__class__.__name__ == "TestDOK")): yield dec.knownfailureif( - True, + True, "LIL and DOK type's __rmul__ method has problems with int data." )(check) continue @@ -1553,7 +1553,7 @@ class _TestArithmetic: assert_array_equal((self.__Asp+self.__Bsp).todense(),self.__A+self.__B) # check conversions - # Note this is global supported_dtypes imported from sputils, + # Note this is global supported_dtypes imported from sputils, # not self.suppoted_dtypes. 
for x in supported_dtypes: A = self.__A.astype(x) @@ -1589,7 +1589,7 @@ class _TestArithmetic: # basic tests assert_array_equal((self.__Asp*self.__Bsp.T).todense(),self.__A*self.__B.T) - # Note this is global supported_dtypes imported from sputils, + # Note this is global supported_dtypes imported from sputils, # not self.suppoted_dtypes. for x in supported_dtypes: A = self.__A.astype(x) @@ -1729,7 +1729,7 @@ def sparse_test_class(getset=True, slicing=True, slicing_assign=True, class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csr_matrix - supported_dtypes = [ np.typeDict[x] for x in [ 'bool', 'int', 'float' ] ] + supported_dtypes = [np.typeDict[x] for x in ['bool', 'int', 'float']] def test_constructor1(self): b = matrix([[0,4,0], @@ -1868,7 +1868,7 @@ class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, class TestCSC(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csc_matrix - supported_dtypes = [ np.typeDict[x] for x in [ 'bool', 'int', 'float' ] ] + supported_dtypes = [np.typeDict[x] for x in ['bool', 'int', 'float']] def test_constructor1(self): b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') @@ -1995,7 +1995,7 @@ class TestDOK(sparse_test_class(slicing=False, fancy_assign=False, minmax=False)): spmatrix = dok_matrix - supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] + supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_mult(self): A = dok_matrix((10,10)) @@ -2140,7 +2140,7 @@ class TestDOK(sparse_test_class(slicing=False, class TestLIL(sparse_test_class(minmax=False)): spmatrix = lil_matrix - supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] + supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_dot(self): A = matrix(zeros((10,10))) @@ -2253,7 +2253,7 @@ class TestCOO(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spmatrix = coo_matrix - supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] + supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_constructor1(self): # unsorted triplet format @@ -2314,7 +2314,7 @@ class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=Fals fancy_indexing=False, fancy_assign=False, minmax=False)): spmatrix = dia_matrix - supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] + supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_constructor1(self): D = matrix([[1, 0, 3, 0], @@ -2334,7 +2334,7 @@ class TestBSR(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spmatrix = bsr_matrix - supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] + supported_dtypes = [np.typeDict[x] for x in ['int', 'float']] def test_constructor1(self): # check native BSR format constructor commit 0d3ec0d63eb93fd808d9d1eb274c6bbd15255671 Author: Blake Griffith Date: Fri Jun 7 01:13:06 2013 -0500 TST: Parametric dtype tests (with bool) for sub, rsub, add0, transpose, add_dense, sub_dense, imul_scalar, & idiv_scalar. 
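One detail worth spelling out before the diff below: the dense-vs-sparse subtraction test grows a separate bool branch because scalar multiplication upcasts boolean data. A quick hypothetical illustration:

import numpy as np

b = np.ones((2, 2), dtype=bool)
# multiplying by a scalar upcasts away from bool ...
assert (3 * b).dtype != np.bool_
# ... while repeated addition keeps the boolean dtype, which is why the
# other branch adds manually to "avoid upcasting from scalar
# multiplication" (quoting the diff's own comment)
assert (b + b + b).dtype == np.bool_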
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index f48bd33..e3d8d51 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -428,29 +428,41 @@ class _TestCommon: yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_sub(self): - assert_array_equal((self.datsp - self.datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + def check(dat, datsp): + assert_array_equal((datsp - datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + + A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) + assert_array_equal((datsp - A).todense(),dat - A.todense()) + assert_array_equal((A - datsp).todense(),A.todense() - dat) - A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) - assert_array_equal((self.datsp - A).todense(),self.dat - A.todense()) - assert_array_equal((A - self.datsp).todense(),A.todense() - self.dat) + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_rsub(self): - assert_array_equal((self.dat - self.datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) - assert_array_equal((self.datsp - self.dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + def check(dat, datsp): + assert_array_equal((dat - datsp),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + assert_array_equal((datsp - dat),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) + + A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) + assert_array_equal((dat - A),dat - A.todense()) + assert_array_equal((A - dat),A.todense() - dat) + assert_array_equal(A.todense() - datsp,A.todense() - dat) + assert_array_equal(datsp - A.todense(),dat - A.todense()) - A = self.spmatrix(matrix([[1,0,0,4],[-1,0,0,0],[0,8,0,-5]],'d')) - assert_array_equal((self.dat - A),self.dat - A.todense()) - assert_array_equal((A - self.dat),A.todense() - self.dat) - assert_array_equal(A.todense() - self.datsp,A.todense() - self.dat) - assert_array_equal(self.datsp - A.todense(),self.dat - A.todense()) + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_add0(self): - # Adding 0 to a sparse matrix - assert_array_equal((self.datsp + 0).todense(), self.dat) - # use sum (which takes 0 as a starting value) - sumS = sum([k * self.datsp for k in range(1, 3)]) - sumD = sum([k * self.dat for k in range(1, 3)]) - assert_almost_equal(sumS.todense(), sumD) + def check(dat, datsp): + # Adding 0 to a sparse matrix + assert_array_equal((datsp + 0).todense(), dat) + # use sum (which takes 0 as a starting value) + sumS = sum([k * datsp for k in range(1, 3)]) + sumD = sum([k * dat for k in range(1, 3)]) + assert_almost_equal(sumS.todense(), sumD) + + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_elementwise_multiply(self): # real/real @@ -701,26 +713,47 @@ class _TestCommon: assert_equal(fn(blocksize=(X,Y)).todense(), A) def test_transpose(self): - a = self.datsp.transpose() - b = self.dat.transpose() - assert_array_equal(a.todense(), b) - assert_array_equal(a.transpose().todense(), self.dat) + def check(dat, datsp): + a = datsp.transpose() + b = dat.transpose() + assert_array_equal(a.todense(), b) + assert_array_equal(a.transpose().todense(), dat) + + assert_array_equal(self.spmatrix((3,4)).T.todense(), zeros((4,3))) - assert_array_equal(self.spmatrix((3,4)).T.todense(), zeros((4,3))) + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_add_dense(self): - # adding a dense matrix to a sparse matrix - 
sum1 = self.dat + self.datsp - assert_array_equal(sum1, 2*self.dat) - sum2 = self.datsp + self.dat - assert_array_equal(sum2, 2*self.dat) + def check(dat, datsp): + # adding a dense matrix to a sparse matrix + sum1 = dat + datsp + assert_array_equal(sum1, dat + dat) + sum2 = datsp + dat + assert_array_equal(sum2, dat + dat) + + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_sub_dense(self): # subtracting a dense matrix to/from a sparse matrix - sum1 = 3*self.dat - self.datsp - assert_array_equal(sum1, 2*self.dat) - sum2 = 3*self.datsp - self.dat - assert_array_equal(sum2, 2*self.dat) + def check(dat, datsp): + # Behavior is different for bool. + if dat.dtype == bool: + sum1 = dat - datsp + assert_array_equal(sum1, dat - dat) + sum2 = datsp - dat + assert_array_equal(sum2, dat - dat) + else: + # Manually add to avoid upcasting from scalar + # multiplication. + sum1 = (dat + dat + dat) - datsp + assert_array_equal(sum1, dat + dat) + sum2 = (datsp + datsp + datsp) - dat + assert_array_equal(sum2, dat + dat) + + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_copy(self): # Check whether the copy=True and copy=False keywords work @@ -783,22 +816,38 @@ class _TestCommon: class _TestInplaceArithmetic: def test_imul_scalar(self): - a = self.datsp.copy() - a *= 2 - assert_array_equal(self.dat*2,a.todense()) + def check(dat, datsp): + a = datsp.copy() + a *= 2 + b = dat.copy() + b *= 2 + assert_array_equal(b, a.todense()) + + a = datsp.copy() + a *= 17.3 + b = dat.copy() + b *= 17.3 + assert_array_equal(b, a.todense()) - a = self.datsp.copy() - a *= 17.3 - assert_array_equal(self.dat*17.3,a.todense()) + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_idiv_scalar(self): - a = self.datsp.copy() - a /= 2 - assert_array_equal(self.dat/2,a.todense()) + def check(dat, datsp): + a = datsp.copy() + a /= 2 + b = dat.copy() + b /= 2 + assert_array_equal(b, a.todense()) + + a = datsp.copy() + a /= 17.3 + b = dat.copy() + b /= 17.3 + assert_array_equal(b, a.todense()) - a = self.datsp.copy() - a /= 17.3 - assert_array_equal(self.dat/17.3,a.todense()) + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] class _TestGetSet: commit bdfec4db39e74482b7d64f1157784e22d13d30a9 Author: Blake Griffith Date: Thu Jun 6 17:54:42 2013 -0500 TST: Added 4 knownfailures to the testsuite for LIL & DOK type's __mul__ and __rmul__ with int data. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index e23cfe9..f48bd33 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -379,20 +379,29 @@ class _TestCommon: assert_array_equal(dat*17.3,(datsp*17.3).todense()) for dtype in self.supported_dtypes: - # TODO show somehow in testing that int data case is - # skipped. - if dtype == np.typeDict['int']: + if (dtype == np.typeDict['int']) and ( + (self.__class__.__name__ == "TestLIL") or + (self.__class__.__name__ == "TestDOK")): + yield dec.knownfailureif( + True, + "LIL and DOK type's __mul__ method has problems with int data." 
+ )(check) continue yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_rmul_scalar(self): def check(dat, datsp): - assert_array_equal(2*self.dat,(2*self.datsp).todense()) - assert_array_equal(17.3*self.dat,(17.3*self.datsp).todense()) + assert_array_equal(2*dat,(2*datsp).todense()) + assert_array_equal(17.3*dat,(17.3*datsp).todense()) for dtype in self.supported_dtypes: - # TODO show int is skipped. - if dtype == np.typeDict['int']: + if (dtype == np.typeDict['int']) and ( + (self.__class__.__name__ == "TestLIL") or + (self.__class__.__name__ == "TestDOK")): + yield dec.knownfailureif( + True, + "LIL and DOK type's __rmul__ method has problems with int data." + )(check) continue yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] commit 5c5c4fd6e350e144c52d84fd735ce445613e611b Author: Blake Griffith Date: Thu Jun 6 01:37:11 2013 -0500 TST: Added bool dtype testing to tests for rmul_scalar, mul_scalar, add, and radd. My solution for skipping a bug I found is LIL's multiplication is pretty hackish. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 455f98d..e23cfe9 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -374,26 +374,49 @@ class _TestCommon: assert_(B is C) def test_mul_scalar(self): - assert_array_equal(self.dat*2,(self.datsp*2).todense()) - assert_array_equal(self.dat*17.3,(self.datsp*17.3).todense()) + def check(dat, datsp): + assert_array_equal(dat*2,(datsp*2).todense()) + assert_array_equal(dat*17.3,(datsp*17.3).todense()) + + for dtype in self.supported_dtypes: + # TODO show somehow in testing that int data case is + # skipped. + if dtype == np.typeDict['int']: + continue + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_rmul_scalar(self): - assert_array_equal(2*self.dat,(2*self.datsp).todense()) - assert_array_equal(17.3*self.dat,(17.3*self.datsp).todense()) + def check(dat, datsp): + assert_array_equal(2*self.dat,(2*self.datsp).todense()) + assert_array_equal(17.3*self.dat,(17.3*self.datsp).todense()) + + for dtype in self.supported_dtypes: + # TODO show int is skipped. + if dtype == np.typeDict['int']: + continue + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_add(self): - a = self.dat.copy() - a[0,2] = 2.0 - b = self.datsp - c = b + a - assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]]) + def check(dat, datsp): + a = dat.copy() + a[0,2] = 2.0 + b = datsp + c = b + a + assert_array_equal(c, b.todense() + a) + + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_radd(self): - a = self.dat.copy() - a[0,2] = 2.0 - b = self.datsp - c = a + b - assert_array_equal(c,[[2,0,2,4],[6,0,2,0],[0,4,0,0]]) + def check(dat, datsp): + a = self.dat.copy() + a[0,2] = 2.0 + b = self.datsp + c = a + b + assert_array_equal(c, a + b.todense()) + + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_sub(self): assert_array_equal((self.datsp - self.datsp).todense(),[[0,0,0,0],[0,0,0,0],[0,0,0,0]]) commit 0dc50ce47a40278b93a13703d186e018d70e6705 Author: Blake Griffith Date: Thu Jun 6 00:09:42 2013 -0500 TST: Added bool testing to test_inv.Added bool, to CSC and CSR's supported dtypes. Added comments noting global supported dtypes. 
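Before the diff below, a standalone sketch of the inverse round-trip it parametrizes over float and bool (hypothetical, assuming inv comes from scipy.sparse.linalg as in the test module's imports):

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import inv

M = np.array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], dtype=float)
sM = csc_matrix(M)
# inv(sM) * sM should come back as (numerically) the identity
assert np.allclose(inv(sM).dot(sM).todense(), np.eye(3))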
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 4d59835..455f98d 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -198,10 +198,13 @@ class _TestCommon: assert_array_almost_equal((sNexp - Nexp), zeros((3, 3))) def test_inv(self): - M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) - sM = self.spmatrix(M, shape=(3,3), dtype=float) - sMinv = inv(sM) - assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3)) + def check(dtype): + M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], dtype) + sM = self.spmatrix(M, shape=(3,3), dtype=dtype) + sMinv = inv(sM) + assert_array_almost_equal(sMinv.dot(sM).todense(), np.eye(3)) + for dtype in [ float, bool ]: + yield check, dtype def test_from_array(self): A = array([[1,0,0],[2,3,4],[0,5,0],[0,0,0]]) @@ -350,6 +353,8 @@ class _TestCommon: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=np.ComplexWarning) + # Note this is global supported_dtypes imported from + # sputils, not self.suppoted_dtypes. for x in supported_dtypes: assert_equal(S.astype(x).dtype, D.astype(x).dtype) # correct type assert_equal(S.astype(x).toarray(), D.astype(x)) # correct values @@ -1467,6 +1472,8 @@ class _TestArithmetic: assert_array_equal((self.__Asp+self.__Bsp).todense(),self.__A+self.__B) # check conversions + # Note this is global supported_dtypes imported from sputils, + # not self.suppoted_dtypes. for x in supported_dtypes: A = self.__A.astype(x) Asp = self.spmatrix(A) @@ -1501,6 +1508,8 @@ class _TestArithmetic: # basic tests assert_array_equal((self.__Asp*self.__Bsp.T).todense(),self.__A*self.__B.T) + # Note this is global supported_dtypes imported from sputils, + # not self.suppoted_dtypes. for x in supported_dtypes: A = self.__A.astype(x) Asp = self.spmatrix(A) @@ -1639,7 +1648,7 @@ def sparse_test_class(getset=True, slicing=True, slicing_assign=True, class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csr_matrix - supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] + supported_dtypes = [ np.typeDict[x] for x in [ 'bool', 'int', 'float' ] ] def test_constructor1(self): b = matrix([[0,4,0], @@ -1778,7 +1787,7 @@ class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, class TestCSC(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csc_matrix - supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] + supported_dtypes = [ np.typeDict[x] for x in [ 'bool', 'int', 'float' ] ] def test_constructor1(self): b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') commit c869d34d1785527db5a3ada4607763b94648805d Author: Blake Griffith Date: Wed Jun 5 19:39:47 2013 -0500 TST: Made supported_dtypes an attribute which is modified in the derived. Matrix type classes. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index af6742f..4d59835 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -56,9 +56,9 @@ warnings.simplefilter('ignore', ComplexWarning) # TODO test has_sorted_indices class _TestCommon: """test common functionality shared by all sparse formats""" + supported_dtypes = supported_dtypes def __init__(self): - self.supported_dtypes = supported_dtypes # Cannonical data. 
self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d') self.datsp = self.spmatrix(self.dat) @@ -1639,6 +1639,7 @@ def sparse_test_class(getset=True, slicing=True, slicing_assign=True, class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csr_matrix + supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] def test_constructor1(self): b = matrix([[0,4,0], @@ -1777,6 +1778,7 @@ class TestCSR(sparse_test_class(slicing_assign=False, fancy_assign=False, class TestCSC(sparse_test_class(slicing_assign=False, fancy_assign=False, fancy_multidim_indexing=False)): spmatrix = csc_matrix + supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] def test_constructor1(self): b = matrix([[1,0,0,0],[0,0,1,0],[0,2,0,3]],'d') @@ -1903,6 +1905,7 @@ class TestDOK(sparse_test_class(slicing=False, fancy_assign=False, minmax=False)): spmatrix = dok_matrix + supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] def test_mult(self): A = dok_matrix((10,10)) @@ -2047,6 +2050,7 @@ class TestDOK(sparse_test_class(slicing=False, class TestLIL(sparse_test_class(minmax=False)): spmatrix = lil_matrix + supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] def test_dot(self): A = matrix(zeros((10,10))) @@ -2159,6 +2163,7 @@ class TestCOO(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spmatrix = coo_matrix + supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] def test_constructor1(self): # unsorted triplet format @@ -2219,6 +2224,7 @@ class TestDIA(sparse_test_class(getset=False, slicing=False, slicing_assign=Fals fancy_indexing=False, fancy_assign=False, minmax=False)): spmatrix = dia_matrix + supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] def test_constructor1(self): D = matrix([[1, 0, 3, 0], @@ -2238,6 +2244,7 @@ class TestBSR(sparse_test_class(getset=False, slicing=False, slicing_assign=False, fancy_indexing=False, fancy_assign=False)): spmatrix = bsr_matrix + supported_dtypes = [ np.typeDict[x] for x in [ 'int', 'float' ] ] def test_constructor1(self): # check native BSR format constructor commit c0d2f8f73f25fc425a19608ddb306e42d1e94f5a Author: Blake Griffith Date: Wed Jun 5 11:30:33 2013 -0500 TST: Made iteration over dtype data reliable in test_mean, test_sum. Added supported_dtype as attribute. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 1410d80..af6742f 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -58,6 +58,7 @@ class _TestCommon: """test common functionality shared by all sparse formats""" def __init__(self): + self.supported_dtypes = supported_dtypes # Cannonical data. self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d') self.datsp = self.spmatrix(self.dat) @@ -66,7 +67,7 @@ class _TestCommon: # dtype. 
self.dat_dtypes = {} self.datsp_dtypes = {} - for dtype in supported_dtypes: + for dtype in self.supported_dtypes: self.dat_dtypes[dtype] = self.dat.astype(dtype) self.datsp_dtypes[dtype] = self.spmatrix(self.dat.astype(dtype)) @@ -170,9 +171,8 @@ class _TestCommon: assert_array_equal(dat.sum(axis=None), datsp.sum(axis=None)) assert_almost_equal(dat.sum(axis=0), datsp.sum(axis=0)) assert_almost_equal(dat.sum(axis=1), datsp.sum(axis=1)) - for dat, datsp in zip(self.dat_dtypes.values(), - self.datsp_dtypes.values()): - yield check, dat, datsp + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_mean(self): def check(dat, datsp): @@ -181,9 +181,8 @@ class _TestCommon: assert_array_equal(dat.mean(axis=None), datsp.mean(axis=None)) assert_almost_equal(dat.mean(axis=0), datsp.mean(axis=0)) assert_almost_equal(dat.mean(axis=1), datsp.mean(axis=1)) - for dat, datsp in zip(self.dat_dtypes.values(), - self.datsp_dtypes.values()): - yield check, dat, datsp + for dtype in self.supported_dtypes: + yield check, self.dat_dtypes[dtype], self.datsp_dtypes[dtype] def test_expm(self): M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) commit 6bd75d792e10eee01d47efc86f5994f9a1b019d0 Author: Blake Griffith Date: Wed Jun 5 01:29:03 2013 -0500 TST: Added testing of all dtypes to sum & mean methods. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 4583e35..1410d80 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -58,9 +58,24 @@ class _TestCommon: """test common functionality shared by all sparse formats""" def __init__(self): + # Cannonical data. self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d') self.datsp = self.spmatrix(self.dat) + # Some sparse and dense matrices with data for every supported + # dtype. + self.dat_dtypes = {} + self.datsp_dtypes = {} + for dtype in supported_dtypes: + self.dat_dtypes[dtype] = self.dat.astype(dtype) + self.datsp_dtypes[dtype] = self.spmatrix(self.dat.astype(dtype)) + + # Check that the original data is equivalent to the + # corresponding dat_dtypes & datsp_dtypes. + assert_equal(self.dat, self.dat_dtypes[np.float64]) + assert_equal(self.datsp.todense(), + self.datsp_dtypes[np.float64].todense()) + def test_empty(self): # create empty matrices assert_equal(self.spmatrix((3,3)).todense(), np.zeros((3,3))) @@ -149,18 +164,26 @@ class _TestCommon: assert_array_equal(self.datsp.getcol(-1).todense(), self.dat[:,-1]) def test_sum(self): - # Does the matrix's .sum(axis=...) method work? - assert_array_equal(self.dat.sum(), self.datsp.sum()) - assert_array_equal(self.dat.sum(axis=None), self.datsp.sum(axis=None)) - assert_array_equal(self.dat.sum(axis=0), self.datsp.sum(axis=0)) - assert_array_equal(self.dat.sum(axis=1), self.datsp.sum(axis=1)) + def check(dat, datsp): + # Does the matrix's .sum(axis=...) method work? + assert_array_equal(dat.sum(), datsp.sum()) + assert_array_equal(dat.sum(axis=None), datsp.sum(axis=None)) + assert_almost_equal(dat.sum(axis=0), datsp.sum(axis=0)) + assert_almost_equal(dat.sum(axis=1), datsp.sum(axis=1)) + for dat, datsp in zip(self.dat_dtypes.values(), + self.datsp_dtypes.values()): + yield check, dat, datsp def test_mean(self): - # Does the matrix's .mean(axis=...) method work? 
- assert_array_equal(self.dat.mean(), self.datsp.mean()) - assert_array_equal(self.dat.mean(axis=None), self.datsp.mean(axis=None)) - assert_array_equal(self.dat.mean(axis=0), self.datsp.mean(axis=0)) - assert_array_equal(self.dat.mean(axis=1), self.datsp.mean(axis=1)) + def check(dat, datsp): + # Does the matrix's .mean(axis=...) method work? + assert_array_equal(dat.mean(), datsp.mean()) + assert_array_equal(dat.mean(axis=None), datsp.mean(axis=None)) + assert_almost_equal(dat.mean(axis=0), datsp.mean(axis=0)) + assert_almost_equal(dat.mean(axis=1), datsp.mean(axis=1)) + for dat, datsp in zip(self.dat_dtypes.values(), + self.datsp_dtypes.values()): + yield check, dat, datsp def test_expm(self): M = array([[1, 0, 2], [0, 0, 3], [-4, 5, 6]], float) commit 604303db018750dd14edeac441c5c3360f5b027c Author: Blake Griffith Date: Wed Jun 5 00:45:14 2013 -0500 ENH: Added bool as a supporte_dtype in sputils. diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index 2a0ca48..2ecd5d0 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -9,10 +9,10 @@ __all__ = ['upcast','getdtype','isscalarlike','isintlike', import numpy as np # keep this list syncronized with sparsetools -#supported_dtypes = ['int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', +#supported_dtypes = ['bool', 'int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32', # 'int64', 'uint64', 'float32', 'float64', # 'complex64', 'complex128'] -supported_dtypes = ['int8','uint8','short','ushort','intc','uintc', +supported_dtypes = ['bool', 'int8','uint8','short','ushort','intc','uintc', 'longlong','ulonglong','single','double','longdouble', 'csingle','cdouble','clongdouble'] supported_dtypes = [np.typeDict[x] for x in supported_dtypes] @@ -32,7 +32,7 @@ def upcast(*args): >>> upcast('int32') >>> upcast('bool') - + >>> upcast('int32','float32') >>> upcast('bool',complex,float) commit 2274cdcd13819864bdb8110cdd2d2ebfd89edc24 Author: Blake Griffith Date: Wed Jun 5 00:11:18 2013 -0500 TST: Removed dependence on TestCase. 
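With unittest.TestCase dropped from the class hierarchy in the diff below, exception checks go through numpy.testing instead of self.assertRaises. A small sketch of the replacement idiom (hypothetical example values):

import numpy as np
from numpy.testing import assert_raises
from scipy.sparse import csr_matrix

B = csr_matrix(np.eye(3))
# negative exponents are rejected by spmatrix.__pow__, and assert_raises
# checks that without needing a TestCase instance
assert_raises(Exception, B.__pow__, -1)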
diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index a0f359a..4583e35 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -30,7 +30,7 @@ from numpy import arange, zeros, array, dot, matrix, asmatrix, asarray, \ import random from numpy.testing import assert_raises, assert_equal, assert_array_equal, \ assert_array_almost_equal, assert_almost_equal, assert_, \ - dec, TestCase, run_module_suite + dec, run_module_suite import scipy.linalg @@ -57,7 +57,7 @@ warnings.simplefilter('ignore', ComplexWarning) class _TestCommon: """test common functionality shared by all sparse formats""" - def setUp(self): + def __init__(self): self.dat = matrix([[1,0,0,2],[3,0,1,0],[0,2,0,0]],'d') self.datsp = self.spmatrix(self.dat) @@ -484,11 +484,11 @@ class _TestCommon: # invalid exponents for exponent in [-1, 2.2, 1 + 3j]: - self.assertRaises(Exception, B.__pow__, exponent) + assert_raises(Exception, B.__pow__, exponent) # nonsquare matrix B = self.spmatrix(A[:3,:]) - self.assertRaises(Exception, B.__pow__, 1) + assert_raises(Exception, B.__pow__, 1) def test_rmatvec(self): M = self.spmatrix(matrix([[3,0,0],[0,1,0],[2,0,3.0],[2,3,0]])) @@ -1593,8 +1593,7 @@ def sparse_test_class(getset=True, slicing=True, slicing_assign=True, fancy_indexing and fancy_multidim_indexing), _possibly_unimplemented(_TestFancyMultidimAssign, fancy_multidim_assign and fancy_assign), - _possibly_unimplemented(_TestMinMax, minmax), - TestCase) + _possibly_unimplemented(_TestMinMax, minmax)) # check that test names do not clash names = {} commit f1f8f18ad3be9484bf7e47f96d0923a17f76b28f Author: Blake Griffith Date: Mon Jun 3 19:18:31 2013 -0500 MAINT: Removed extraneous #include diff --git a/scipy/sparse/sparsetools/bool_ops.h b/scipy/sparse/sparsetools/bool_ops.h index c84e5c8..faccdfe 100644 --- a/scipy/sparse/sparsetools/bool_ops.h +++ b/scipy/sparse/sparsetools/bool_ops.h @@ -6,7 +6,6 @@ */ #include -#include typedef npy_int8 npy_bool_wrapper; commit c6ea8cca3d0cb512b997d5bbbb64ccaa55c8aa63 Author: Blake Griffith Date: Fri May 31 11:15:42 2013 -0500 TST: Added tests for .toarray() & .todense() methods for dtype=bool spmatrices. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 55bef0c..a0f359a 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -276,6 +276,10 @@ class _TestCommon: dense_dot_dense = self.dat * b check2 = self.datsp.todense() * b assert_array_equal(dense_dot_dense, check2) + # Check bool data works. + spbool = self.spmatrix(self.dat, dtype=bool) + matbool = self.dat.astype(bool) + assert_array_equal(spbool.todense(), matbool) def test_toarray(self): # Check C-contiguous (default). @@ -310,6 +314,10 @@ class _TestCommon: dense_dot_dense = dot(dat, b) check2 = dot(self.datsp.toarray(), b) assert_array_equal(dense_dot_dense, check2) + # Check bool data works. 
+ spbool = self.spmatrix(self.dat, dtype=bool) + arrbool = dat.astype(bool) + assert_array_equal(spbool.toarray(), arrbool) def test_astype(self): D = array([[1.0 + 3j, 0, 0], commit f3a0025bfab83acdfe16188b19e7f6fa77601625 Author: Blake Griffith Date: Thu May 30 21:53:39 2013 -0500 ENH: Regenerated *.py and *.cxx files with SWIG diff --git a/scipy/sparse/sparsetools/bsr.py b/scipy/sparse/sparsetools/bsr.py index 2eb4275..27b7a6f 100644 Binary files a/scipy/sparse/sparsetools/bsr.py and b/scipy/sparse/sparsetools/bsr.py differ diff --git a/scipy/sparse/sparsetools/bsr_wrap.cxx b/scipy/sparse/sparsetools/bsr_wrap.cxx index ede7c09..d1bd5ae 100644 Binary files a/scipy/sparse/sparsetools/bsr_wrap.cxx and b/scipy/sparse/sparsetools/bsr_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/coo.py b/scipy/sparse/sparsetools/coo.py index 48d8a03..d06f0bd 100644 Binary files a/scipy/sparse/sparsetools/coo.py and b/scipy/sparse/sparsetools/coo.py differ diff --git a/scipy/sparse/sparsetools/coo_wrap.cxx b/scipy/sparse/sparsetools/coo_wrap.cxx index 9a7f8c6..b57bdf4 100644 Binary files a/scipy/sparse/sparsetools/coo_wrap.cxx and b/scipy/sparse/sparsetools/coo_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/csc.py b/scipy/sparse/sparsetools/csc.py index a1ed064..fdb034f 100644 Binary files a/scipy/sparse/sparsetools/csc.py and b/scipy/sparse/sparsetools/csc.py differ diff --git a/scipy/sparse/sparsetools/csc_wrap.cxx b/scipy/sparse/sparsetools/csc_wrap.cxx index 51e568f..4a0f49b 100644 Binary files a/scipy/sparse/sparsetools/csc_wrap.cxx and b/scipy/sparse/sparsetools/csc_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/csgraph.py b/scipy/sparse/sparsetools/csgraph.py index daf9b61..1805577 100644 Binary files a/scipy/sparse/sparsetools/csgraph.py and b/scipy/sparse/sparsetools/csgraph.py differ diff --git a/scipy/sparse/sparsetools/csgraph_wrap.cxx b/scipy/sparse/sparsetools/csgraph_wrap.cxx index 9dae5ea..afb45e3 100644 Binary files a/scipy/sparse/sparsetools/csgraph_wrap.cxx and b/scipy/sparse/sparsetools/csgraph_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/csr.py b/scipy/sparse/sparsetools/csr.py index a2d1024..b15e2db 100644 Binary files a/scipy/sparse/sparsetools/csr.py and b/scipy/sparse/sparsetools/csr.py differ diff --git a/scipy/sparse/sparsetools/csr_wrap.cxx b/scipy/sparse/sparsetools/csr_wrap.cxx index 44a942d..62b2267 100644 Binary files a/scipy/sparse/sparsetools/csr_wrap.cxx and b/scipy/sparse/sparsetools/csr_wrap.cxx differ diff --git a/scipy/sparse/sparsetools/dia.py b/scipy/sparse/sparsetools/dia.py index ae2b628..3195a92 100644 Binary files a/scipy/sparse/sparsetools/dia.py and b/scipy/sparse/sparsetools/dia.py differ diff --git a/scipy/sparse/sparsetools/dia_wrap.cxx b/scipy/sparse/sparsetools/dia_wrap.cxx index a4de7d3..b85310f 100644 Binary files a/scipy/sparse/sparsetools/dia_wrap.cxx and b/scipy/sparse/sparsetools/dia_wrap.cxx differ commit 54c6a6491423f42880d29afd613767562ad9f36c Author: Blake Griffith Date: Thu May 30 21:50:21 2013 -0500 ENH: numpy.i & sparsetools.i interface files now include the bool_ops.h file to handle the npy_bool dtype. 
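The bool round trip exercised by the todense/toarray checks above can be reproduced directly. A minimal sketch, assuming a SciPy build that includes the bool support added in these commits; csr_matrix again stands in for self.spmatrix.

import numpy as np
from scipy.sparse import csr_matrix

dat = np.matrix([[1, 0, 0, 2], [3, 0, 1, 0], [0, 2, 0, 0]], dtype='d')
spbool = csr_matrix(dat, dtype=bool)
matbool = dat.astype(bool)

# Converting back to dense should give the boolean view of the original data,
# whether we ask for a matrix (todense) or an ndarray (toarray).
assert spbool.dtype == np.bool_
assert (spbool.todense() == matbool).all()
assert (spbool.toarray() == np.asarray(matbool)).all()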
diff --git a/scipy/sparse/sparsetools/numpy.i b/scipy/sparse/sparsetools/numpy.i index 1136017..43dd362 100644 --- a/scipy/sparse/sparsetools/numpy.i +++ b/scipy/sparse/sparsetools/numpy.i @@ -6,6 +6,7 @@ #include "stdio.h" #include #include "complex_ops.h" +#include "bool_ops.h" /* The following code originally appeared in @@ -531,6 +532,7 @@ NPY_TYPECHECK(type, typecode) %enddef +INSTANTIATE_TYPEMAPS(npy_bool_wrapper, NPY_BOOL ) INSTANTIATE_TYPEMAPS(char, NPY_CHAR ) INSTANTIATE_TYPEMAPS(unsigned char, NPY_UBYTE ) INSTANTIATE_TYPEMAPS(signed char, NPY_BYTE ) diff --git a/scipy/sparse/sparsetools/sparsetools.i b/scipy/sparse/sparsetools/sparsetools.i index 9418605..280eea0 100644 --- a/scipy/sparse/sparsetools/sparsetools.i +++ b/scipy/sparse/sparsetools/sparsetools.i @@ -10,6 +10,7 @@ #include "Python.h" #include "numpy/arrayobject.h" #include "complex_ops.h" +#include "bool_ops.h" /*#include "sparsetools.h"*/ %} @@ -146,6 +147,7 @@ T_INPLACE_ARRAY2( ctype ) */ DECLARE_INDEX_TYPE( int ) +DECLARE_DATA_TYPE( npy_bool_wrapper ) DECLARE_DATA_TYPE( signed char ) DECLARE_DATA_TYPE( unsigned char ) DECLARE_DATA_TYPE( short ) @@ -173,6 +175,7 @@ DECLARE_DATA_TYPE( npy_clongdouble_wrapper ) %define INSTANTIATE_ALL( f_name ) /* 32-bit indices */ +%template(f_name) f_name; %template(f_name) f_name; %template(f_name) f_name; %template(f_name) f_name; commit 3b8d0f4c622b10d1c01e0ee7314238af543d7685 Author: Blake Griffith Date: Thu May 30 21:46:23 2013 -0500 ENH: Added bool_ops.h file which wraps npy_bool data to handle bool operations. diff --git a/scipy/sparse/sparsetools/bool_ops.h b/scipy/sparse/sparsetools/bool_ops.h new file mode 100644 index 0000000..c84e5c8 --- /dev/null +++ b/scipy/sparse/sparsetools/bool_ops.h @@ -0,0 +1,13 @@ +#ifndef BOOL_OPS_H +#define BOOL_OPS_H + +/* + * Functions to handle arithmetic operations on NumPy Bool values. + */ + +#include +#include + +typedef npy_int8 npy_bool_wrapper; + +#endif commit 8aabdaa09e335951103f01dc49d98fe93182b48c Author: Blake Griffith Date: Thu May 23 17:05:38 2013 -0500 Made docstring more descriptive. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 00e60ed..55bef0c 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -1,7 +1,10 @@ # # Authors: Travis Oliphant, Ed Schofield, Robert Cimrman, Nathan Bell, and others -""" Test functions for sparse matrices +""" Test functions for sparse matrices. Each class in the "Matrix class +based tests" section become subclasses of the classes in the "Generic +tests" section. This is done by the functions in the "Tailored base +class for generic tests" section. """ commit b0f486f0f24f7b2af21855704d0ef767f2c5f7c6 Author: Blake Griffith Date: Mon Apr 29 18:31:36 2013 -0500 BUG: can now multiply (1,M)x(1,N) or (M,1)x(1,N) vectors together, as with numpy.multiply. diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index ee36451..48c83a2 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -249,25 +249,21 @@ class _cs_matrix(_data_matrix, _minmax_mixin): if (isdense(other) or isinstance(other, tuple) or isinstance(other, list)): return np.multiply(self.todense(), other) - # Sparse matrix or vector. + # Sparse matrix or vector. if isspmatrix(other): if self.shape == other.shape: other = self.__class__(other) return self._binopt(other, '_elmul_') - # singl element + # Single element. 
elif other.shape == (1,1): return self.__mul__(other.tocsc().data[0]) elif self.shape == (1,1): return other.__mul__(self.tocsc().data[0]) - # a row times a column - if self.shape[::-1] == other.shape: - if self.shape[1] == other.shape[0] == 1: - return self._mul_sparse_matrix(other.tocsc()) - elif self.shape[0] == other.shape[1] == 1: - return other._mul_sparse_matrix(self.tocsc()) - else: - raise ValueError("inconsistent shapes") - + # A row times a column. + elif self.shape[1] == other.shape[0] == 1: + return self._mul_sparse_matrix(other.tocsc()) + elif self.shape[0] == other.shape[1] == 1: + return other._mul_sparse_matrix(self.tocsc()) # Row vector times matrix. other is a row. elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: other = dia_matrix((other.toarray().ravel(), [0]), commit f70b265172cf200cc84d575402aaccf48e9a2847 Author: Blake Griffith Date: Mon Apr 29 18:05:06 2013 -0500 TST: Tests for the new broadcasting functionality of the sparse.multiply method. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index ed18fb6..d3c9beb 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -423,18 +423,36 @@ class _TestCommon: Dsp = self.spmatrix(D) Esp = self.spmatrix(E) Fsp = self.spmatrix(F) - - assert_almost_equal( Fsp.multiply(6).todense(), F*6) #sparse/scalar - assert_almost_equal( Fsp.multiply(A), F*A) #sparse/dense - assert_almost_equal( Fsp.multiply(Bsp).todense(), F*B) #sparse/sparse - assert_almost_equal( Fsp.multiply(B), F*B) #sparse/dense - assert_almost_equal( Fsp.multiply(C), F*C) #sparse/dense - assert_almost_equal( Fsp.multiply(Dsp).todense(), F*D) #sparse/sparse - assert_almost_equal( Fsp.multiply(D), F*D) #sparse/dense - assert_almost_equal( Fsp.multiply(E), F*E) #sparse/dense - assert_almost_equal( Fsp.multiply(Esp).todense(), F*E) #spares/sparse - assert_almost_equal( Fsp.multiply(G), F*G) #sparse/dense + matrices = [A, B, C, D, E, F, G] + spmatrices = [Bsp, Dsp, Esp, Fsp] + # sparse/sparse + for i in spmatrices: + for j in spmatrices: + try: + dense_mult = np.multiply(i.todense(), j.todense()) + except ValueError: + assert_raises(ValueError, i.multiply, j) + continue + sp_mult = i.multiply(j) + if isspmatrix(sp_mult): + assert_almost_equal(sp_mult.todense(), dense_mult) + else: + assert_almost_equal(sp_mult, dense_mult) + + # sparse/dense + for i in spmatrices: + for j in matrices: + try: + dense_mult = np.multiply(i.todense(), j) + except ValueError: + assert_raises(ValueError, i.multiply, j) + continue + sp_mult = i.multiply(j) + if isspmatrix(sp_mult): + assert_almost_equal(sp_mult.todense(), dense_mult) + else: + assert_almost_equal(sp_mult, dense_mult) def test_elementwise_divide(self): expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]] commit a2f007cb2ff6acf8a03aacdaf1f92696fc0b4a10 Author: Blake Griffith Date: Mon Apr 29 18:04:04 2013 -0500 ENH: Added ability for sparse.multiply to work with sparse or dense vectors or scalars. 
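Taken together, the multiply() commits above and the earlier groundwork below make elementwise multiplication broadcast the way numpy.multiply does on the equivalent dense operands. A minimal sketch reusing the C/D/E/F/G values from the tests; the as_array helper exists only for this example, since multiply() may return a sparse or a dense result depending on the operands.

import numpy as np
from scipy.sparse import csr_matrix, issparse

D = np.array([[7, 9, -9]])                        # row vector, shape (1, 3)
E = np.array([[3], [2], [1]])                     # column vector, shape (3, 1)
F = np.array([[8, 6, 3], [-4, 3, 2], [6, 6, 6]])
G = [1, 2, 3]                                     # plain Python list
C = np.array([1, -1, 0])                          # 1-D vector, shape (3,)

Dsp, Esp, Fsp = csr_matrix(D), csr_matrix(E), csr_matrix(F)

def as_array(x):
    # Normalise a sparse or dense result to a plain ndarray for comparison.
    return np.asarray(x.todense()) if issparse(x) else np.asarray(x)

assert np.array_equal(as_array(Fsp.multiply(6)), F * 6)      # scalar
assert np.array_equal(as_array(Fsp.multiply(G)), F * G)      # list, broadcast per row
assert np.array_equal(as_array(Fsp.multiply(C)), F * C)      # 1-D vector
assert np.array_equal(as_array(Fsp.multiply(Dsp)), F * D)    # sparse row vector
assert np.array_equal(as_array(Fsp.multiply(E)), F * E)      # dense column vector
assert np.array_equal(as_array(Esp.multiply(Dsp)), E * D)    # (3,1) x (1,3) -> (3,3)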
diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index a7e7889..ee36451 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -254,19 +254,40 @@ class _cs_matrix(_data_matrix, _minmax_mixin): if self.shape == other.shape: other = self.__class__(other) return self._binopt(other, '_elmul_') - # row vector + # singl element + elif other.shape == (1,1): + return self.__mul__(other.tocsc().data[0]) + elif self.shape == (1,1): + return other.__mul__(self.tocsc().data[0]) + # a row times a column + if self.shape[::-1] == other.shape: + if self.shape[1] == other.shape[0] == 1: + return self._mul_sparse_matrix(other.tocsc()) + elif self.shape[0] == other.shape[1] == 1: + return other._mul_sparse_matrix(self.tocsc()) + else: + raise ValueError("inconsistent shapes") + + # Row vector times matrix. other is a row. elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: other = dia_matrix((other.toarray().ravel(), [0]), shape=self.shape) return self._mul_sparse_matrix(other) - # column vector - elif other.shape[1] == 1 and self.shape[1] == other.shape[0]: + # self is a row. + elif self.shape[0] == 1 and self.shape[1] == other.shape[1]: + copy = dia_matrix((self.toarray().ravel(), [0]), + shape=other.shape) + return other._mul_sparse_matrix(copy) + # Column vector times matrix. other is a column. + elif other.shape[1] == 1 and self.shape[0] == other.shape[0]: other = dia_matrix((other.toarray().ravel(), [0]), shape=self.shape) return other._mul_sparse_matrix(self) - # singl element - elif other.shape == (1,1): - return self.__mul__(other.tocsc().data[0]) + # self is a column. + elif self.shape[1] == 1 and self.shape[0] == other.shape[0]: + copy = dia_matrix((self.toarray().ravel(), [0]), + shape=other.shape) + return copy._mul_sparse_matrix(other) else: raise ValueError("inconsistent shapes") commit a994fff064e6cebd451185d05359f6f799e35bf6 Author: Blake Griffith Date: Sat Apr 27 13:26:59 2013 -0500 Added test to .multiply for scalar multiplication and lists. diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index b1938e0..ed18fb6 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -415,6 +415,7 @@ class _TestCommon: D = array([[7,9,-9]]) E = array([[3],[2],[1]]) F = array([[8,6,3],[-4,3,2],[6,6,6]]) + G = [1, 2, 3] # Rank 1 arrays can't be cast as spmatrices (A and C) so leave # them out. @@ -423,14 +424,17 @@ class _TestCommon: Esp = self.spmatrix(E) Fsp = self.spmatrix(F) + assert_almost_equal( Fsp.multiply(6).todense(), F*6) #sparse/scalar assert_almost_equal( Fsp.multiply(A), F*A) #sparse/dense assert_almost_equal( Fsp.multiply(Bsp).todense(), F*B) #sparse/sparse assert_almost_equal( Fsp.multiply(B), F*B) #sparse/dense assert_almost_equal( Fsp.multiply(C), F*C) #sparse/dense assert_almost_equal( Fsp.multiply(Dsp).todense(), F*D) #sparse/sparse assert_almost_equal( Fsp.multiply(D), F*D) #sparse/dense - assert_almost_equal( Fsp.multiply(E), F*E) - assert_almost_equal( Fsp.multiply(Esp).todense(), F*E) + assert_almost_equal( Fsp.multiply(E), F*E) #sparse/dense + assert_almost_equal( Fsp.multiply(Esp).todense(), F*E) #spares/sparse + assert_almost_equal( Fsp.multiply(G), F*G) #sparse/dense + def test_elementwise_divide(self): expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]] commit 0ac511eed08efbce7f73a3da3ba9ead45447c129 Author: Blake Griffith Date: Sat Apr 27 13:26:19 2013 -0500 ENH: Changed multiply to return dense matrix when multiplied with single element list or tuple. 
Raise value error when trying to multiply to sparse matrices that are the same size. diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index 45e450f..a7e7889 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -10,6 +10,7 @@ from scipy.lib.six.moves import xrange from .base import spmatrix, isspmatrix, SparseEfficiencyWarning from .data import _data_matrix, _minmax_mixin +from .dia import dia_matrix from . import sparsetools from .sputils import upcast, upcast_char, to_native, isdense, isshape, \ getdtype, isscalarlike, isintlike @@ -244,14 +245,9 @@ class _cs_matrix(_data_matrix, _minmax_mixin): # Scalar multiplication. if isscalarlike(other): return self.__mul__(other) - # List or tuple vector. - if isinstance(other, tuple) or isinstance(other, list): - if len(other) == 1: - return self.__mul__(other) - else: - return np.multiply(self.todense(), other) # Dense matrix or vector. - if isdense(other): + if (isdense(other) or isinstance(other, tuple) or + isinstance(other, list)): return np.multiply(self.todense(), other) # Sparse matrix or vector. if isspmatrix(other): @@ -260,19 +256,20 @@ class _cs_matrix(_data_matrix, _minmax_mixin): return self._binopt(other, '_elmul_') # row vector elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: - eye = np.eye(other.shape[1]) - other = np.multiply(eye, other.todense()) - other = self.__class__(other) + other = dia_matrix((other.toarray().ravel(), [0]), + shape=self.shape) return self._mul_sparse_matrix(other) # column vector elif other.shape[1] == 1 and self.shape[1] == other.shape[0]: - eye = np.eye(other.shape[0]) - other = np.multiply(eye, other.todense()) - other = self.__class__(other) + other = dia_matrix((other.toarray().ravel(), [0]), + shape=self.shape) return other._mul_sparse_matrix(self) # singl element elif other.shape == (1,1): return self.__mul__(other.tocsc().data[0]) + else: + raise ValueError("inconsistent shapes") + ########################### # Multiplication handlers # commit 0467990d29f2de125e6ff8d3392ad45508c0da0b Author: Blake Griffith Date: Fri Apr 26 23:35:04 2013 -0500 ENH: Refactored this method as suggested by @pv. diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index cd7cf6e..45e450f 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -1,5 +1,4 @@ -"""Base class for sparse matrix formats using compressed storage -""" +"""Base class for sparse matrix formats using compressed storage.""" from __future__ import division, print_function, absolute_import __all__ = [] @@ -17,7 +16,7 @@ from .sputils import upcast, upcast_char, to_native, isdense, isshape, \ class _cs_matrix(_data_matrix, _minmax_mixin): - """base matrix class for compressed row and column oriented matrices""" + """Base matrix class for compressed row and column oriented matrices.""" def __init__(self, arg1, shape=None, dtype=None, copy=False): _data_matrix.__init__(self) @@ -239,51 +238,41 @@ class _cs_matrix(_data_matrix, _minmax_mixin): def multiply(self, other): - """Point-wise multiplication by another matrix + """Point-wise multiplication by another matrix, vector, or + scalar. """ # Scalar multiplication. if isscalarlike(other): return self.__mul__(other) - # Catch 1D and 2D arrays with single elments. - elif other.size == 1: - # Small, might as well be dense. - if not isdense(other): - other = other.todense() - # 1D array with single element. - if other.ndim == 1: - return self.__mul__(other[0]) - # 2D array with single element. 
- elif other.ndim == 2: - return self.__mul__(other[0,0]) - # What could it be if not one of these? - - # Row vector multiplication. - # Check if other shape is a 1D or 2D row array. - if other.ndim == 1: - # cast as 2D array - other = np.array([other]) - if other.shape[0] == 1: - # Check that it is correct dimensions. - if other.shape[1] != self.shape[1]: - raise ValueError('inconsistent shapes') - # Can only support dense multiplication at the moment. - if not isdense(other): - other = other.todense() + # List or tuple vector. + if isinstance(other, tuple) or isinstance(other, list): + if len(other) == 1: + return self.__mul__(other) else: - # Cast other as diagonal matrix, then matrix multiply. - other = np.multiply(np.eye(other.shape[1]), other) - return np.dot(self.todense(), other) - - # Element by element-wise matrix multiplication. - elif other.shape != self.shape: - raise ValueError('inconsistent shapes') - + return np.multiply(self.todense(), other) + # Dense matrix or vector. if isdense(other): - return np.multiply(self.todense(),other) - else: - other = self.__class__(other) - return self._binopt(other,'_elmul_') - + return np.multiply(self.todense(), other) + # Sparse matrix or vector. + if isspmatrix(other): + if self.shape == other.shape: + other = self.__class__(other) + return self._binopt(other, '_elmul_') + # row vector + elif other.shape[0] == 1 and self.shape[1] == other.shape[1]: + eye = np.eye(other.shape[1]) + other = np.multiply(eye, other.todense()) + other = self.__class__(other) + return self._mul_sparse_matrix(other) + # column vector + elif other.shape[1] == 1 and self.shape[1] == other.shape[0]: + eye = np.eye(other.shape[0]) + other = np.multiply(eye, other.todense()) + other = self.__class__(other) + return other._mul_sparse_matrix(self) + # singl element + elif other.shape == (1,1): + return self.__mul__(other.tocsc().data[0]) ########################### # Multiplication handlers # diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index c470fbc..b1938e0 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -413,23 +413,24 @@ class _TestCommon: B = array([[-9]]) C = array([1,-1,0]) D = array([[7,9,-9]]) - E = array([[8,6,3],[-4,3,2],[6,6,6]]) + E = array([[3],[2],[1]]) + F = array([[8,6,3],[-4,3,2],[6,6,6]]) # Rank 1 arrays can't be cast as spmatrices (A and C) so leave # them out. 
Bsp = self.spmatrix(B) Dsp = self.spmatrix(D) Esp = self.spmatrix(E) + Fsp = self.spmatrix(F) - assert_almost_equal( Esp.multiply(A).todense(), E*A) #sparse/dense - - assert_almost_equal( Esp.multiply(Bsp).todense(), E*B) #sparse/sparse - assert_almost_equal( Esp.multiply(B).todense(), E*B) #sparse/dense - - assert_almost_equal( Esp.multiply(C), E*C) #sparse/dense - - assert_almost_equal( Esp.multiply(Dsp), E*D) #sparse/sparse - assert_almost_equal( Esp.multiply(D), E*D) #sparse/dense + assert_almost_equal( Fsp.multiply(A), F*A) #sparse/dense + assert_almost_equal( Fsp.multiply(Bsp).todense(), F*B) #sparse/sparse + assert_almost_equal( Fsp.multiply(B), F*B) #sparse/dense + assert_almost_equal( Fsp.multiply(C), F*C) #sparse/dense + assert_almost_equal( Fsp.multiply(Dsp).todense(), F*D) #sparse/sparse + assert_almost_equal( Fsp.multiply(D), F*D) #sparse/dense + assert_almost_equal( Fsp.multiply(E), F*E) + assert_almost_equal( Fsp.multiply(Esp).todense(), F*E) def test_elementwise_divide(self): expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]] commit 421e2626762d8f0f075d7b4c7de78b78ad41dad3 Author: Blake Griffith Date: Sat Apr 20 00:02:02 2013 -0500 ENH: Increase parity with numpy.multiply, sparse.multiply now broadcasts row vectors and scalars like numpy.array. diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index a0dd855..cd7cf6e 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -241,18 +241,40 @@ class _cs_matrix(_data_matrix, _minmax_mixin): def multiply(self, other): """Point-wise multiplication by another matrix """ - # scalar mult if necessary + # Scalar multiplication. if isscalarlike(other): return self.__mul__(other) - # Check if other shape is a vector (e.g. shape = (n,)) - if len(other.shape) == 1: - if self.shape[0] != 1: - raise ValueError('inconsistent shapes') - elif other.shape[0] != self.shape[1]: + # Catch 1D and 2D arrays with single elments. + elif other.size == 1: + # Small, might as well be dense. + if not isdense(other): + other = other.todense() + # 1D array with single element. + if other.ndim == 1: + return self.__mul__(other[0]) + # 2D array with single element. + elif other.ndim == 2: + return self.__mul__(other[0,0]) + # What could it be if not one of these? + + # Row vector multiplication. + # Check if other shape is a 1D or 2D row array. + if other.ndim == 1: + # cast as 2D array + other = np.array([other]) + if other.shape[0] == 1: + # Check that it is correct dimensions. + if other.shape[1] != self.shape[1]: raise ValueError('inconsistent shapes') + # Can only support dense multiplication at the moment. + if not isdense(other): + other = other.todense() else: - return np.multiply(self.todense(),other) + # Cast other as diagonal matrix, then matrix multiply. + other = np.multiply(np.eye(other.shape[1]), other) + return np.dot(self.todense(), other) + # Element by element-wise matrix multiplication. 
elif other.shape != self.shape: raise ValueError('inconsistent shapes') diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index e5e9c7e..c470fbc 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -408,6 +408,28 @@ class _TestCommon: assert_almost_equal( Asp.multiply(Dsp).todense(), A*D) #sparse/sparse assert_almost_equal( Asp.multiply(D), A*D) #sparse/dense + def test_elementwise_multiply_broadcast(self): + A = array([4]) + B = array([[-9]]) + C = array([1,-1,0]) + D = array([[7,9,-9]]) + E = array([[8,6,3],[-4,3,2],[6,6,6]]) + + # Rank 1 arrays can't be cast as spmatrices (A and C) so leave + # them out. + Bsp = self.spmatrix(B) + Dsp = self.spmatrix(D) + Esp = self.spmatrix(E) + + assert_almost_equal( Esp.multiply(A).todense(), E*A) #sparse/dense + + assert_almost_equal( Esp.multiply(Bsp).todense(), E*B) #sparse/sparse + assert_almost_equal( Esp.multiply(B).todense(), E*B) #sparse/dense + + assert_almost_equal( Esp.multiply(C), E*C) #sparse/dense + + assert_almost_equal( Esp.multiply(Dsp), E*D) #sparse/sparse + assert_almost_equal( Esp.multiply(D), E*D) #sparse/dense def test_elementwise_divide(self): expected = [[1,0,0,1],[1,0,1,0],[0,1,0,0]] commit bec490c9e85494b9a2df9b215a95bffd20e4e991 Author: Blake Griffith Date: Thu Apr 18 21:30:08 2013 -0500 DOC: added docs for my previous commits. diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index b8a6068..a0dd855 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -241,9 +241,10 @@ class _cs_matrix(_data_matrix, _minmax_mixin): def multiply(self, other): """Point-wise multiplication by another matrix """ - # Check if other shape is a vector (e.g. shape = (n,)) + # scalar mult if necessary if isscalarlike(other): return self.__mul__(other) + # Check if other shape is a vector (e.g. shape = (n,)) if len(other.shape) == 1: if self.shape[0] != 1: raise ValueError('inconsistent shapes') commit 4ce29ab2c09237c7bf2252eaf6379ba46233f49b Author: Blake Griffith Date: Thu Apr 18 21:17:09 2013 -0500 ENH: Added scalar multiplication to spmatrix.multiply method. diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index 39c456f..b8a6068 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -242,6 +242,8 @@ class _cs_matrix(_data_matrix, _minmax_mixin): """Point-wise multiplication by another matrix """ # Check if other shape is a vector (e.g. shape = (n,)) + if isscalarlike(other): + return self.__mul__(other) if len(other.shape) == 1: if self.shape[0] != 1: raise ValueError('inconsistent shapes') commit 954fc2e3a46b3f9aa6a087145e646f55796c4194 Author: Blake Griffith Date: Thu Apr 18 18:07:40 2013 -0500 BUG: partial fix to bug 1042, lets sparse.multiply work with vector array (e.g. array.shape = (6,)). diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index 2a6f52a..39c456f 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -241,7 +241,16 @@ class _cs_matrix(_data_matrix, _minmax_mixin): def multiply(self, other): """Point-wise multiplication by another matrix """ - if other.shape != self.shape: + # Check if other shape is a vector (e.g. shape = (n,)) + if len(other.shape) == 1: + if self.shape[0] != 1: + raise ValueError('inconsistent shapes') + elif other.shape[0] != self.shape[1]: + raise ValueError('inconsistent shapes') + else: + return np.multiply(self.todense(),other) + + elif other.shape != self.shape: raise ValueError('inconsistent shapes') if isdense(other):