Github user mengxr commented on a diff in the pull request:
https://github.com/apache/spark/pull/5355#discussion_r28042938
--- Diff: python/pyspark/mllib/linalg.py ---
@@ -687,6 +702,100 @@ def __eq__(self, other):
all(self.values == other.values))
+class SparseMatrix(object):
+ """Sparse Matrix stored in CSC format."""
+ def __init__(self, numRows, numCols, colPtrs, rowIndices, values,
+ isTransposed=False):
+ self.numRows = numRows
+ self.numCols = numCols
+ self.isTransposed = isTransposed
+ if isinstance(colPtrs, basestring):
+ self.colPtrs = np.frombuffer(colPtrs, dtype=np.uint64)
+ else:
+ self.colPtrs = np.asarray(colPtrs, dtype=np.uint64)
+
+ if self.isTransposed and self.colPtrs.size != numRows + 1:
+ raise ValueError("Expected colPtrs of size %d, got %d."
+ % (numRows + 1, self.colPtrs.size))
+ elif not self.isTransposed and self.colPtrs.size != numCols + 1:
+ raise ValueError("Expected colPtrs of size %d, got %d."
+ % (numCols + 1, self.colPtrs.size))
+ if isinstance(rowIndices, basestring):
+ self.rowIndices = np.frombuffer(rowIndices, dtype=np.uint64)
+ else:
+ self.rowIndices = np.asarray(rowIndices, dtype=np.uint64)
+ if isinstance(values, basestring):
+ self.values = np.frombuffer(values, dtype=np.float64)
+ else:
+ self.values = np.asarray(values, dtype=np.float64)
+
+ def __reduce__(self):
+ return SparseMatrix, (
+ self.numRows, self.numCols, self.colPtrs.tostring(),
+ self.rowIndices.tostring(), self.values.tostring(),
+ self.isTransposed
+ )
+
+ def __getitem__(self, indices):
+ i, j = indices
+ if i < 0 or i >= self.numRows:
+ raise ValueError("Row index %d is out of range [0, %d)"
+ % (i, self.numRows))
+ if j >= self.numCols or j < 0:
+ raise ValueError("Column index %d is out of range [0, %d)"
+ % (j, self.numCols))
+
+        # If a CSR matrix is given, then the row index should be searched
+        # for in colPtrs, and the column index should be searched for in the
+        # corresponding slice obtained from rowIndices.
+ if self.isTransposed:
+ j, i = i, j
+
+ nz = self.rowIndices[self.colPtrs[j]: self.colPtrs[j + 1]]
+ if nz.size == 0 or i > nz[-1]:
+ return 0.0
+ ind = np.searchsorted(nz, i)
+ if i == nz[ind]:
+ return self.values[self.colPtrs[j]: self.colPtrs[j + 1]][ind]
+ return 0.0
+
+ def _densify_values(self):
+ sparsearr = np.zeros(self.numRows * self.numCols, dtype=np.float64)
+
+ if self.isTransposed:
+ offset_margin = self.numCols
+ else:
+ offset_margin = self.numRows
+
+ offset = 0
+ for ptr in xrange(self.colPtrs.size - 1):
+ startptr = self.colPtrs[ptr]
+ endptr = self.colPtrs[ptr + 1]
+ sparsearr[offset + self.rowIndices[startptr: endptr]] = \
+ self.values[startptr: endptr]
+ offset += offset_margin
+ return sparsearr
+
+ def toArray(self):
+ """
+ Return an numpy.ndarray
+ """
+ if self.isTransposed:
+ order = 'C'
+ else:
+ order = 'F'
--- End diff --
I think we should always use Fortran order. We can modify
`_densify_values()` such that the output is always column-major.
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]