GNU Octave  4.0.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Pages
Sparse.cc
Go to the documentation of this file.
1 // Template sparse array class
2 /*
3 
4 Copyright (C) 2004-2015 David Bateman
5 Copyright (C) 1998-2004 Andy Adler
6 Copyright (C) 2010 VZLU Prague
7 
8 This file is part of Octave.
9 
10 Octave is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published by the
12 Free Software Foundation; either version 3 of the License, or (at your
13 option) any later version.
14 
15 Octave is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with Octave; see the file COPYING. If not, see
22 <http://www.gnu.org/licenses/>.
23 
24 */
25 
26 #ifdef HAVE_CONFIG_H
27 #include <config.h>
28 #endif
29 
30 #include <cassert>
31 
32 #include <algorithm>
33 #include <iostream>
34 #include <limits>
35 #include <sstream>
36 #include <vector>
37 
38 #include "Array.h"
39 #include "MArray.h"
40 #include "Array-util.h"
41 #include "Range.h"
42 #include "idx-vector.h"
43 #include "lo-error.h"
44 #include "quit.h"
45 #include "oct-locbuf.h"
46 
47 #include "Sparse.h"
48 #include "sparse-sort.h"
49 #include "sparse-util.h"
50 #include "oct-spparms.h"
51 #include "mx-inlines.cc"
52 
53 #include "PermMatrix.h"
54 
55 template <class T>
57  : rep (new typename Sparse<T>::SparseRep (a.rows (), a.cols (), a.rows ())),
58  dimensions (dim_vector (a.rows (), a.cols ()))
59 {
60  octave_idx_type n = a.rows ();
61  for (octave_idx_type i = 0; i <= n; i++)
62  cidx (i) = i;
63 
64  const Array<octave_idx_type> pv = a.col_perm_vec ();
65 
66  for (octave_idx_type i = 0; i < n; i++)
67  ridx (i) = pv(i);
68 
69  for (octave_idx_type i = 0; i < n; i++)
70  data (i) = 1.0;
71 }
72 
73 template <class T>
74 T&
76 {
78 
79  if (nzmx > 0)
80  {
81  for (i = c[_c]; i < c[_c + 1]; i++)
82  if (r[i] == _r)
83  return d[i];
84  else if (r[i] > _r)
85  break;
86 
87  // Ok, If we've gotten here, we're in trouble.. Have to create a
88  // new element in the sparse array. This' gonna be slow!!!
89  if (c[ncols] == nzmx)
90  {
91  (*current_liboctave_error_handler)
92  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
93  return *d;
94  }
95 
96  octave_idx_type to_move = c[ncols] - i;
97  if (to_move != 0)
98  {
99  for (octave_idx_type j = c[ncols]; j > i; j--)
100  {
101  d[j] = d[j-1];
102  r[j] = r[j-1];
103  }
104  }
105 
106  for (octave_idx_type j = _c + 1; j < ncols + 1; j++)
107  c[j] = c[j] + 1;
108 
109  d[i] = 0.;
110  r[i] = _r;
111 
112  return d[i];
113  }
114  else
115  {
116  (*current_liboctave_error_handler)
117  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
118  return *d;
119  }
120 }
121 
122 template <class T>
123 T
125 {
126  if (nzmx > 0)
127  for (octave_idx_type i = c[_c]; i < c[_c + 1]; i++)
128  if (r[i] == _r)
129  return d[i];
130  return T ();
131 }
132 
133 template <class T>
134 void
136 {
137  if (remove_zeros)
138  {
139  octave_idx_type i = 0;
140  octave_idx_type k = 0;
141  for (octave_idx_type j = 1; j <= ncols; j++)
142  {
143  octave_idx_type u = c[j];
144  for (i = i; i < u; i++)
145  if (d[i] != T ())
146  {
147  d[k] = d[i];
148  r[k++] = r[i];
149  }
150  c[j] = k;
151  }
152  }
153 
154  change_length (c[ncols]);
155 }
156 
157 template <class T>
158 void
160 {
161  for (octave_idx_type j = ncols; j > 0 && c[j] > nz; j--)
162  c[j] = nz;
163 
164  // We shall skip reallocation if we have less than 1/frac extra elements to
165  // discard.
166  static const int frac = 5;
167  if (nz > nzmx || nz < nzmx - nzmx/frac)
168  {
169  // Reallocate.
170  octave_idx_type min_nzmx = std::min (nz, nzmx);
171 
172  octave_idx_type * new_ridx = new octave_idx_type [nz];
173  std::copy (r, r + min_nzmx, new_ridx);
174 
175  delete [] r;
176  r = new_ridx;
177 
178  T * new_data = new T [nz];
179  std::copy (d, d + min_nzmx, new_data);
180 
181  delete [] d;
182  d = new_data;
183 
184  nzmx = nz;
185  }
186 }
187 
188 template <class T>
189 bool
191 {
192  return sparse_indices_ok (r, c, nrows, ncols, nnz ());
193 }
194 
195 template <class T>
197  : rep (0), dimensions (dim_vector (nr, nc))
198 {
199  if (val != T ())
200  {
201  rep = new typename Sparse<T>::SparseRep (nr, nc, dimensions.safe_numel ());
202 
203  octave_idx_type ii = 0;
204  xcidx (0) = 0;
205  for (octave_idx_type j = 0; j < nc; j++)
206  {
207  for (octave_idx_type i = 0; i < nr; i++)
208  {
209  xdata (ii) = val;
210  xridx (ii++) = i;
211  }
212  xcidx (j+1) = ii;
213  }
214  }
215  else
216  {
217  rep = new typename Sparse<T>::SparseRep (nr, nc, 0);
218  for (octave_idx_type j = 0; j < nc+1; j++)
219  xcidx (j) = 0;
220  }
221 }
222 
223 template <class T>
225  : rep (0), dimensions (dv)
226 {
227  if (dv.length () != 2)
229  ("Sparse::Sparse (const dim_vector&): dimension mismatch");
230  else
231  rep = new typename Sparse<T>::SparseRep (dv(0), dv(1), 0);
232 }
233 
234 template <class T>
236  : rep (0), dimensions (dv)
237 {
238 
239  // Work in unsigned long long to avoid overflow issues with numel
240  unsigned long long a_nel = static_cast<unsigned long long>(a.rows ()) *
241  static_cast<unsigned long long>(a.cols ());
242  unsigned long long dv_nel = static_cast<unsigned long long>(dv (0)) *
243  static_cast<unsigned long long>(dv (1));
244 
245  if (a_nel != dv_nel)
246  (*current_liboctave_error_handler)
247  ("Sparse::Sparse (const Sparse&, const dim_vector&): dimension mismatch");
248  else
249  {
250  dim_vector old_dims = a.dims ();
251  octave_idx_type new_nzmx = a.nnz ();
252  octave_idx_type new_nr = dv (0);
253  octave_idx_type new_nc = dv (1);
254  octave_idx_type old_nr = old_dims (0);
255  octave_idx_type old_nc = old_dims (1);
256 
257  rep = new typename Sparse<T>::SparseRep (new_nr, new_nc, new_nzmx);
258 
259  octave_idx_type kk = 0;
260  xcidx (0) = 0;
261  for (octave_idx_type i = 0; i < old_nc; i++)
262  for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++)
263  {
264  octave_idx_type tmp = i * old_nr + a.ridx (j);
265  octave_idx_type ii = tmp % new_nr;
266  octave_idx_type jj = (tmp - ii) / new_nr;
267  for (octave_idx_type k = kk; k < jj; k++)
268  xcidx (k+1) = j;
269  kk = jj;
270  xdata (j) = a.data (j);
271  xridx (j) = ii;
272  }
273  for (octave_idx_type k = kk; k < new_nc; k++)
274  xcidx (k+1) = new_nzmx;
275  }
276 }
277 
278 template <class T>
280  const idx_vector& c, octave_idx_type nr,
281  octave_idx_type nc, bool sum_terms,
282  octave_idx_type nzm)
283  : rep (0), dimensions ()
284 {
285  if (nr < 0)
286  nr = r.extent (0);
287  else if (r.extent (nr) > nr)
288  (*current_liboctave_error_handler) ("sparse: row index %d out of bound %d",
289  r.extent (nr), nr);
290 
291  if (nc < 0)
292  nc = c.extent (0);
293  else if (c.extent (nc) > nc)
295  ("sparse: column index %d out of bound %d", r.extent (nc), nc);
296 
297  dimensions = dim_vector (nr, nc);
298 
299  octave_idx_type n = a.numel ();
300  octave_idx_type rl = r.length (nr);
301  octave_idx_type cl = c.length (nc);
302  bool a_scalar = n == 1;
303  if (a_scalar)
304  {
305  if (rl != 1)
306  n = rl;
307  else if (cl != 1)
308  n = cl;
309  }
310 
311  if ((rl != 1 && rl != n) || (cl != 1 && cl != n))
312  (*current_liboctave_error_handler) ("sparse: dimension mismatch");
313 
314  // Only create rep after input validation to avoid memory leak.
315  rep = new typename Sparse<T>::SparseRep (nr, nc, (nzm > 0 ? nzm : 0));
316 
317  if (rl <= 1 && cl <= 1)
318  {
319  if (n == 1 && a(0) != T ())
320  {
321  change_capacity (nzm > 1 ? nzm : 1);
322  xcidx (0) = 0;
323  xridx (0) = r(0);
324  xdata (0) = a(0);
325 
326  for (octave_idx_type j = 0; j < nc; j++)
327  xcidx (j+1) = j >= c(0);
328  }
329  }
330  else if (a_scalar)
331  {
332  // This is completely specialized, because the sorts can be simplified.
333  T a0 = a(0);
334  if (a0 == T ())
335  {
336  // Do nothing, it's an empty matrix.
337  }
338  else if (cl == 1)
339  {
340  // Sparse column vector. Sort row indices.
341  idx_vector rs = r.sorted ();
342 
343  octave_quit ();
344 
345  const octave_idx_type *rd = rs.raw ();
346  // Count unique indices.
347  octave_idx_type new_nz = 1;
348  for (octave_idx_type i = 1; i < n; i++)
349  new_nz += rd[i-1] != rd[i];
350  // Allocate result.
351  change_capacity (nzm > new_nz ? nzm : new_nz);
352  xcidx (0) = 0;
353  xcidx (1) = new_nz;
354  octave_idx_type *rri = ridx ();
355  T *rrd = data ();
356 
357  octave_quit ();
358 
359  octave_idx_type k = -1;
360  octave_idx_type l = -1;
361 
362  if (sum_terms)
363  {
364  // Sum repeated indices.
365  for (octave_idx_type i = 0; i < n; i++)
366  {
367  if (rd[i] != l)
368  {
369  l = rd[i];
370  rri[++k] = rd[i];
371  rrd[k] = a0;
372  }
373  else
374  rrd[k] += a0;
375  }
376  }
377  else
378  {
379  // Pick the last one.
380  for (octave_idx_type i = 0; i < n; i++)
381  {
382  if (rd[i] != l)
383  {
384  l = rd[i];
385  rri[++k] = rd[i];
386  rrd[k] = a0;
387  }
388  }
389  }
390 
391  }
392  else
393  {
394  idx_vector rr = r;
395  idx_vector cc = c;
396  const octave_idx_type *rd = rr.raw ();
397  const octave_idx_type *cd = cc.raw ();
399  ci[0] = 0;
400  // Bin counts of column indices.
401  for (octave_idx_type i = 0; i < n; i++)
402  ci[cd[i]+1]++;
403  // Make them cumulative, shifted one to right.
404  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
405  {
406  octave_idx_type s1 = s + ci[i];
407  ci[i] = s;
408  s = s1;
409  }
410 
411  octave_quit ();
412 
413  // Bucket sort.
415  for (octave_idx_type i = 0; i < n; i++)
416  if (rl == 1)
417  sidx[ci[cd[i]+1]++] = rd[0];
418  else
419  sidx[ci[cd[i]+1]++] = rd[i];
420 
421  // Subsorts. We don't need a stable sort, all values are equal.
422  xcidx (0) = 0;
423  for (octave_idx_type j = 0; j < nc; j++)
424  {
425  std::sort (sidx + ci[j], sidx + ci[j+1]);
426  octave_idx_type l = -1;
427  octave_idx_type nzj = 0;
428  // Count.
429  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
430  {
431  octave_idx_type k = sidx[i];
432  if (k != l)
433  {
434  l = k;
435  nzj++;
436  }
437  }
438  // Set column pointer.
439  xcidx (j+1) = xcidx (j) + nzj;
440  }
441 
442  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
443  octave_idx_type *rri = ridx ();
444  T *rrd = data ();
445 
446  // Fill-in data.
447  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
448  {
449  octave_quit ();
450  octave_idx_type l = -1;
451  if (sum_terms)
452  {
453  // Sum adjacent terms.
454  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
455  {
456  octave_idx_type k = sidx[i];
457  if (k != l)
458  {
459  l = k;
460  rrd[++jj] = a0;
461  rri[jj] = k;
462  }
463  else
464  rrd[jj] += a0;
465  }
466  }
467  else
468  {
469  // Use the last one.
470  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
471  {
472  octave_idx_type k = sidx[i];
473  if (k != l)
474  {
475  l = k;
476  rrd[++jj] = a0;
477  rri[jj] = k;
478  }
479  }
480  }
481  }
482  }
483  }
484  else if (cl == 1)
485  {
486  // Sparse column vector. Sort row indices.
488  idx_vector rs = r.sorted (rsi);
489 
490  octave_quit ();
491 
492  const octave_idx_type *rd = rs.raw ();
493  const octave_idx_type *rdi = rsi.data ();
494  // Count unique indices.
495  octave_idx_type new_nz = 1;
496  for (octave_idx_type i = 1; i < n; i++)
497  new_nz += rd[i-1] != rd[i];
498  // Allocate result.
499  change_capacity (nzm > new_nz ? nzm : new_nz);
500  xcidx (0) = 0;
501  xcidx (1) = new_nz;
502  octave_idx_type *rri = ridx ();
503  T *rrd = data ();
504 
505  octave_quit ();
506 
507  octave_idx_type k = 0;
508  rri[k] = rd[0];
509  rrd[k] = a(rdi[0]);
510 
511  if (sum_terms)
512  {
513  // Sum repeated indices.
514  for (octave_idx_type i = 1; i < n; i++)
515  {
516  if (rd[i] != rd[i-1])
517  {
518  rri[++k] = rd[i];
519  rrd[k] = a(rdi[i]);
520  }
521  else
522  rrd[k] += a(rdi[i]);
523  }
524  }
525  else
526  {
527  // Pick the last one.
528  for (octave_idx_type i = 1; i < n; i++)
529  {
530  if (rd[i] != rd[i-1])
531  rri[++k] = rd[i];
532  rrd[k] = a(rdi[i]);
533  }
534  }
535 
536  maybe_compress (true);
537  }
538  else
539  {
540  idx_vector rr = r;
541  idx_vector cc = c;
542  const octave_idx_type *rd = rr.raw ();
543  const octave_idx_type *cd = cc.raw ();
545  ci[0] = 0;
546  // Bin counts of column indices.
547  for (octave_idx_type i = 0; i < n; i++)
548  ci[cd[i]+1]++;
549  // Make them cumulative, shifted one to right.
550  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
551  {
552  octave_idx_type s1 = s + ci[i];
553  ci[i] = s;
554  s = s1;
555  }
556 
557  octave_quit ();
558 
559  typedef std::pair<octave_idx_type, octave_idx_type> idx_pair;
560  // Bucket sort.
561  OCTAVE_LOCAL_BUFFER (idx_pair, spairs, n);
562  for (octave_idx_type i = 0; i < n; i++)
563  {
564  idx_pair& p = spairs[ci[cd[i]+1]++];
565  if (rl == 1)
566  p.first = rd[0];
567  else
568  p.first = rd[i];
569  p.second = i;
570  }
571 
572  // Subsorts. We don't need a stable sort, the second index stabilizes it.
573  xcidx (0) = 0;
574  for (octave_idx_type j = 0; j < nc; j++)
575  {
576  std::sort (spairs + ci[j], spairs + ci[j+1]);
577  octave_idx_type l = -1;
578  octave_idx_type nzj = 0;
579  // Count.
580  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
581  {
582  octave_idx_type k = spairs[i].first;
583  if (k != l)
584  {
585  l = k;
586  nzj++;
587  }
588  }
589  // Set column pointer.
590  xcidx (j+1) = xcidx (j) + nzj;
591  }
592 
593  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
594  octave_idx_type *rri = ridx ();
595  T *rrd = data ();
596 
597  // Fill-in data.
598  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
599  {
600  octave_quit ();
601  octave_idx_type l = -1;
602  if (sum_terms)
603  {
604  // Sum adjacent terms.
605  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
606  {
607  octave_idx_type k = spairs[i].first;
608  if (k != l)
609  {
610  l = k;
611  rrd[++jj] = a(spairs[i].second);
612  rri[jj] = k;
613  }
614  else
615  rrd[jj] += a(spairs[i].second);
616  }
617  }
618  else
619  {
620  // Use the last one.
621  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
622  {
623  octave_idx_type k = spairs[i].first;
624  if (k != l)
625  {
626  l = k;
627  rri[++jj] = k;
628  }
629  rrd[jj] = a(spairs[i].second);
630  }
631  }
632  }
633 
634  maybe_compress (true);
635  }
636 }
637 
638 template <class T>
640  : rep (0), dimensions (a.dims ())
641 {
642  if (dimensions.length () > 2)
644  ("Sparse::Sparse (const Array<T>&): dimension mismatch");
645  else
646  {
647  octave_idx_type nr = rows ();
648  octave_idx_type nc = cols ();
649  octave_idx_type len = a.length ();
650  octave_idx_type new_nzmx = 0;
651 
652  // First count the number of nonzero terms
653  for (octave_idx_type i = 0; i < len; i++)
654  if (a(i) != T ())
655  new_nzmx++;
656 
657  rep = new typename Sparse<T>::SparseRep (nr, nc, new_nzmx);
658 
659  octave_idx_type ii = 0;
660  xcidx (0) = 0;
661  for (octave_idx_type j = 0; j < nc; j++)
662  {
663  for (octave_idx_type i = 0; i < nr; i++)
664  if (a.elem (i,j) != T ())
665  {
666  xdata (ii) = a.elem (i,j);
667  xridx (ii++) = i;
668  }
669  xcidx (j+1) = ii;
670  }
671  }
672 }
673 
674 template <class T>
676 {
677  if (--rep->count == 0)
678  delete rep;
679 }
680 
681 template <class T>
682 Sparse<T>&
684 {
685  if (this != &a)
686  {
687  if (--rep->count == 0)
688  delete rep;
689 
690  rep = a.rep;
691  rep->count++;
692 
693  dimensions = a.dimensions;
694  }
695 
696  return *this;
697 }
698 
699 template <class T>
702 {
703  octave_idx_type retval = -1;
704 
705  octave_idx_type n = dimensions.length ();
706 
707  if (n > 0 && n == ra_idx.length ())
708  {
709  retval = ra_idx(--n);
710 
711  while (--n >= 0)
712  {
713  retval *= dimensions(n);
714  retval += ra_idx(n);
715  }
716  }
717  else
719  ("Sparse<T>::compute_index: invalid ra_idxing operation");
720 
721  return retval;
722 }
723 
// Report an out-of-range single-subscript access through the liboctave
// error handler and return a default-constructed (zero) value.
template <class T>
T
Sparse<T>::range_error (const char *fcn, octave_idx_type n) const
{
  (*current_liboctave_error_handler) ("%s (%d): range error", fcn, n);
  return T ();
}
731 
732 template <class T>
733 T&
735 {
736  (*current_liboctave_error_handler) ("%s (%d): range error", fcn, n);
737  static T foo;
738  return foo;
739 }
740 
741 template <class T>
742 T
744  octave_idx_type j) const
745 {
746  (*current_liboctave_error_handler)
747  ("%s (%d, %d): range error", fcn, i, j);
748  return T ();
749 }
750 
751 template <class T>
752 T&
754 {
755  (*current_liboctave_error_handler)
756  ("%s (%d, %d): range error", fcn, i, j);
757  static T foo;
758  return foo;
759 }
760 
761 template <class T>
762 T
763 Sparse<T>::range_error (const char *fcn,
764  const Array<octave_idx_type>& ra_idx) const
765 {
766  std::ostringstream buf;
767 
768  buf << fcn << " (";
769 
770  octave_idx_type n = ra_idx.length ();
771 
772  if (n > 0)
773  buf << ra_idx(0);
774 
775  for (octave_idx_type i = 1; i < n; i++)
776  buf << ", " << ra_idx(i);
777 
778  buf << "): range error";
779 
780  std::string buf_str = buf.str ();
781 
782  (*current_liboctave_error_handler) (buf_str.c_str ());
783 
784  return T ();
785 }
786 
787 template <class T>
788 T&
790 {
791  std::ostringstream buf;
792 
793  buf << fcn << " (";
794 
795  octave_idx_type n = ra_idx.length ();
796 
797  if (n > 0)
798  buf << ra_idx(0);
799 
800  for (octave_idx_type i = 1; i < n; i++)
801  buf << ", " << ra_idx(i);
802 
803  buf << "): range error";
804 
805  std::string buf_str = buf.str ();
806 
807  (*current_liboctave_error_handler) (buf_str.c_str ());
808 
809  static T foo;
810  return foo;
811 }
812 
// Return a copy of *this reshaped to new_dims.  Sparse arrays are
// strictly 2-D, so a request for more dimensions collapses all
// trailing dimensions into the column count (with a warning).
template <class T>
Sparse<T>
Sparse<T>::reshape (const dim_vector& new_dims) const
{
  Sparse<T> retval;
  dim_vector dims2 = new_dims;

  if (dims2.length () > 2)
    {
      (*current_liboctave_warning_with_id_handler)
        ("Octave:reshape-smashes-dims",
         "reshape: sparse reshape to N-d array smashes dims");

      // Fold dimensions 2..end into the column dimension.
      for (octave_idx_type i = 2; i < dims2.length (); i++)
        dims2(1) *= dims2(i);

      dims2.resize (2);
    }

  if (dimensions != dims2)
    {
      if (dimensions.numel () == dims2.numel ())
        {
          octave_idx_type new_nnz = nnz ();
          octave_idx_type new_nr = dims2 (0);
          octave_idx_type new_nc = dims2 (1);
          octave_idx_type old_nr = rows ();
          octave_idx_type old_nc = cols ();
          retval = Sparse<T> (new_nr, new_nc, new_nnz);

          // Each nonzero keeps its column-major linear position (and
          // hence its data slot j); only the (row, col) split changes.
          octave_idx_type kk = 0;
          retval.xcidx (0) = 0;
          for (octave_idx_type i = 0; i < old_nc; i++)
            for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
              {
                octave_idx_type tmp = i * old_nr + ridx (j);
                // A negative linear index means i*old_nr overflowed.
                if (tmp < 0)
                  (*current_liboctave_error_handler)
                    ("reshape: overflow in octave_idx_type prevents reshaping array");

                octave_idx_type ii = tmp % new_nr;
                octave_idx_type jj = (tmp - ii) / new_nr;
                // Close out column pointers for any skipped columns.
                for (octave_idx_type k = kk; k < jj; k++)
                  retval.xcidx (k+1) = j;
                kk = jj;
                retval.xdata (j) = data (j);
                retval.xridx (j) = ii;
              }
          for (octave_idx_type k = kk; k < new_nc; k++)
            retval.xcidx (k+1) = new_nnz;
        }
      else
        {
          std::string dimensions_str = dimensions.str ();
          std::string new_dims_str = new_dims.str ();

          (*current_liboctave_error_handler)
            ("reshape: can't reshape %s array to %s array",
             dimensions_str.c_str (), new_dims_str.c_str ());
        }
    }
  else
    retval = *this;

  return retval;
}
879 
880 template <class T>
881 Sparse<T>
882 Sparse<T>::permute (const Array<octave_idx_type>& perm_vec, bool) const
883 {
884  // The only valid permutations of a sparse array are [1, 2] and [2, 1].
885 
886  bool fail = false;
887  bool trans = false;
888 
889  if (perm_vec.length () == 2)
890  {
891  if (perm_vec(0) == 0 && perm_vec(1) == 1)
892  /* do nothing */;
893  else if (perm_vec(0) == 1 && perm_vec(1) == 0)
894  trans = true;
895  else
896  fail = true;
897  }
898  else
899  fail = true;
900 
901  if (fail)
902  (*current_liboctave_error_handler)
903  ("permutation vector contains an invalid element");
904 
905  return trans ? this->transpose () : *this;
906 }
907 
908 template <class T>
909 void
911 {
912  octave_idx_type nr = rows ();
913  octave_idx_type nc = cols ();
914 
915  if (nr == 0)
916  resize (1, std::max (nc, n));
917  else if (nc == 0)
918  resize (nr, (n + nr - 1) / nr); // Ain't it wicked?
919  else if (nr == 1)
920  resize (1, n);
921  else if (nc == 1)
922  resize (n, 1);
923  else
925 }
926 
927 template <class T>
928 void
930 {
931  octave_idx_type n = dv.length ();
932 
933  if (n != 2)
934  {
935  (*current_liboctave_error_handler) ("sparse array must be 2-D");
936  return;
937  }
938 
939  resize (dv(0), dv(1));
940 }
941 
942 template <class T>
943 void
945 {
946  if (r < 0 || c < 0)
947  {
948  (*current_liboctave_error_handler)
949  ("can't resize to negative dimension");
950  return;
951  }
952 
953  if (r == dim1 () && c == dim2 ())
954  return;
955 
956  // This wouldn't be necessary for r >= rows () if nrows wasn't part of the
957  // Sparse rep. It is not good for anything in there.
958  make_unique ();
959 
960  if (r < rows ())
961  {
962  octave_idx_type i = 0;
963  octave_idx_type k = 0;
964  for (octave_idx_type j = 1; j <= rep->ncols; j++)
965  {
966  octave_idx_type u = xcidx (j);
967  for (i = i; i < u; i++)
968  if (xridx (i) < r)
969  {
970  xdata (k) = xdata (i);
971  xridx (k++) = xridx (i);
972  }
973  xcidx (j) = k;
974  }
975  }
976 
977  rep->nrows = dimensions(0) = r;
978 
979  if (c != rep->ncols)
980  {
981  octave_idx_type *new_cidx = new octave_idx_type [c+1];
982  std::copy (rep->c, rep->c + std::min (c, rep->ncols) + 1, new_cidx);
983  delete [] rep->c;
984  rep->c = new_cidx;
985 
986  if (c > rep->ncols)
987  std::fill_n (rep->c + rep->ncols + 1, c - rep->ncols,
988  rep->c[rep->ncols]);
989  }
990 
991  rep->ncols = dimensions(1) = c;
992 
993  rep->change_length (rep->nnz ());
994 }
995 
996 template <class T>
997 Sparse<T>&
999 {
1000  octave_idx_type a_rows = a.rows ();
1001  octave_idx_type a_cols = a.cols ();
1002  octave_idx_type nr = rows ();
1003  octave_idx_type nc = cols ();
1004 
1005  if (r < 0 || r + a_rows > rows () || c < 0 || c + a_cols > cols ())
1006  {
1007  (*current_liboctave_error_handler) ("range error for insert");
1008  return *this;
1009  }
1010 
1011  // First count the number of elements in the final array
1012  octave_idx_type nel = cidx (c) + a.nnz ();
1013 
1014  if (c + a_cols < nc)
1015  nel += cidx (nc) - cidx (c + a_cols);
1016 
1017  for (octave_idx_type i = c; i < c + a_cols; i++)
1018  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1019  if (ridx (j) < r || ridx (j) >= r + a_rows)
1020  nel++;
1021 
1022  Sparse<T> tmp (*this);
1023  --rep->count;
1024  rep = new typename Sparse<T>::SparseRep (nr, nc, nel);
1025 
1026  for (octave_idx_type i = 0; i < tmp.cidx (c); i++)
1027  {
1028  data (i) = tmp.data (i);
1029  ridx (i) = tmp.ridx (i);
1030  }
1031  for (octave_idx_type i = 0; i < c + 1; i++)
1032  cidx (i) = tmp.cidx (i);
1033 
1034  octave_idx_type ii = cidx (c);
1035 
1036  for (octave_idx_type i = c; i < c + a_cols; i++)
1037  {
1038  octave_quit ();
1039 
1040  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1041  if (tmp.ridx (j) < r)
1042  {
1043  data (ii) = tmp.data (j);
1044  ridx (ii++) = tmp.ridx (j);
1045  }
1046 
1047  octave_quit ();
1048 
1049  for (octave_idx_type j = a.cidx (i-c); j < a.cidx (i-c+1); j++)
1050  {
1051  data (ii) = a.data (j);
1052  ridx (ii++) = r + a.ridx (j);
1053  }
1054 
1055  octave_quit ();
1056 
1057  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1058  if (tmp.ridx (j) >= r + a_rows)
1059  {
1060  data (ii) = tmp.data (j);
1061  ridx (ii++) = tmp.ridx (j);
1062  }
1063 
1064  cidx (i+1) = ii;
1065  }
1066 
1067  for (octave_idx_type i = c + a_cols; i < nc; i++)
1068  {
1069  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1070  {
1071  data (ii) = tmp.data (j);
1072  ridx (ii++) = tmp.ridx (j);
1073  }
1074  cidx (i+1) = ii;
1075  }
1076 
1077  return *this;
1078 }
1079 
1080 template <class T>
1081 Sparse<T>&
1083 {
1084 
1085  if (ra_idx.length () != 2)
1086  {
1087  (*current_liboctave_error_handler) ("range error for insert");
1088  return *this;
1089  }
1090 
1091  return insert (a, ra_idx (0), ra_idx (1));
1092 }
1093 
1094 template <class T>
1095 Sparse<T>
1097 {
1098  assert (ndims () == 2);
1099 
1100  octave_idx_type nr = rows ();
1101  octave_idx_type nc = cols ();
1102  octave_idx_type nz = nnz ();
1103  Sparse<T> retval (nc, nr, nz);
1104 
1105  for (octave_idx_type i = 0; i < nz; i++)
1106  retval.xcidx (ridx (i) + 1)++;
1107  // retval.xcidx[1:nr] holds the row degrees for rows 0:(nr-1)
1108  nz = 0;
1109  for (octave_idx_type i = 1; i <= nr; i++)
1110  {
1111  const octave_idx_type tmp = retval.xcidx (i);
1112  retval.xcidx (i) = nz;
1113  nz += tmp;
1114  }
1115  // retval.xcidx[1:nr] holds row entry *start* offsets for rows 0:(nr-1)
1116 
1117  for (octave_idx_type j = 0; j < nc; j++)
1118  for (octave_idx_type k = cidx (j); k < cidx (j+1); k++)
1119  {
1120  octave_idx_type q = retval.xcidx (ridx (k) + 1)++;
1121  retval.xridx (q) = j;
1122  retval.xdata (q) = data (k);
1123  }
1124  assert (nnz () == retval.xcidx (nr));
1125  // retval.xcidx[1:nr] holds row entry *end* offsets for rows 0:(nr-1)
1126  // and retval.xcidx[0:(nr-1)] holds their row entry *start* offsets
1127 
1128  return retval;
1129 }
1130 
1131 // Lower bound lookup. Could also use octave_sort, but that has upper bound
1132 // semantics, so requires some manipulation to set right. Uses a plain loop for
1133 // small columns.
1134 static octave_idx_type
1136  octave_idx_type ri)
1137 {
1138  if (nr <= 8)
1139  {
1140  octave_idx_type l;
1141  for (l = 0; l < nr; l++)
1142  if (ridx[l] >= ri)
1143  break;
1144  return l;
1145  }
1146  else
1147  return std::lower_bound (ridx, ridx + nr, ri) - ridx;
1148 }
1149 
1150 template <class T>
1151 void
1153 {
1154  Sparse<T> retval;
1155 
1156  assert (ndims () == 2);
1157 
1158  octave_idx_type nr = dim1 ();
1159  octave_idx_type nc = dim2 ();
1160  octave_idx_type nz = nnz ();
1161 
1162  octave_idx_type nel = numel (); // Can throw.
1163 
1164  const dim_vector idx_dims = idx.orig_dimensions ();
1165 
1166  if (idx.extent (nel) > nel)
1167  gripe_del_index_out_of_range (true, idx.extent (nel), nel);
1168  else if (nc == 1)
1169  {
1170  // Sparse column vector.
1171  const Sparse<T> tmp = *this; // constant copy to prevent COW.
1172 
1173  octave_idx_type lb, ub;
1174 
1175  if (idx.is_cont_range (nel, lb, ub))
1176  {
1177  // Special-case a contiguous range.
1178  // Look-up indices first.
1179  octave_idx_type li = lblookup (tmp.ridx (), nz, lb);
1180  octave_idx_type ui = lblookup (tmp.ridx (), nz, ub);
1181  // Copy data and adjust indices.
1182  octave_idx_type nz_new = nz - (ui - li);
1183  *this = Sparse<T> (nr - (ub - lb), 1, nz_new);
1184  std::copy (tmp.data (), tmp.data () + li, data ());
1185  std::copy (tmp.ridx (), tmp.ridx () + li, xridx ());
1186  std::copy (tmp.data () + ui, tmp.data () + nz, xdata () + li);
1187  mx_inline_sub (nz - ui, xridx () + li, tmp.ridx () + ui, ub - lb);
1188  xcidx (1) = nz_new;
1189  }
1190  else
1191  {
1192  OCTAVE_LOCAL_BUFFER (octave_idx_type, ridx_new, nz);
1193  OCTAVE_LOCAL_BUFFER (T, data_new, nz);
1194  idx_vector sidx = idx.sorted (true);
1195  const octave_idx_type *sj = sidx.raw ();
1196  octave_idx_type sl = sidx.length (nel);
1197  octave_idx_type nz_new = 0;
1198  octave_idx_type j = 0;
1199  for (octave_idx_type i = 0; i < nz; i++)
1200  {
1201  octave_idx_type r = tmp.ridx (i);
1202  for (; j < sl && sj[j] < r; j++) ;
1203  if (j == sl || sj[j] > r)
1204  {
1205  data_new[nz_new] = tmp.data (i);
1206  ridx_new[nz_new++] = r - j;
1207  }
1208  }
1209 
1210  *this = Sparse<T> (nr - sl, 1, nz_new);
1211  std::copy (ridx_new, ridx_new + nz_new, ridx ());
1212  std::copy (data_new, data_new + nz_new, xdata ());
1213  xcidx (1) = nz_new;
1214  }
1215  }
1216  else if (nr == 1)
1217  {
1218  // Sparse row vector.
1219  octave_idx_type lb, ub;
1220  if (idx.is_cont_range (nc, lb, ub))
1221  {
1222  const Sparse<T> tmp = *this;
1223  octave_idx_type lbi = tmp.cidx (lb);
1224  octave_idx_type ubi = tmp.cidx (ub);
1225  octave_idx_type new_nz = nz - (ubi - lbi);
1226  *this = Sparse<T> (1, nc - (ub - lb), new_nz);
1227  std::copy (tmp.data (), tmp.data () + lbi, data ());
1228  std::copy (tmp.data () + ubi, tmp.data () + nz , xdata () + lbi);
1229  std::fill_n (ridx (), new_nz, static_cast<octave_idx_type> (0));
1230  std::copy (tmp.cidx () + 1, tmp.cidx () + 1 + lb, cidx () + 1);
1231  mx_inline_sub (nc - ub, xcidx () + 1, tmp.cidx () + ub + 1,
1232  ubi - lbi);
1233  }
1234  else
1235  *this = index (idx.complement (nc));
1236  }
1237  else if (idx.length (nel) != 0)
1238  {
1239  if (idx.is_colon_equiv (nel))
1240  *this = Sparse<T> ();
1241  else
1242  {
1243  *this = index (idx_vector::colon);
1244  delete_elements (idx);
1245  *this = transpose (); // We want a row vector.
1246  }
1247  }
1248 }
1249 
1250 template <class T>
1251 void
1253 {
1254  assert (ndims () == 2);
1255 
1256  octave_idx_type nr = dim1 ();
1257  octave_idx_type nc = dim2 ();
1258  octave_idx_type nz = nnz ();
1259 
1260  if (idx_i.is_colon ())
1261  {
1262  // Deleting columns.
1263  octave_idx_type lb, ub;
1264  if (idx_j.extent (nc) > nc)
1265  gripe_del_index_out_of_range (false, idx_j.extent (nc), nc);
1266  else if (idx_j.is_cont_range (nc, lb, ub))
1267  {
1268  if (lb == 0 && ub == nc)
1269  {
1270  // Delete all rows and columns.
1271  *this = Sparse<T> (nr, 0);
1272  }
1273  else if (nz == 0)
1274  {
1275  // No elements to preserve; adjust dimensions.
1276  *this = Sparse<T> (nr, nc - (ub - lb));
1277  }
1278  else
1279  {
1280  const Sparse<T> tmp = *this;
1281  octave_idx_type lbi = tmp.cidx (lb);
1282  octave_idx_type ubi = tmp.cidx (ub);
1283  octave_idx_type new_nz = nz - (ubi - lbi);
1284 
1285  *this = Sparse<T> (nr, nc - (ub - lb), new_nz);
1286  std::copy (tmp.data (), tmp.data () + lbi, data ());
1287  std::copy (tmp.ridx (), tmp.ridx () + lbi, ridx ());
1288  std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1289  std::copy (tmp.ridx () + ubi, tmp.ridx () + nz, xridx () + lbi);
1290  std::copy (tmp.cidx () + 1, tmp.cidx () + 1 + lb, cidx () + 1);
1291  mx_inline_sub (nc - ub, xcidx () + lb + 1,
1292  tmp.cidx () + ub + 1, ubi - lbi);
1293  }
1294  }
1295  else
1296  *this = index (idx_i, idx_j.complement (nc));
1297  }
1298  else if (idx_j.is_colon ())
1299  {
1300  // Deleting rows.
1301  octave_idx_type lb, ub;
1302  if (idx_i.extent (nr) > nr)
1303  gripe_del_index_out_of_range (false, idx_i.extent (nr), nr);
1304  else if (idx_i.is_cont_range (nr, lb, ub))
1305  {
1306  if (lb == 0 && ub == nr)
1307  {
1308  // Delete all rows and columns.
1309  *this = Sparse<T> (0, nc);
1310  }
1311  else if (nz == 0)
1312  {
1313  // No elements to preserve; adjust dimensions.
1314  *this = Sparse<T> (nr - (ub - lb), nc);
1315  }
1316  else
1317  {
1318  // This is more memory-efficient than the approach below.
1319  const Sparse<T> tmpl = index (idx_vector (0, lb), idx_j);
1320  const Sparse<T> tmpu = index (idx_vector (ub, nr), idx_j);
1321  *this = Sparse<T> (nr - (ub - lb), nc,
1322  tmpl.nnz () + tmpu.nnz ());
1323  for (octave_idx_type j = 0, k = 0; j < nc; j++)
1324  {
1325  for (octave_idx_type i = tmpl.cidx (j); i < tmpl.cidx (j+1);
1326  i++)
1327  {
1328  xdata (k) = tmpl.data (i);
1329  xridx (k++) = tmpl.ridx (i);
1330  }
1331  for (octave_idx_type i = tmpu.cidx (j); i < tmpu.cidx (j+1);
1332  i++)
1333  {
1334  xdata (k) = tmpu.data (i);
1335  xridx (k++) = tmpu.ridx (i) + lb;
1336  }
1337 
1338  xcidx (j+1) = k;
1339  }
1340  }
1341  }
1342  else
1343  {
1344  // This is done by transposing, deleting columns, then transposing
1345  // again.
1346  Sparse<T> tmp = transpose ();
1347  tmp.delete_elements (idx_j, idx_i);
1348  *this = tmp.transpose ();
1349  }
1350  }
1351  else
1352  {
1353  // Empty assignment (no elements to delete) is OK if there is at
1354  // least one zero-length index and at most one other index that is
1355  // non-colon (or equivalent) index. Since we only have two
1356  // indices, we just need to check that we have at least one zero
1357  // length index. Matlab considers "[]" to be an empty index but
1358  // not "false". We accept both.
1359 
1360  bool empty_assignment
1361  = (idx_i.length (nr) == 0 || idx_j.length (nc) == 0);
1362 
1363  if (! empty_assignment)
1364  (*current_liboctave_error_handler)
1365  ("a null assignment can only have one non-colon index");
1366  }
1367 }
1368 
1369 template <class T>
1370 void
1372 {
1373  if (dim == 0)
1374  delete_elements (idx, idx_vector::colon);
1375  else if (dim == 1)
1376  delete_elements (idx_vector::colon, idx);
1377  else
1378  {
1379  (*current_liboctave_error_handler)
1380  ("invalid dimension in delete_elements");
1381  return;
1382  }
1383 }
1384 
// Index this 2-D sparse matrix with a single (linear) index IDX.
// Returns a new sparse matrix holding the selected elements.  If
// resize_ok is true, an index exceeding numel () causes the matrix to
// be virtually padded with zeros before indexing; otherwise it is an
// out-of-range error.  Several shapes get fast paths: colon, scalar
// source, sparse column vector, sparse row vector; everything else
// falls back to generic handling.
template <class T>
Sparse<T>
Sparse<T>::index (const idx_vector& idx, bool resize_ok) const
{
  Sparse<T> retval;

  assert (ndims () == 2);

  octave_idx_type nr = dim1 ();
  octave_idx_type nc = dim2 ();
  octave_idx_type nz = nnz ();

  octave_idx_type nel = numel (); // Can throw.

  // The result inherits the index's shape in some branches below, so
  // normalize the index's original dimensions to 2-D up front.
  const dim_vector idx_dims = idx.orig_dimensions ().redim (2);

  if (idx.is_colon ())
    {
      if (nc == 1)
        retval = *this;
      else
        {
          // Fast magic colon processing.  A(:) concatenates the columns:
          // the CSC data/ridx arrays are already in the right order, so
          // only the row indices need the per-column offset i * nr.
          retval = Sparse<T> (nel, 1, nz);

          for (octave_idx_type i = 0; i < nc; i++)
            {
              for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
                {
                  retval.xdata (j) = data (j);
                  retval.xridx (j) = ridx (j) + i * nr;
                }
            }

          retval.xcidx (0) = 0;
          retval.xcidx (1) = nz;
        }
    }
  else if (idx.extent (nel) > nel)
    {
      // resize_ok is completely handled here.
      if (resize_ok)
        {
          // Pad a copy with zeros to the index's extent, then index the
          // padded copy (which now can't be out of range).
          octave_idx_type ext = idx.extent (nel);
          Sparse<T> tmp = *this;
          tmp.resize1 (ext);
          retval = tmp.index (idx);
        }
      else
        gripe_index_out_of_range (1, 1, idx.extent (nel), nel);
    }
  else if (nr == 1 && nc == 1)
    {
      // You have to be pretty sick to get to this bit of code,
      // since you have a scalar stored as a sparse matrix, and
      // then want to make a dense matrix with sparse
      // representation. Ok, we'll do it, but you deserve what
      // you get!!
      retval = Sparse<T> (idx_dims(0), idx_dims(1), nz ? data (0) : T ());
    }
  else if (nc == 1)
    {
      // Sparse column vector.
      octave_idx_type lb, ub;

      if (idx.is_scalar ())
        {
          // Scalar index - just a binary lookup.
          octave_idx_type i = lblookup (ridx (), nz, idx(0));
          if (i < nz && ridx (i) == idx(0))
            retval = Sparse (1, 1, data (i));
          else
            retval = Sparse (1, 1);
        }
      else if (idx.is_cont_range (nel, lb, ub))
        {
          // Special-case a contiguous range.
          // Look-up indices first.
          octave_idx_type li = lblookup (ridx (), nz, lb);
          octave_idx_type ui = lblookup (ridx (), nz, ub);
          // Copy data and adjust indices.
          octave_idx_type nz_new = ui - li;
          retval = Sparse<T> (ub - lb, 1, nz_new);
          std::copy (data () + li, data () + li + nz_new, retval.data ());
          // Shift row indices down by lb so they are relative to the slice.
          mx_inline_sub (nz_new, retval.xridx (), ridx () + li, lb);
          retval.xcidx (1) = nz_new;
        }
      else if (idx.is_permutation (nel) && idx.is_vector ())
        {
          if (idx.is_range () && idx.increment () == -1)
            {
              // Reversal permutation nel:-1:1 — mirror row indices and
              // reverse the data array; nnz is unchanged.
              retval = Sparse<T> (nr, 1, nz);

              for (octave_idx_type j = 0; j < nz; j++)
                retval.ridx (j) = nr - ridx (nz - j - 1) - 1;

              std::copy (cidx (), cidx () + 2, retval.cidx ());
              std::reverse_copy (data (), data () + nz, retval.data ());
            }
          else
            {
              // General permutation: go through a dense array and
              // re-sparsify (permutation preserves nnz, so no fill-in).
              Array<T> tmp = array_value ();
              tmp = tmp.index (idx);
              retval = Sparse<T> (tmp);
            }
        }
      else
        {
          // If indexing a sparse column vector by a vector, the result is a
          // sparse column vector, otherwise it inherits the shape of index.
          // Vector transpose is cheap, so do it right here.

          Array<octave_idx_type> tmp_idx = idx.as_array ().as_matrix ();

          const Array<octave_idx_type> idxa = (idx_dims(0) == 1
                                               ? tmp_idx.transpose ()
                                               : tmp_idx);

          octave_idx_type new_nr = idxa.rows ();
          octave_idx_type new_nc = idxa.cols ();

          // Lookup.
          // FIXME: Could specialize for sorted idx?
          NoAlias< Array<octave_idx_type> > lidx (dim_vector (new_nr, new_nc));
          for (octave_idx_type i = 0; i < new_nr*new_nc; i++)
            lidx(i) = lblookup (ridx (), nz, idxa(i));

          // Count matches.  A lookup position l is a hit only when the
          // stored row index actually equals the requested index;
          // misses are flagged by setting lidx to nz (sentinel).
          retval = Sparse<T> (idxa.rows (), idxa.cols ());
          for (octave_idx_type j = 0; j < new_nc; j++)
            {
              octave_idx_type nzj = 0;
              for (octave_idx_type i = 0; i < new_nr; i++)
                {
                  octave_idx_type l = lidx(i, j);
                  if (l < nz && ridx (l) == idxa(i, j))
                    nzj++;
                  else
                    lidx(i, j) = nz;
                }
              retval.xcidx (j+1) = retval.xcidx (j) + nzj;
            }

          retval.change_capacity (retval.xcidx (new_nc));

          // Copy data and set row indices.
          octave_idx_type k = 0;
          for (octave_idx_type j = 0; j < new_nc; j++)
            for (octave_idx_type i = 0; i < new_nr; i++)
              {
                octave_idx_type l = lidx(i, j);
                if (l < nz)
                  {
                    retval.data (k) = data (l);
                    retval.xridx (k++) = i;
                  }
              }
        }
    }
  else if (nr == 1)
    {
      octave_idx_type lb, ub;
      if (idx.is_scalar ())
        retval = Sparse<T> (1, 1, elem (0, idx(0)));
      else if (idx.is_cont_range (nel, lb, ub))
        {
          // Special-case a contiguous range.  For a row vector, linear
          // indices are column numbers, so the slice is cidx-based.
          octave_idx_type lbi = cidx (lb);
          octave_idx_type ubi = cidx (ub);
          octave_idx_type new_nz = ubi - lbi;
          retval = Sparse<T> (1, ub - lb, new_nz);
          std::copy (data () + lbi, data () + lbi + new_nz, retval.data ());
          std::fill_n (retval.ridx (), new_nz, static_cast<octave_idx_type> (0));
          mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
        }
      else
        {
          // Sparse row vectors occupy O(nr) storage anyway, so let's just
          // convert the matrix to full, index, and sparsify the result.
          retval = Sparse<T> (array_value ().index (idx));
        }
    }
  else
    {
      if (nr != 0 && idx.is_scalar ())
        retval = Sparse<T> (1, 1, elem (idx(0) % nr, idx(0) / nr));
      else
        {
          // Indexing a non-vector sparse matrix by linear indexing.
          // I suppose this is rare (and it may easily overflow), so let's take
          // the easy way, and reshape first to column vector, which is already
          // handled above.
          retval = index (idx_vector::colon).index (idx);
          // In this case we're supposed to always inherit the shape, but
          // column(row) doesn't do it, so we'll do it instead.
          if (idx_dims(0) == 1 && idx_dims(1) != 1)
            retval = retval.transpose ();
        }
    }

  return retval;
}
1587 
1588 template <class T>
1589 Sparse<T>
1590 Sparse<T>::index (const idx_vector& idx_i, const idx_vector& idx_j,
1591  bool resize_ok) const
1592 {
1593  Sparse<T> retval;
1594 
1595  assert (ndims () == 2);
1596 
1597  octave_idx_type nr = dim1 ();
1598  octave_idx_type nc = dim2 ();
1599 
1600  octave_idx_type n = idx_i.length (nr);
1601  octave_idx_type m = idx_j.length (nc);
1602 
1603  octave_idx_type lb, ub;
1604 
1605  if (idx_i.extent (nr) > nr || idx_j.extent (nc) > nc)
1606  {
1607  // resize_ok is completely handled here.
1608  if (resize_ok)
1609  {
1610  octave_idx_type ext_i = idx_i.extent (nr);
1611  octave_idx_type ext_j = idx_j.extent (nc);
1612  Sparse<T> tmp = *this;
1613  tmp.resize (ext_i, ext_j);
1614  retval = tmp.index (idx_i, idx_j);
1615  }
1616  else if (idx_i.extent (nr) > nr)
1617  gripe_index_out_of_range (2, 1, idx_i.extent (nr), nr);
1618  else
1619  gripe_index_out_of_range (2, 2, idx_j.extent (nc), nc);
1620  }
1621  else if (nr == 1 && nc == 1)
1622  {
1623  // Scalars stored as sparse matrices occupy more memory than
1624  // a scalar, so let's just convert the matrix to full, index,
1625  // and sparsify the result.
1626 
1627  retval = Sparse<T> (array_value ().index (idx_i, idx_j));
1628  }
1629  else if (idx_i.is_colon ())
1630  {
1631  // Great, we're just manipulating columns. This is going to be quite
1632  // efficient, because the columns can stay compressed as they are.
1633  if (idx_j.is_colon ())
1634  retval = *this; // Shallow copy.
1635  else if (idx_j.is_cont_range (nc, lb, ub))
1636  {
1637  // Special-case a contiguous range.
1638  octave_idx_type lbi = cidx (lb);
1639  octave_idx_type ubi = cidx (ub);
1640  octave_idx_type new_nz = ubi - lbi;
1641  retval = Sparse<T> (nr, ub - lb, new_nz);
1642  std::copy (data () + lbi, data () + lbi + new_nz, retval.data ());
1643  std::copy (ridx () + lbi, ridx () + lbi + new_nz, retval.ridx ());
1644  mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1645  }
1646  else
1647  {
1648  // Count new nonzero elements.
1649  retval = Sparse<T> (nr, m);
1650  for (octave_idx_type j = 0; j < m; j++)
1651  {
1652  octave_idx_type jj = idx_j(j);
1653  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1654  }
1655 
1656  retval.change_capacity (retval.xcidx (m));
1657 
1658  // Copy data & indices.
1659  for (octave_idx_type j = 0; j < m; j++)
1660  {
1661  octave_idx_type ljj = cidx (idx_j(j));
1662  octave_idx_type lj = retval.xcidx (j);
1663  octave_idx_type nzj = retval.xcidx (j+1) - lj;
1664 
1665  std::copy (data () + ljj, data () + ljj + nzj, retval.data () + lj);
1666  std::copy (ridx () + ljj, ridx () + ljj + nzj, retval.ridx () + lj);
1667  }
1668  }
1669  }
1670  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.is_vector ())
1671  {
1672  // It's actually vector indexing. The 1D index is specialized for that.
1673  retval = index (idx_i);
1674 
1675  // If nr == 1 then the vector indexing will return a column vector!!
1676  if (nr == 1)
1677  retval.transpose ();
1678  }
1679  else if (idx_i.is_scalar ())
1680  {
1681  octave_idx_type ii = idx_i(0);
1682  retval = Sparse<T> (1, m);
1684  for (octave_idx_type j = 0; j < m; j++)
1685  {
1686  octave_quit ();
1687  octave_idx_type jj = idx_j(j);
1688  octave_idx_type lj = cidx (jj);
1689  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1690 
1691  // Scalar index - just a binary lookup.
1692  octave_idx_type i = lblookup (ridx () + lj, nzj, ii);
1693  if (i < nzj && ridx (i+lj) == ii)
1694  {
1695  ij[j] = i + lj;
1696  retval.xcidx (j+1) = retval.xcidx (j) + 1;
1697  }
1698  else
1699  retval.xcidx (j+1) = retval.xcidx (j);
1700  }
1701 
1702  retval.change_capacity (retval.xcidx (m));
1703 
1704  // Copy data, adjust row indices.
1705  for (octave_idx_type j = 0; j < m; j++)
1706  {
1707  octave_idx_type i = retval.xcidx (j);
1708  if (retval.xcidx (j+1) > i)
1709  {
1710  retval.xridx (i) = 0;
1711  retval.xdata (i) = data (ij[j]);
1712  }
1713  }
1714  }
1715  else if (idx_i.is_cont_range (nr, lb, ub))
1716  {
1717  retval = Sparse<T> (n, m);
1720  for (octave_idx_type j = 0; j < m; j++)
1721  {
1722  octave_quit ();
1723  octave_idx_type jj = idx_j(j);
1724  octave_idx_type lj = cidx (jj);
1725  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1726  octave_idx_type lij, uij;
1727 
1728  // Lookup indices.
1729  li[j] = lij = lblookup (ridx () + lj, nzj, lb) + lj;
1730  ui[j] = uij = lblookup (ridx () + lj, nzj, ub) + lj;
1731  retval.xcidx (j+1) = retval.xcidx (j) + ui[j] - li[j];
1732  }
1733 
1734  retval.change_capacity (retval.xcidx (m));
1735 
1736  // Copy data, adjust row indices.
1737  for (octave_idx_type j = 0, k = 0; j < m; j++)
1738  {
1739  octave_quit ();
1740  for (octave_idx_type i = li[j]; i < ui[j]; i++)
1741  {
1742  retval.xdata (k) = data (i);
1743  retval.xridx (k++) = ridx (i) - lb;
1744  }
1745  }
1746  }
1747  else if (idx_i.is_permutation (nr))
1748  {
1749  // The columns preserve their length, just need to renumber and sort them.
1750  // Count new nonzero elements.
1751  retval = Sparse<T> (nr, m);
1752  for (octave_idx_type j = 0; j < m; j++)
1753  {
1754  octave_idx_type jj = idx_j(j);
1755  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1756  }
1757 
1758  retval.change_capacity (retval.xcidx (m));
1759 
1760  octave_quit ();
1761 
1762  if (idx_i.is_range () && idx_i.increment () == -1)
1763  {
1764  // It's nr:-1:1. Just flip all columns.
1765  for (octave_idx_type j = 0; j < m; j++)
1766  {
1767  octave_quit ();
1768  octave_idx_type jj = idx_j(j);
1769  octave_idx_type lj = cidx (jj);
1770  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1771  octave_idx_type li = retval.xcidx (j);
1772  octave_idx_type uj = lj + nzj - 1;
1773  for (octave_idx_type i = 0; i < nzj; i++)
1774  {
1775  retval.xdata (li + i) = data (uj - i); // Copy in reverse order.
1776  retval.xridx (li + i) = nr - 1 - ridx (uj - i); // Ditto with transform.
1777  }
1778  }
1779  }
1780  else
1781  {
1782  // Get inverse permutation.
1783  idx_vector idx_iinv = idx_i.inverse_permutation (nr);
1784  const octave_idx_type *iinv = idx_iinv.raw ();
1785 
1786  // Scatter buffer.
1787  OCTAVE_LOCAL_BUFFER (T, scb, nr);
1788  octave_idx_type *rri = retval.ridx ();
1789 
1790  for (octave_idx_type j = 0; j < m; j++)
1791  {
1792  octave_quit ();
1793  octave_idx_type jj = idx_j(j);
1794  octave_idx_type lj = cidx (jj);
1795  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1796  octave_idx_type li = retval.xcidx (j);
1797  // Scatter the column, transform indices.
1798  for (octave_idx_type i = 0; i < nzj; i++)
1799  scb[rri[li + i] = iinv[ridx (lj + i)]] = data (lj + i);
1800 
1801  octave_quit ();
1802 
1803  // Sort them.
1804  std::sort (rri + li, rri + li + nzj);
1805 
1806  // Gather.
1807  for (octave_idx_type i = 0; i < nzj; i++)
1808  retval.xdata (li + i) = scb[rri[li + i]];
1809  }
1810  }
1811 
1812  }
1813  else if (idx_j.is_colon ())
1814  {
1815  // This requires uncompressing columns, which is generally costly,
1816  // so we rely on the efficient transpose to handle this.
1817  // It may still make sense to optimize some cases here.
1818  retval = transpose ();
1819  retval = retval.index (idx_vector::colon, idx_i);
1820  retval = retval.transpose ();
1821  }
1822  else
1823  {
1824  // A(I, J) is decomposed into A(:, J)(I, :).
1825  retval = index (idx_vector::colon, idx_j);
1826  retval = retval.index (idx_i, idx_vector::colon);
1827  }
1828 
1829  return retval;
1830 }
1831 
1832 template <class T>
1833 void
1834 Sparse<T>::assign (const idx_vector& idx, const Sparse<T>& rhs)
1835 {
1836  Sparse<T> retval;
1837 
1838  assert (ndims () == 2);
1839 
1840  octave_idx_type nr = dim1 ();
1841  octave_idx_type nc = dim2 ();
1842  octave_idx_type nz = nnz ();
1843 
1844  octave_idx_type n = numel (); // Can throw.
1845 
1846  octave_idx_type rhl = rhs.numel ();
1847 
1848  if (idx.length (n) == rhl)
1849  {
1850  if (rhl == 0)
1851  return;
1852 
1853  octave_idx_type nx = idx.extent (n);
1854  // Try to resize first if necessary.
1855  if (nx != n)
1856  {
1857  resize1 (nx);
1858  n = numel ();
1859  nr = rows ();
1860  nc = cols ();
1861  // nz is preserved.
1862  }
1863 
1864  if (idx.is_colon ())
1865  {
1866  *this = rhs.reshape (dimensions);
1867  }
1868  else if (nc == 1 && rhs.cols () == 1)
1869  {
1870  // Sparse column vector to sparse column vector assignment.
1871 
1872  octave_idx_type lb, ub;
1873  if (idx.is_cont_range (nr, lb, ub))
1874  {
1875  // Special-case a contiguous range.
1876  // Look-up indices first.
1877  octave_idx_type li = lblookup (ridx (), nz, lb);
1878  octave_idx_type ui = lblookup (ridx (), nz, ub);
1879  octave_idx_type rnz = rhs.nnz ();
1880  octave_idx_type new_nz = nz - (ui - li) + rnz;
1881 
1882  if (new_nz >= nz && new_nz <= capacity ())
1883  {
1884  // Adding/overwriting elements, enough capacity allocated.
1885 
1886  if (new_nz > nz)
1887  {
1888  // Make room first.
1889  std::copy_backward (data () + ui, data () + nz,
1890  data () + nz + rnz);
1891  std::copy_backward (ridx () + ui, ridx () + nz,
1892  ridx () + nz + rnz);
1893  }
1894 
1895  // Copy data and adjust indices from rhs.
1896  std::copy (rhs.data (), rhs.data () + rnz, data () + li);
1897  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1898  }
1899  else
1900  {
1901  // Clearing elements or exceeding capacity, allocate afresh
1902  // and paste pieces.
1903  const Sparse<T> tmp = *this;
1904  *this = Sparse<T> (nr, 1, new_nz);
1905 
1906  // Head ...
1907  std::copy (tmp.data (), tmp.data () + li, data ());
1908  std::copy (tmp.ridx (), tmp.ridx () + li, ridx ());
1909 
1910  // new stuff ...
1911  std::copy (rhs.data (), rhs.data () + rnz, data () + li);
1912  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1913 
1914  // ...tail
1915  std::copy (tmp.data () + ui, tmp.data () + nz,
1916  data () + li + rnz);
1917  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
1918  ridx () + li + rnz);
1919  }
1920 
1921  cidx (1) = new_nz;
1922  }
1923  else if (idx.is_range () && idx.increment () == -1)
1924  {
1925  // It's s(u:-1:l) = r. Reverse the assignment.
1926  assign (idx.sorted (), rhs.index (idx_vector (rhl - 1, 0, -1)));
1927  }
1928  else if (idx.is_permutation (n))
1929  {
1930  *this = rhs.index (idx.inverse_permutation (n));
1931  }
1932  else if (rhs.nnz () == 0)
1933  {
1934  // Elements are being zeroed.
1935  octave_idx_type *ri = ridx ();
1936  for (octave_idx_type i = 0; i < rhl; i++)
1937  {
1938  octave_idx_type iidx = idx(i);
1939  octave_idx_type li = lblookup (ri, nz, iidx);
1940  if (li != nz && ri[li] == iidx)
1941  xdata (li) = T ();
1942  }
1943 
1944  maybe_compress (true);
1945  }
1946  else
1947  {
1948  const Sparse<T> tmp = *this;
1949  octave_idx_type new_nz = nz + rhl;
1950  // Disassembly our matrix...
1951  Array<octave_idx_type> new_ri (dim_vector (new_nz, 1));
1952  Array<T> new_data (dim_vector (new_nz, 1));
1953  std::copy (tmp.ridx (), tmp.ridx () + nz, new_ri.fortran_vec ());
1954  std::copy (tmp.data (), tmp.data () + nz, new_data.fortran_vec ());
1955  // ... insert new data (densified) ...
1956  idx.copy_data (new_ri.fortran_vec () + nz);
1957  new_data.assign (idx_vector (nz, new_nz), rhs.array_value ());
1958  // ... reassembly.
1959  *this = Sparse<T> (new_data, new_ri,
1960  static_cast<octave_idx_type> (0),
1961  nr, nc, false);
1962  }
1963  }
1964  else
1965  {
1966  dim_vector save_dims = dimensions;
1967  *this = index (idx_vector::colon);
1968  assign (idx, rhs.index (idx_vector::colon));
1969  *this = reshape (save_dims);
1970  }
1971  }
1972  else if (rhl == 1)
1973  {
1974  rhl = idx.length (n);
1975  if (rhs.nnz () != 0)
1976  assign (idx, Sparse<T> (rhl, 1, rhs.data (0)));
1977  else
1978  assign (idx, Sparse<T> (rhl, 1));
1979  }
1980  else
1982 }
1983 
1984 template <class T>
1985 void
1987  const idx_vector& idx_j, const Sparse<T>& rhs)
1988 {
1989  Sparse<T> retval;
1990 
1991  assert (ndims () == 2);
1992 
1993  octave_idx_type nr = dim1 ();
1994  octave_idx_type nc = dim2 ();
1995  octave_idx_type nz = nnz ();
1996 
1997  octave_idx_type n = rhs.rows ();
1998  octave_idx_type m = rhs.columns ();
1999 
2000  // FIXME: this should probably be written more like the
2001  // Array<T>::assign function...
2002 
2003  bool orig_zero_by_zero = (nr == 0 && nc == 0);
2004 
2005  if (orig_zero_by_zero || (idx_i.length (nr) == n && idx_j.length (nc) == m))
2006  {
2007  octave_idx_type nrx;
2008  octave_idx_type ncx;
2009 
2010  if (orig_zero_by_zero)
2011  {
2012  if (idx_i.is_colon ())
2013  {
2014  nrx = n;
2015 
2016  if (idx_j.is_colon ())
2017  ncx = m;
2018  else
2019  ncx = idx_j.extent (nc);
2020  }
2021  else if (idx_j.is_colon ())
2022  {
2023  nrx = idx_i.extent (nr);
2024  ncx = m;
2025  }
2026  else
2027  {
2028  nrx = idx_i.extent (nr);
2029  ncx = idx_j.extent (nc);
2030  }
2031  }
2032  else
2033  {
2034  nrx = idx_i.extent (nr);
2035  ncx = idx_j.extent (nc);
2036  }
2037 
2038  // Try to resize first if necessary.
2039  if (nrx != nr || ncx != nc)
2040  {
2041  resize (nrx, ncx);
2042  nr = rows ();
2043  nc = cols ();
2044  // nz is preserved.
2045  }
2046 
2047  if (n == 0 || m == 0)
2048  return;
2049 
2050  if (idx_i.is_colon ())
2051  {
2052  octave_idx_type lb, ub;
2053  // Great, we're just manipulating columns. This is going to be quite
2054  // efficient, because the columns can stay compressed as they are.
2055  if (idx_j.is_colon ())
2056  *this = rhs; // Shallow copy.
2057  else if (idx_j.is_cont_range (nc, lb, ub))
2058  {
2059  // Special-case a contiguous range.
2060  octave_idx_type li = cidx (lb);
2061  octave_idx_type ui = cidx (ub);
2062  octave_idx_type rnz = rhs.nnz ();
2063  octave_idx_type new_nz = nz - (ui - li) + rnz;
2064 
2065  if (new_nz >= nz && new_nz <= capacity ())
2066  {
2067  // Adding/overwriting elements, enough capacity allocated.
2068 
2069  if (new_nz > nz)
2070  {
2071  // Make room first.
2072  std::copy (data () + ui, data () + nz,
2073  data () + li + rnz);
2074  std::copy (ridx () + ui, ridx () + nz,
2075  ridx () + li + rnz);
2076  mx_inline_add2 (nc - ub, cidx () + ub + 1, new_nz - nz);
2077  }
2078 
2079  // Copy data and indices from rhs.
2080  std::copy (rhs.data (), rhs.data () + rnz, data () + li);
2081  std::copy (rhs.ridx (), rhs.ridx () + rnz, ridx () + li);
2082  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2083  li);
2084 
2085  assert (nnz () == new_nz);
2086  }
2087  else
2088  {
2089  // Clearing elements or exceeding capacity, allocate afresh
2090  // and paste pieces.
2091  const Sparse<T> tmp = *this;
2092  *this = Sparse<T> (nr, nc, new_nz);
2093 
2094  // Head...
2095  std::copy (tmp.data (), tmp.data () + li, data ());
2096  std::copy (tmp.ridx (), tmp.ridx () + li, ridx ());
2097  std::copy (tmp.cidx () + 1, tmp.cidx () + 1 + lb, cidx () + 1);
2098 
2099  // new stuff...
2100  std::copy (rhs.data (), rhs.data () + rnz, data () + li);
2101  std::copy (rhs.ridx (), rhs.ridx () + rnz, ridx () + li);
2102  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2103  li);
2104 
2105  // ...tail.
2106  std::copy (tmp.data () + ui, tmp.data () + nz,
2107  data () + li + rnz);
2108  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
2109  ridx () + li + rnz);
2110  mx_inline_add (nc - ub, cidx () + ub + 1,
2111  tmp.cidx () + ub + 1, new_nz - nz);
2112 
2113  assert (nnz () == new_nz);
2114  }
2115  }
2116  else if (idx_j.is_range () && idx_j.increment () == -1)
2117  {
2118  // It's s(:,u:-1:l) = r. Reverse the assignment.
2119  assign (idx_i, idx_j.sorted (),
2120  rhs.index (idx_i, idx_vector (m - 1, 0, -1)));
2121  }
2122  else if (idx_j.is_permutation (nc))
2123  {
2124  *this = rhs.index (idx_i, idx_j.inverse_permutation (nc));
2125  }
2126  else
2127  {
2128  const Sparse<T> tmp = *this;
2129  *this = Sparse<T> (nr, nc);
2130  OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, jsav, nc, -1);
2131 
2132  // Assemble column lengths.
2133  for (octave_idx_type i = 0; i < nc; i++)
2134  xcidx (i+1) = tmp.cidx (i+1) - tmp.cidx (i);
2135 
2136  for (octave_idx_type i = 0; i < m; i++)
2137  {
2138  octave_idx_type j =idx_j(i);
2139  jsav[j] = i;
2140  xcidx (j+1) = rhs.cidx (i+1) - rhs.cidx (i);
2141  }
2142 
2143  // Make cumulative.
2144  for (octave_idx_type i = 0; i < nc; i++)
2145  xcidx (i+1) += xcidx (i);
2146 
2147  change_capacity (nnz ());
2148 
2149  // Merge columns.
2150  for (octave_idx_type i = 0; i < nc; i++)
2151  {
2152  octave_idx_type l = xcidx (i);
2153  octave_idx_type u = xcidx (i+1);
2154  octave_idx_type j = jsav[i];
2155  if (j >= 0)
2156  {
2157  // from rhs
2158  octave_idx_type k = rhs.cidx (j);
2159  std::copy (rhs.data () + k, rhs.data () + k + u - l,
2160  xdata () + l);
2161  std::copy (rhs.ridx () + k, rhs.ridx () + k + u - l,
2162  xridx () + l);
2163  }
2164  else
2165  {
2166  // original
2167  octave_idx_type k = tmp.cidx (i);
2168  std::copy (tmp.data () + k, tmp.data () + k + u - l,
2169  xdata () + l);
2170  std::copy (tmp.ridx () + k, tmp.ridx () + k + u - l,
2171  xridx () + l);
2172  }
2173  }
2174 
2175  }
2176  }
2177  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.is_vector ())
2178  {
2179  // It's just vector indexing. The 1D assign is specialized for that.
2180  assign (idx_i, rhs);
2181  }
2182  else if (idx_j.is_colon ())
2183  {
2184  if (idx_i.is_permutation (nr))
2185  {
2186  *this = rhs.index (idx_i.inverse_permutation (nr), idx_j);
2187  }
2188  else
2189  {
2190  // FIXME: optimize more special cases?
2191  // In general this requires unpacking the columns, which is slow,
2192  // especially for many small columns. OTOH, transpose is an
2193  // efficient O(nr+nc+nnz) operation.
2194  *this = transpose ();
2195  assign (idx_vector::colon, idx_i, rhs.transpose ());
2196  *this = transpose ();
2197  }
2198  }
2199  else
2200  {
2201  // Split it into 2 assignments and one indexing.
2202  Sparse<T> tmp = index (idx_vector::colon, idx_j);
2203  tmp.assign (idx_i, idx_vector::colon, rhs);
2204  assign (idx_vector::colon, idx_j, tmp);
2205  }
2206  }
2207  else if (m == 1 && n == 1)
2208  {
2209  n = idx_i.length (nr);
2210  m = idx_j.length (nc);
2211  if (rhs.nnz () != 0)
2212  assign (idx_i, idx_j, Sparse<T> (n, m, rhs.data (0)));
2213  else
2214  assign (idx_i, idx_j, Sparse<T> (n, m));
2215  }
2216  else
2218 }
2219 
2220 // Can't use versions of these in Array.cc due to duplication of the
2221 // instantiations for Array<double and Sparse<double>, etc
2222 template <class T>
2223 bool
2225  typename ref_param<T>::type b)
2226 {
2227  return (a < b);
2228 }
2229 
2230 template <class T>
2231 bool
2233  typename ref_param<T>::type b)
2234 {
2235  return (a > b);
2236 }
2237 
2238 template <class T>
2239 Sparse<T>
2241 {
2242  Sparse<T> m = *this;
2243 
2244  octave_idx_type nr = m.rows ();
2245  octave_idx_type nc = m.columns ();
2246 
2247  if (m.length () < 1 || dim > 1)
2248  return m;
2249 
2250  if (dim > 0)
2251  {
2252  m = m.transpose ();
2253  nr = m.rows ();
2254  nc = m.columns ();
2255  }
2256 
2257  octave_sort<T> lsort;
2258 
2259  if (mode == ASCENDING)
2260  lsort.set_compare (sparse_ascending_compare<T>);
2261  else if (mode == DESCENDING)
2262  lsort.set_compare (sparse_descending_compare<T>);
2263  else
2264  abort ();
2265 
2266  T *v = m.data ();
2267  octave_idx_type *mcidx = m.cidx ();
2268  octave_idx_type *mridx = m.ridx ();
2269 
2270  for (octave_idx_type j = 0; j < nc; j++)
2271  {
2272  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2273  lsort.sort (v, ns);
2274 
2275  octave_idx_type i;
2276  if (mode == ASCENDING)
2277  {
2278  for (i = 0; i < ns; i++)
2279  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2280  break;
2281  }
2282  else
2283  {
2284  for (i = 0; i < ns; i++)
2285  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2286  break;
2287  }
2288  for (octave_idx_type k = 0; k < i; k++)
2289  mridx[k] = k;
2290  for (octave_idx_type k = i; k < ns; k++)
2291  mridx[k] = k - ns + nr;
2292 
2293  v += ns;
2294  mridx += ns;
2295  }
2296 
2297  if (dim > 0)
2298  m = m.transpose ();
2299 
2300  return m;
2301 }
2302 
2303 template <class T>
2304 Sparse<T>
2306  sortmode mode) const
2307 {
2308  Sparse<T> m = *this;
2309 
2310  octave_idx_type nr = m.rows ();
2311  octave_idx_type nc = m.columns ();
2312 
2313  if (m.length () < 1 || dim > 1)
2314  {
2315  sidx = Array<octave_idx_type> (dim_vector (nr, nc), 1);
2316  return m;
2317  }
2318 
2319  if (dim > 0)
2320  {
2321  m = m.transpose ();
2322  nr = m.rows ();
2323  nc = m.columns ();
2324  }
2325 
2326  octave_sort<T> indexed_sort;
2327 
2328  if (mode == ASCENDING)
2329  indexed_sort.set_compare (sparse_ascending_compare<T>);
2330  else if (mode == DESCENDING)
2331  indexed_sort.set_compare (sparse_descending_compare<T>);
2332  else
2333  abort ();
2334 
2335  T *v = m.data ();
2336  octave_idx_type *mcidx = m.cidx ();
2337  octave_idx_type *mridx = m.ridx ();
2338 
2339  sidx = Array<octave_idx_type> (dim_vector (nr, nc));
2341 
2342  for (octave_idx_type j = 0; j < nc; j++)
2343  {
2344  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2345  octave_idx_type offset = j * nr;
2346 
2347  if (ns == 0)
2348  {
2349  for (octave_idx_type k = 0; k < nr; k++)
2350  sidx (offset + k) = k;
2351  }
2352  else
2353  {
2354  for (octave_idx_type i = 0; i < ns; i++)
2355  vi[i] = mridx[i];
2356 
2357  indexed_sort.sort (v, vi, ns);
2358 
2359  octave_idx_type i;
2360  if (mode == ASCENDING)
2361  {
2362  for (i = 0; i < ns; i++)
2363  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2364  break;
2365  }
2366  else
2367  {
2368  for (i = 0; i < ns; i++)
2369  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2370  break;
2371  }
2372 
2373  octave_idx_type ii = 0;
2374  octave_idx_type jj = i;
2375  for (octave_idx_type k = 0; k < nr; k++)
2376  {
2377  if (ii < ns && mridx[ii] == k)
2378  ii++;
2379  else
2380  sidx (offset + jj++) = k;
2381  }
2382 
2383  for (octave_idx_type k = 0; k < i; k++)
2384  {
2385  sidx (k + offset) = vi[k];
2386  mridx[k] = k;
2387  }
2388 
2389  for (octave_idx_type k = i; k < ns; k++)
2390  {
2391  sidx (k - ns + nr + offset) = vi[k];
2392  mridx[k] = k - ns + nr;
2393  }
2394 
2395  v += ns;
2396  mridx += ns;
2397  }
2398  }
2399 
2400  if (dim > 0)
2401  {
2402  m = m.transpose ();
2403  sidx = sidx.transpose ();
2404  }
2405 
2406  return m;
2407 }
2408 
2409 template <class T>
2410 Sparse<T>
2412 {
2413  octave_idx_type nnr = rows ();
2414  octave_idx_type nnc = cols ();
2415  Sparse<T> d;
2416 
2417  if (nnr == 0 || nnc == 0)
2418  ; // do nothing
2419  else if (nnr != 1 && nnc != 1)
2420  {
2421  if (k > 0)
2422  nnc -= k;
2423  else if (k < 0)
2424  nnr += k;
2425 
2426  if (nnr > 0 && nnc > 0)
2427  {
2428  octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2429 
2430  // Count the number of nonzero elements
2431  octave_idx_type nel = 0;
2432  if (k > 0)
2433  {
2434  for (octave_idx_type i = 0; i < ndiag; i++)
2435  if (elem (i, i+k) != 0.)
2436  nel++;
2437  }
2438  else if (k < 0)
2439  {
2440  for (octave_idx_type i = 0; i < ndiag; i++)
2441  if (elem (i-k, i) != 0.)
2442  nel++;
2443  }
2444  else
2445  {
2446  for (octave_idx_type i = 0; i < ndiag; i++)
2447  if (elem (i, i) != 0.)
2448  nel++;
2449  }
2450 
2451  d = Sparse<T> (ndiag, 1, nel);
2452  d.xcidx (0) = 0;
2453  d.xcidx (1) = nel;
2454 
2455  octave_idx_type ii = 0;
2456  if (k > 0)
2457  {
2458  for (octave_idx_type i = 0; i < ndiag; i++)
2459  {
2460  T tmp = elem (i, i+k);
2461  if (tmp != 0.)
2462  {
2463  d.xdata (ii) = tmp;
2464  d.xridx (ii++) = i;
2465  }
2466  }
2467  }
2468  else if (k < 0)
2469  {
2470  for (octave_idx_type i = 0; i < ndiag; i++)
2471  {
2472  T tmp = elem (i-k, i);
2473  if (tmp != 0.)
2474  {
2475  d.xdata (ii) = tmp;
2476  d.xridx (ii++) = i;
2477  }
2478  }
2479  }
2480  else
2481  {
2482  for (octave_idx_type i = 0; i < ndiag; i++)
2483  {
2484  T tmp = elem (i, i);
2485  if (tmp != 0.)
2486  {
2487  d.xdata (ii) = tmp;
2488  d.xridx (ii++) = i;
2489  }
2490  }
2491  }
2492  }
2493  else
2494  {
2495  // Matlab returns [] 0x1 for out-of-range diagonal
2496 
2497  octave_idx_type nr = 0;
2498  octave_idx_type nc = 1;
2499  octave_idx_type nz = 0;
2500 
2501  d = Sparse<T> (nr, nc, nz);
2502  }
2503  }
2504  else if (nnr != 0 && nnc != 0)
2505  {
2506  octave_idx_type roff = 0;
2507  octave_idx_type coff = 0;
2508  if (k > 0)
2509  {
2510  roff = 0;
2511  coff = k;
2512  }
2513  else if (k < 0)
2514  {
2515  roff = -k;
2516  coff = 0;
2517  }
2518 
2519  if (nnr == 1)
2520  {
2521  octave_idx_type n = nnc + std::abs (k);
2522  octave_idx_type nz = nnz ();
2523 
2524  d = Sparse<T> (n, n, nz);
2525 
2526  if (nnz () > 0)
2527  {
2528  for (octave_idx_type i = 0; i < coff+1; i++)
2529  d.xcidx (i) = 0;
2530 
2531  for (octave_idx_type j = 0; j < nnc; j++)
2532  {
2533  for (octave_idx_type i = cidx (j); i < cidx (j+1); i++)
2534  {
2535  d.xdata (i) = data (i);
2536  d.xridx (i) = j + roff;
2537  }
2538  d.xcidx (j + coff + 1) = cidx (j+1);
2539  }
2540 
2541  for (octave_idx_type i = nnc + coff + 1; i < n + 1; i++)
2542  d.xcidx (i) = nz;
2543  }
2544  }
2545  else
2546  {
2547  octave_idx_type n = nnr + std::abs (k);
2548  octave_idx_type nz = nnz ();
2549 
2550  d = Sparse<T> (n, n, nz);
2551 
2552  if (nnz () > 0)
2553  {
2554  octave_idx_type ii = 0;
2555  octave_idx_type ir = ridx (0);
2556 
2557  for (octave_idx_type i = 0; i < coff+1; i++)
2558  d.xcidx (i) = 0;
2559 
2560  for (octave_idx_type i = 0; i < nnr; i++)
2561  {
2562  if (ir == i)
2563  {
2564  d.xdata (ii) = data (ii);
2565  d.xridx (ii++) = ir + roff;
2566 
2567  if (ii != nz)
2568  ir = ridx (ii);
2569  }
2570  d.xcidx (i + coff + 1) = ii;
2571  }
2572 
2573  for (octave_idx_type i = nnr + coff + 1; i < n+1; i++)
2574  d.xcidx (i) = nz;
2575  }
2576  }
2577  }
2578 
2579  return d;
2580 }
2581 
2582 template <class T>
2583 Sparse<T>
2584 Sparse<T>::cat (int dim, octave_idx_type n, const Sparse<T> *sparse_list)
2585 {
2586  // Default concatenation.
2587  bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2588 
2589  if (dim == -1 || dim == -2)
2590  {
2591  concat_rule = &dim_vector::hvcat;
2592  dim = -dim - 1;
2593  }
2594  else if (dim < 0)
2595  (*current_liboctave_error_handler)
2596  ("cat: invalid dimension");
2597 
2598  dim_vector dv;
2599  octave_idx_type total_nz = 0;
2600  if (dim == 0 || dim == 1)
2601  {
2602  if (n == 1)
2603  return sparse_list[0];
2604 
2605  for (octave_idx_type i = 0; i < n; i++)
2606  {
2607  if (! (dv.*concat_rule) (sparse_list[i].dims (), dim))
2608  (*current_liboctave_error_handler)
2609  ("cat: dimension mismatch");
2610  total_nz += sparse_list[i].nnz ();
2611  }
2612  }
2613  else
2615  ("cat: invalid dimension for sparse concatenation");
2616 
2617  Sparse<T> retval (dv, total_nz);
2618 
2619  if (retval.is_empty ())
2620  return retval;
2621 
2622  switch (dim)
2623  {
2624  case 0:
2625  {
2626  // sparse vertcat. This is not efficiently handled by assignment,
2627  // so we'll do it directly.
2628  octave_idx_type l = 0;
2629  for (octave_idx_type j = 0; j < dv(1); j++)
2630  {
2631  octave_quit ();
2632 
2633  octave_idx_type rcum = 0;
2634  for (octave_idx_type i = 0; i < n; i++)
2635  {
2636  const Sparse<T>& spi = sparse_list[i];
2637  // Skipping empty matrices. See the comment in Array.cc.
2638  if (spi.is_empty ())
2639  continue;
2640 
2641  octave_idx_type kl = spi.cidx (j);
2642  octave_idx_type ku = spi.cidx (j+1);
2643  for (octave_idx_type k = kl; k < ku; k++, l++)
2644  {
2645  retval.xridx (l) = spi.ridx (k) + rcum;
2646  retval.xdata (l) = spi.data (k);
2647  }
2648 
2649  rcum += spi.rows ();
2650  }
2651 
2652  retval.xcidx (j+1) = l;
2653  }
2654 
2655  break;
2656  }
2657  case 1:
2658  {
2659  octave_idx_type l = 0;
2660  for (octave_idx_type i = 0; i < n; i++)
2661  {
2662  octave_quit ();
2663 
2664  // Skipping empty matrices. See the comment in Array.cc.
2665  if (sparse_list[i].is_empty ())
2666  continue;
2667 
2668  octave_idx_type u = l + sparse_list[i].columns ();
2669  retval.assign (idx_vector::colon, idx_vector (l, u),
2670  sparse_list[i]);
2671  l = u;
2672  }
2673 
2674  break;
2675  }
2676  default:
2677  assert (false);
2678  }
2679 
2680  return retval;
2681 }
2682 
2683 template <class T>
2684 Array<T>
2686 {
2687  NoAlias< Array<T> > retval (dims (), T ());
2688  if (rows () == 1)
2689  {
2690  octave_idx_type i = 0;
2691  for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2692  {
2693  if (cidx (j+1) > i)
2694  retval(j) = data (i++);
2695  }
2696  }
2697  else
2698  {
2699  for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2700  for (octave_idx_type i = cidx (j), iu = cidx (j+1); i < iu; i++)
2701  retval(ridx (i), j) = data (i);
2702  }
2703 
2704  return retval;
2705 }
2706 
2707 /*
2708  * Tests
2709  *
2710 
2711 %!function x = set_slice (x, dim, slice, arg)
2712 %! switch (dim)
2713 %! case 11
2714 %! x(slice) = 2;
2715 %! case 21
2716 %! x(slice, :) = 2;
2717 %! case 22
2718 %! x(:, slice) = 2;
2719 %! otherwise
2720 %! error ("invalid dim, '%d'", dim);
2721 %! endswitch
2722 %!endfunction
2723 
2724 %!function x = set_slice2 (x, dim, slice)
2725 %! switch (dim)
2726 %! case 11
2727 %! x(slice) = 2 * ones (size (slice));
2728 %! case 21
2729 %! x(slice, :) = 2 * ones (length (slice), columns (x));
2730 %! case 22
2731 %! x(:, slice) = 2 * ones (rows (x), length (slice));
2732 %! otherwise
2733 %! error ("invalid dim, '%d'", dim);
2734 %! endswitch
2735 %!endfunction
2736 
2737 %!function test_sparse_slice (size, dim, slice)
2738 %! x = ones (size);
2739 %! s = set_slice (sparse (x), dim, slice);
2740 %! f = set_slice (x, dim, slice);
2741 %! assert (nnz (s), nnz (f));
2742 %! assert (full (s), f);
2743 %! s = set_slice2 (sparse (x), dim, slice);
2744 %! f = set_slice2 (x, dim, slice);
2745 %! assert (nnz (s), nnz (f));
2746 %! assert (full (s), f);
2747 %!endfunction
2748 
2749 #### 1d indexing
2750 
2751 ## size = [2 0]
2752 %!test test_sparse_slice ([2 0], 11, []);
2753 %!assert (set_slice (sparse (ones ([2 0])), 11, 1), sparse ([2 0]')) # sparse different from full
2754 %!assert (set_slice (sparse (ones ([2 0])), 11, 2), sparse ([0 2]')) # sparse different from full
2755 %!assert (set_slice (sparse (ones ([2 0])), 11, 3), sparse ([0 0; 2 0]')) # sparse different from full
2756 %!assert (set_slice (sparse (ones ([2 0])), 11, 4), sparse ([0 0; 0 2]')) # sparse different from full
2757 
2758 ## size = [0 2]
2759 %!test test_sparse_slice ([0 2], 11, []);
2760 %!assert (set_slice (sparse (ones ([0 2])), 11, 1), sparse ([2 0])) # sparse different from full
2761 %!test test_sparse_slice ([0 2], 11, 2);
2762 %!test test_sparse_slice ([0 2], 11, 3);
2763 %!test test_sparse_slice ([0 2], 11, 4);
2764 %!test test_sparse_slice ([0 2], 11, [4, 4]);
2765 
2766 ## size = [2 1]
2767 %!test test_sparse_slice ([2 1], 11, []);
2768 %!test test_sparse_slice ([2 1], 11, 1);
2769 %!test test_sparse_slice ([2 1], 11, 2);
2770 %!test test_sparse_slice ([2 1], 11, 3);
2771 %!test test_sparse_slice ([2 1], 11, 4);
2772 %!test test_sparse_slice ([2 1], 11, [4, 4]);
2773 
2774 ## size = [1 2]
2775 %!test test_sparse_slice ([1 2], 11, []);
2776 %!test test_sparse_slice ([1 2], 11, 1);
2777 %!test test_sparse_slice ([1 2], 11, 2);
2778 %!test test_sparse_slice ([1 2], 11, 3);
2779 %!test test_sparse_slice ([1 2], 11, 4);
2780 %!test test_sparse_slice ([1 2], 11, [4, 4]);
2781 
2782 ## size = [2 2]
2783 %!test test_sparse_slice ([2 2], 11, []);
2784 %!test test_sparse_slice ([2 2], 11, 1);
2785 %!test test_sparse_slice ([2 2], 11, 2);
2786 %!test test_sparse_slice ([2 2], 11, 3);
2787 %!test test_sparse_slice ([2 2], 11, 4);
2788 %!test test_sparse_slice ([2 2], 11, [4, 4]);
2789 # These 2 errors are the same as in the full case
2790 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 5)
2791 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 6)
2792 
2793 
2794 #### 2d indexing
2795 
2796 ## size = [2 0]
2797 %!test test_sparse_slice ([2 0], 21, []);
2798 %!test test_sparse_slice ([2 0], 21, 1);
2799 %!test test_sparse_slice ([2 0], 21, 2);
2800 %!test test_sparse_slice ([2 0], 21, [2,2]);
2801 %!assert (set_slice (sparse (ones ([2 0])), 21, 3), sparse (3,0))
2802 %!assert (set_slice (sparse (ones ([2 0])), 21, 4), sparse (4,0))
2803 %!test test_sparse_slice ([2 0], 22, []);
2804 %!test test_sparse_slice ([2 0], 22, 1);
2805 %!test test_sparse_slice ([2 0], 22, 2);
2806 %!test test_sparse_slice ([2 0], 22, [2,2]);
2807 %!assert (set_slice (sparse (ones ([2 0])), 22, 3), sparse ([0 0 2;0 0 2])) # sparse different from full
2808 %!assert (set_slice (sparse (ones ([2 0])), 22, 4), sparse ([0 0 0 2;0 0 0 2])) # sparse different from full
2809 
2810 ## size = [0 2]
2811 %!test test_sparse_slice ([0 2], 21, []);
2812 %!test test_sparse_slice ([0 2], 21, 1);
2813 %!test test_sparse_slice ([0 2], 21, 2);
2814 %!test test_sparse_slice ([0 2], 21, [2,2]);
2815 %!assert (set_slice (sparse (ones ([0 2])), 21, 3), sparse ([0 0;0 0;2 2])) # sparse different from full
2816 %!assert (set_slice (sparse (ones ([0 2])), 21, 4), sparse ([0 0;0 0;0 0;2 2])) # sparse different from full
2817 %!test test_sparse_slice ([0 2], 22, []);
2818 %!test test_sparse_slice ([0 2], 22, 1);
2819 %!test test_sparse_slice ([0 2], 22, 2);
2820 %!test test_sparse_slice ([0 2], 22, [2,2]);
2821 %!assert (set_slice (sparse (ones ([0 2])), 22, 3), sparse (0,3))
2822 %!assert (set_slice (sparse (ones ([0 2])), 22, 4), sparse (0,4))
2823 
2824 ## size = [2 1]
2825 %!test test_sparse_slice ([2 1], 21, []);
2826 %!test test_sparse_slice ([2 1], 21, 1);
2827 %!test test_sparse_slice ([2 1], 21, 2);
2828 %!test test_sparse_slice ([2 1], 21, [2,2]);
2829 %!test test_sparse_slice ([2 1], 21, 3);
2830 %!test test_sparse_slice ([2 1], 21, 4);
2831 %!test test_sparse_slice ([2 1], 22, []);
2832 %!test test_sparse_slice ([2 1], 22, 1);
2833 %!test test_sparse_slice ([2 1], 22, 2);
2834 %!test test_sparse_slice ([2 1], 22, [2,2]);
2835 %!test test_sparse_slice ([2 1], 22, 3);
2836 %!test test_sparse_slice ([2 1], 22, 4);
2837 
2838 ## size = [1 2]
2839 %!test test_sparse_slice ([1 2], 21, []);
2840 %!test test_sparse_slice ([1 2], 21, 1);
2841 %!test test_sparse_slice ([1 2], 21, 2);
2842 %!test test_sparse_slice ([1 2], 21, [2,2]);
2843 %!test test_sparse_slice ([1 2], 21, 3);
2844 %!test test_sparse_slice ([1 2], 21, 4);
2845 %!test test_sparse_slice ([1 2], 22, []);
2846 %!test test_sparse_slice ([1 2], 22, 1);
2847 %!test test_sparse_slice ([1 2], 22, 2);
2848 %!test test_sparse_slice ([1 2], 22, [2,2]);
2849 %!test test_sparse_slice ([1 2], 22, 3);
2850 %!test test_sparse_slice ([1 2], 22, 4);
2851 
2852 ## size = [2 2]
2853 %!test test_sparse_slice ([2 2], 21, []);
2854 %!test test_sparse_slice ([2 2], 21, 1);
2855 %!test test_sparse_slice ([2 2], 21, 2);
2856 %!test test_sparse_slice ([2 2], 21, [2,2]);
2857 %!test test_sparse_slice ([2 2], 21, 3);
2858 %!test test_sparse_slice ([2 2], 21, 4);
2859 %!test test_sparse_slice ([2 2], 22, []);
2860 %!test test_sparse_slice ([2 2], 22, 1);
2861 %!test test_sparse_slice ([2 2], 22, 2);
2862 %!test test_sparse_slice ([2 2], 22, [2,2]);
2863 %!test test_sparse_slice ([2 2], 22, 3);
2864 %!test test_sparse_slice ([2 2], 22, 4);
2865 
## bug #35570:
2867 
2868 %!assert (speye (3,1)(3:-1:1), sparse ([0; 0; 1]))
2869 
2870 ## Test removing columns (bug #36656)
2871 
2872 %!test
2873 %! s = sparse (magic (5));
2874 %! s(:,2:4) = [];
2875 %! assert (s, sparse (magic (5)(:, [1,5])));
2876 
2877 %!test
2878 %! s = sparse ([], [], [], 1, 1);
2879 %! s(1,:) = [];
2880 %! assert (s, sparse ([], [], [], 0, 1));
2881 
2882 ## Test (bug #37321)
2883 %!test a=sparse (0,0); assert (all (a) == sparse ([1]));
2884 %!test a=sparse (0,1); assert (all (a) == sparse ([1]));
2885 %!test a=sparse (1,0); assert (all (a) == sparse ([1]));
2886 %!test a=sparse (1,0); assert (all (a,2) == sparse ([1]));
2887 %!test a=sparse (1,0); assert (size (all (a,1)), [1 0]);
2888 %!test a=sparse (1,1);
2889 %! assert (all (a) == sparse ([0]));
2890 %! assert (size (all (a)), [1 1]);
2891 %!test a=sparse (2,1);
2892 %! assert (all (a) == sparse ([0]));
2893 %! assert (size (all (a)), [1 1]);
2894 %!test a=sparse (1,2);
2895 %! assert (all (a) == sparse ([0]));
2896 %! assert (size (all (a)), [1 1]);
2897 %!test a=sparse (2,2); assert (isequal (all (a), sparse ([0 0])));
2898 
2899 */
2900 
2901 template <class T>
2902 void
2903 Sparse<T>::print_info (std::ostream& os, const std::string& prefix) const
2904 {
2905  os << prefix << "rep address: " << rep << "\n"
2906  << prefix << "rep->nzmx: " << rep->nzmx << "\n"
2907  << prefix << "rep->nrows: " << rep->nrows << "\n"
2908  << prefix << "rep->ncols: " << rep->ncols << "\n"
2909  << prefix << "rep->data: " << static_cast<void *> (rep->d) << "\n"
2910  << prefix << "rep->ridx: " << static_cast<void *> (rep->r) << "\n"
2911  << prefix << "rep->cidx: " << static_cast<void *> (rep->c) << "\n"
2912  << prefix << "rep->count: " << rep->count << "\n";
2913 }
2914 
// Explicitly instantiate the Sparse<T> template for element type T,
// decorated with the given API (import/export) macro.
#define INSTANTIATE_SPARSE(T, API) \
  template class API Sparse<T>;
2917 
Array< T > as_matrix(void) const
Return the array as a matrix.
Definition: Array.h:299
T & elem(octave_idx_type _r, octave_idx_type _c)
Definition: Sparse.cc:75
octave_idx_type * xridx(void)
Definition: Sparse.h:524
int bool
Definition: mex.h:56
void mx_inline_add2(size_t n, R *r, const X *x)
Definition: mx-inlines.cc:95
dim_vector dimensions
Definition: Sparse.h:166
octave_idx_type cols(void) const
Definition: Sparse.h:264
octave_idx_type length(octave_idx_type n=0) const
Definition: idx-vector.h:551
std::string str(char sep= 'x') const
Definition: dim-vector.cc:63
T * data(void)
Definition: Sparse.h:509
octave_idx_type rows(void) const
Definition: Sparse.h:263
void change_length(octave_idx_type nz)
Definition: Sparse.cc:159
octave_idx_type numel(void) const
Definition: Sparse.h:252
const octave_base_value const Array< octave_idx_type > & ra_idx
Sparse< T > & operator=(const Sparse< T > &a)
Definition: Sparse.cc:683
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition: oct-locbuf.h:206
dim_vector dims(void) const
Definition: Sparse.h:283
octave_idx_type rows(void) const
Definition: PermMatrix.h:55
static const idx_vector colon
Definition: idx-vector.h:492
sortmode
Definition: oct-sort.h:103
octave_idx_type numel(void) const
Number of elements in the array.
Definition: Array.h:275
void set_compare(compare_fcn_type comp)
Definition: oct-sort.h:120
octave_idx_type nzmx
Definition: Sparse.h:68
void resize(int n, int fill_value=0)
Definition: dim-vector.h:287
static void transpose(octave_idx_type N, const octave_idx_type *ridx, const octave_idx_type *cidx, octave_idx_type *ridx2, octave_idx_type *cidx2)
Definition: symrcm.cc:382
octave_idx_type * xcidx(void)
Definition: Sparse.h:537
Sparse< T > index(const idx_vector &i, bool resize_ok=false) const
Definition: Sparse.cc:1387
Sparse< T > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Definition: Sparse.cc:882
bool sparse_indices_ok(octave_idx_type *r, octave_idx_type *c, octave_idx_type nrows, octave_idx_type ncols, octave_idx_type nnz)
Definition: sparse-util.cc:70
static Sparse< T > cat(int dim, octave_idx_type n, const Sparse< T > *sparse_list)
Definition: Sparse.cc:2584
octave_idx_type * r
Definition: Sparse.h:66
Sparse< T > & insert(const Sparse< T > &a, octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:998
octave_idx_type * cidx(void)
Definition: Sparse.h:531
T & elem(octave_idx_type n)
Definition: Array.h:380
octave_idx_type columns(void) const
Definition: Sparse.h:265
bool indices_ok(void) const
Definition: Sparse.cc:190
void gripe_assignment_dimension_mismatch(void)
octave_idx_type * c
Definition: Sparse.h:67
octave_idx_type rows(void) const
Definition: Array.h:313
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition: dim-vector.h:361
F77_RET_T const double const double double * d
T range_error(const char *fcn, octave_idx_type n) const
Definition: Sparse.cc:726
idx_vector inverse_permutation(octave_idx_type n) const
Definition: idx-vector.cc:1174
octave_idx_type nnz(void) const
Definition: Sparse.h:248
bool sparse_ascending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2224
bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
Definition: dim-vector.cc:177
void assign(const idx_vector &i, const Sparse< T > &rhs)
Definition: Sparse.cc:1834
void maybe_compress(bool remove_zeros)
Definition: Sparse.cc:135
Sparse< T >::SparseRep * rep
Definition: Sparse.h:164
dim_vector orig_dimensions(void) const
Definition: idx-vector.h:593
T celem(octave_idx_type _r, octave_idx_type _c) const
Definition: Sparse.cc:124
liboctave_error_handler current_liboctave_error_handler
Definition: lo-error.c:38
octave_idx_type safe_numel(void) const
Definition: dim-vector.cc:93
void copy_data(octave_idx_type *data) const
Definition: idx-vector.cc:1055
void mx_inline_sub(size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:83
idx_vector complement(octave_idx_type n) const
Definition: idx-vector.cc:1113
void change_capacity(octave_idx_type nz)
Definition: Sparse.h:493
bool sparse_descending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2232
const Array< octave_idx_type > & col_perm_vec(void) const
Definition: PermMatrix.h:72
static void gripe_index_out_of_range(void)
Definition: idx-vector.cc:51
octave_idx_type ncols
Definition: Sparse.h:70
Sparse< T > sort(octave_idx_type dim=0, sortmode mode=ASCENDING) const
Definition: Sparse.cc:2240
Sparse< T > reshape(const dim_vector &new_dims) const
Definition: Sparse.cc:815
Sparse< T > maybe_compress(bool remove_zeros=false)
Definition: Sparse.h:469
void resize(octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:944
Array< T > transpose(void) const
Definition: Array.cc:1607
const T * data(void) const
Definition: Array.h:479
static int elem
Definition: __contourc__.cc:49
void gripe_invalid_resize(void)
bool is_vector(void) const
Definition: idx-vector.cc:1283
dim_vector redim(int n) const
Definition: dim-vector.cc:266
bool is_cont_range(octave_idx_type n, octave_idx_type &l, octave_idx_type &u) const
Definition: idx-vector.cc:958
bool is_permutation(octave_idx_type n) const
Definition: idx-vector.cc:1145
Sparse(void)
Definition: Sparse.h:178
if_then_else< is_class_type< T >::no, T, T const & >::result type
Definition: lo-traits.h:116
void delete_elements(const idx_vector &i)
Definition: Sparse.cc:1152
Array< octave_idx_type > as_array(void) const
Definition: idx-vector.cc:1277
bool is_colon_equiv(octave_idx_type n) const
Definition: idx-vector.h:584
charNDArray max(char d, const charNDArray &m)
Definition: chNDArray.cc:233
octave_idx_type length(void) const
Number of elements in the array.
Definition: Array.h:267
octave_idx_type extent(octave_idx_type n) const
Definition: idx-vector.h:554
bool is_range(void) const
Definition: idx-vector.h:581
void sort(T *data, octave_idx_type nel)
Definition: oct-sort.cc:1514
This is a simple wrapper template that will subclass an Array type or any later type derived from ...
Definition: Array.h:762
octave_idx_type * ridx(void)
Definition: Sparse.h:518
bool is_empty(void) const
Definition: Sparse.h:505
void gripe_invalid_assignment_size(void)
void resize1(octave_idx_type n)
Definition: Sparse.cc:910
octave_idx_type compute_index(const Array< octave_idx_type > &ra_idx) const
Definition: Sparse.cc:701
T * xdata(void)
Definition: Sparse.h:511
bool is_scalar(void) const
Definition: idx-vector.h:578
bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
Definition: dim-vector.cc:241
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition: oct-locbuf.h:197
void assign(const idx_vector &i, const Array< T > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Definition: Array.cc:1140
virtual ~Sparse(void)
Definition: Sparse.cc:675
void mx_inline_add(size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:82
void gripe_del_index_out_of_range(bool is1d, octave_idx_type idx, octave_idx_type ext)
const T * fortran_vec(void) const
Definition: Array.h:481
octave_idx_type cols(void) const
Definition: Array.h:321
const octave_idx_type * raw(void)
Definition: idx-vector.cc:1042
T abs(T x)
Definition: pr-output.cc:3062
int length(void) const
Definition: dim-vector.h:281
octave_idx_type increment(void) const
Definition: idx-vector.cc:1012
Sparse< T > diag(octave_idx_type k=0) const
Definition: Sparse.cc:2411
void print_info(std::ostream &os, const std::string &prefix) const
Definition: Sparse.cc:2903
Sparse< T > transpose(void) const
Definition: Sparse.cc:1096
static octave_idx_type lblookup(const octave_idx_type *ridx, octave_idx_type nr, octave_idx_type ri)
Definition: Sparse.cc:1135
idx_vector sorted(bool uniq=false) const
Definition: idx-vector.h:587
octave_idx_type length(void) const
Definition: Sparse.h:258
Array< T > index(const idx_vector &i) const
Indexing without resizing.
Definition: Array.cc:716
bool is_colon(void) const
Definition: idx-vector.h:575
charNDArray min(char d, const charNDArray &m)
Definition: chNDArray.cc:210
Array< T > array_value(void) const
Definition: Sparse.cc:2685