GNU Octave 4.2.1 — a high-level interpreted language, primarily intended for
numerical computations and mostly compatible with Matlab.
Sparse.cc — documentation listing for this source file.
1 // Template sparse array class
2 /*
3 
4 Copyright (C) 2004-2017 David Bateman
5 Copyright (C) 1998-2004 Andy Adler
6 Copyright (C) 2010 VZLU Prague
7 
8 This file is part of Octave.
9 
10 Octave is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published by the
12 Free Software Foundation; either version 3 of the License, or (at your
13 option) any later version.
14 
15 Octave is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with Octave; see the file COPYING. If not, see
22 <http://www.gnu.org/licenses/>.
23 
24 */
25 
26 // This file should not include config.h. It is only included in other
27 // C++ source files that should have included config.h before including
28 // this file.
29 
30 #include <cassert>
31 
32 #include <algorithm>
33 #include <iostream>
34 #include <limits>
35 #include <sstream>
36 #include <vector>
37 
38 #include "Array.h"
39 #include "MArray.h"
40 #include "Array-util.h"
41 #include "Range.h"
42 #include "idx-vector.h"
43 #include "lo-error.h"
44 #include "quit.h"
45 #include "oct-locbuf.h"
46 
47 #include "Sparse.h"
48 #include "sparse-sort.h"
49 #include "sparse-util.h"
50 #include "oct-spparms.h"
51 #include "mx-inlines.cc"
52 
53 #include "PermMatrix.h"
54 
55 template <typename T>
56 typename Sparse<T>::SparseRep *
58 {
59  static typename Sparse<T>::SparseRep nr;
60  return &nr;
61 }
62 
63 template <typename T>
65  : rep (new typename Sparse<T>::SparseRep (a.rows (), a.cols (), a.rows ())),
66  dimensions (dim_vector (a.rows (), a.cols ()))
67 {
68  octave_idx_type n = a.rows ();
69  for (octave_idx_type i = 0; i <= n; i++)
70  cidx (i) = i;
71 
72  const Array<octave_idx_type> pv = a.col_perm_vec ();
73 
74  for (octave_idx_type i = 0; i < n; i++)
75  ridx (i) = pv(i);
76 
77  for (octave_idx_type i = 0; i < n; i++)
78  data (i) = 1.0;
79 }
80 
81 template <typename T>
82 T&
84 {
86 
87  if (nzmx <= 0)
88  (*current_liboctave_error_handler)
89  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
90 
91  for (i = c[_c]; i < c[_c + 1]; i++)
92  if (r[i] == _r)
93  return d[i];
94  else if (r[i] > _r)
95  break;
96 
97  // Ok, If we've gotten here, we're in trouble. Have to create a
98  // new element in the sparse array. This' gonna be slow!!!
99  if (c[ncols] == nzmx)
100  (*current_liboctave_error_handler)
101  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
102 
103  octave_idx_type to_move = c[ncols] - i;
104  if (to_move != 0)
105  {
106  for (octave_idx_type j = c[ncols]; j > i; j--)
107  {
108  d[j] = d[j-1];
109  r[j] = r[j-1];
110  }
111  }
112 
113  for (octave_idx_type j = _c + 1; j < ncols + 1; j++)
114  c[j] = c[j] + 1;
115 
116  d[i] = 0.;
117  r[i] = _r;
118 
119  return d[i];
120 }
121 
122 template <typename T>
123 T
125 {
126  if (nzmx > 0)
127  for (octave_idx_type i = c[_c]; i < c[_c + 1]; i++)
128  if (r[i] == _r)
129  return d[i];
130  return T ();
131 }
132 
133 template <typename T>
134 void
136 {
137  if (remove_zeros)
138  {
139  octave_idx_type i = 0;
140  octave_idx_type k = 0;
141  for (octave_idx_type j = 1; j <= ncols; j++)
142  {
143  octave_idx_type u = c[j];
144  for (; i < u; i++)
145  if (d[i] != T ())
146  {
147  d[k] = d[i];
148  r[k++] = r[i];
149  }
150  c[j] = k;
151  }
152  }
153 
154  change_length (c[ncols]);
155 }
156 
157 template <typename T>
158 void
160 {
161  for (octave_idx_type j = ncols; j > 0 && c[j] > nz; j--)
162  c[j] = nz;
163 
164  // We shall skip reallocation if we have less than 1/frac extra elements to
165  // discard.
166  static const int frac = 5;
167  if (nz > nzmx || nz < nzmx - nzmx/frac)
168  {
169  // Reallocate.
170  octave_idx_type min_nzmx = std::min (nz, nzmx);
171 
172  octave_idx_type * new_ridx = new octave_idx_type [nz];
173  std::copy (r, r + min_nzmx, new_ridx);
174 
175  delete [] r;
176  r = new_ridx;
177 
178  T * new_data = new T [nz];
179  std::copy (d, d + min_nzmx, new_data);
180 
181  delete [] d;
182  d = new_data;
183 
184  nzmx = nz;
185  }
186 }
187 
188 template <typename T>
189 bool
191 {
192  return sparse_indices_ok (r, c, nrows, ncols, nnz ());
193 }
194 
195 template <typename T>
196 bool
198 {
199  octave_idx_type nz = nnz ();
200 
201  for (octave_idx_type i = 0; i < nz; i++)
202  if (octave::math::isnan (d[i]))
203  return true;
204 
205  return false;
206 }
207 
208 template <typename T>
210  : rep (0), dimensions (dim_vector (nr, nc))
211 {
212  if (val != T ())
213  {
214  rep = new typename Sparse<T>::SparseRep (nr, nc, dimensions.safe_numel ());
215 
216  octave_idx_type ii = 0;
217  xcidx (0) = 0;
218  for (octave_idx_type j = 0; j < nc; j++)
219  {
220  for (octave_idx_type i = 0; i < nr; i++)
221  {
222  xdata (ii) = val;
223  xridx (ii++) = i;
224  }
225  xcidx (j+1) = ii;
226  }
227  }
228  else
229  {
230  rep = new typename Sparse<T>::SparseRep (nr, nc, 0);
231  for (octave_idx_type j = 0; j < nc+1; j++)
232  xcidx (j) = 0;
233  }
234 }
235 
236 template <typename T>
238  : rep (0), dimensions (dv)
239 {
240  if (dv.ndims () != 2)
242  ("Sparse::Sparse (const dim_vector&): dimension mismatch");
243 
244  rep = new typename Sparse<T>::SparseRep (dv(0), dv(1), 0);
245 }
246 
247 template <typename T>
249  : rep (0), dimensions (dv)
250 {
251 
252  // Work in unsigned long long to avoid overflow issues with numel
253  unsigned long long a_nel = static_cast<unsigned long long>(a.rows ()) *
254  static_cast<unsigned long long>(a.cols ());
255  unsigned long long dv_nel = static_cast<unsigned long long>(dv(0)) *
256  static_cast<unsigned long long>(dv(1));
257 
258  if (a_nel != dv_nel)
259  (*current_liboctave_error_handler)
260  ("Sparse::Sparse (const Sparse&, const dim_vector&): dimension mismatch");
261 
262  dim_vector old_dims = a.dims ();
263  octave_idx_type new_nzmx = a.nnz ();
264  octave_idx_type new_nr = dv(0);
265  octave_idx_type new_nc = dv(1);
266  octave_idx_type old_nr = old_dims(0);
267  octave_idx_type old_nc = old_dims(1);
268 
269  rep = new typename Sparse<T>::SparseRep (new_nr, new_nc, new_nzmx);
270 
271  octave_idx_type kk = 0;
272  xcidx (0) = 0;
273  for (octave_idx_type i = 0; i < old_nc; i++)
274  for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++)
275  {
276  octave_idx_type tmp = i * old_nr + a.ridx (j);
277  octave_idx_type ii = tmp % new_nr;
278  octave_idx_type jj = (tmp - ii) / new_nr;
279  for (octave_idx_type k = kk; k < jj; k++)
280  xcidx (k+1) = j;
281  kk = jj;
282  xdata (j) = a.data (j);
283  xridx (j) = ii;
284  }
285  for (octave_idx_type k = kk; k < new_nc; k++)
286  xcidx (k+1) = new_nzmx;
287 }
288 
289 template <typename T>
291  const idx_vector& c, octave_idx_type nr,
292  octave_idx_type nc, bool sum_terms,
293  octave_idx_type nzm)
294  : rep (0), dimensions ()
295 {
296  if (nr < 0)
297  nr = r.extent (0);
298  else if (r.extent (nr) > nr)
299  (*current_liboctave_error_handler) ("sparse: row index %d out of bound %d",
300  r.extent (nr), nr);
301 
302  if (nc < 0)
303  nc = c.extent (0);
304  else if (c.extent (nc) > nc)
306  ("sparse: column index %d out of bound %d", r.extent (nc), nc);
307 
308  dimensions = dim_vector (nr, nc);
309 
310  octave_idx_type n = a.numel ();
311  octave_idx_type rl = r.length (nr);
312  octave_idx_type cl = c.length (nc);
313  bool a_scalar = n == 1;
314  if (a_scalar)
315  {
316  if (rl != 1)
317  n = rl;
318  else if (cl != 1)
319  n = cl;
320  }
321 
322  if ((rl != 1 && rl != n) || (cl != 1 && cl != n))
323  (*current_liboctave_error_handler) ("sparse: dimension mismatch");
324 
325  // Only create rep after input validation to avoid memory leak.
326  rep = new typename Sparse<T>::SparseRep (nr, nc, (nzm > 0 ? nzm : 0));
327 
328  if (rl <= 1 && cl <= 1)
329  {
330  if (n == 1 && a(0) != T ())
331  {
332  change_capacity (nzm > 1 ? nzm : 1);
333  xcidx (0) = 0;
334  xridx (0) = r(0);
335  xdata (0) = a(0);
336 
337  for (octave_idx_type j = 0; j < nc; j++)
338  xcidx (j+1) = j >= c(0);
339  }
340  }
341  else if (a_scalar)
342  {
343  // This is completely specialized, because the sorts can be simplified.
344  T a0 = a(0);
345  if (a0 == T ())
346  {
347  // Do nothing, it's an empty matrix.
348  }
349  else if (cl == 1)
350  {
351  // Sparse column vector. Sort row indices.
352  idx_vector rs = r.sorted ();
353 
354  octave_quit ();
355 
356  const octave_idx_type *rd = rs.raw ();
357  // Count unique indices.
358  octave_idx_type new_nz = 1;
359  for (octave_idx_type i = 1; i < n; i++)
360  new_nz += rd[i-1] != rd[i];
361  // Allocate result.
362  change_capacity (nzm > new_nz ? nzm : new_nz);
363  xcidx (0) = 0;
364  xcidx (1) = new_nz;
365  octave_idx_type *rri = ridx ();
366  T *rrd = data ();
367 
368  octave_quit ();
369 
370  octave_idx_type k = -1;
371  octave_idx_type l = -1;
372 
373  if (sum_terms)
374  {
375  // Sum repeated indices.
376  for (octave_idx_type i = 0; i < n; i++)
377  {
378  if (rd[i] != l)
379  {
380  l = rd[i];
381  rri[++k] = rd[i];
382  rrd[k] = a0;
383  }
384  else
385  rrd[k] += a0;
386  }
387  }
388  else
389  {
390  // Pick the last one.
391  for (octave_idx_type i = 0; i < n; i++)
392  {
393  if (rd[i] != l)
394  {
395  l = rd[i];
396  rri[++k] = rd[i];
397  rrd[k] = a0;
398  }
399  }
400  }
401 
402  }
403  else
404  {
405  idx_vector rr = r;
406  idx_vector cc = c;
407  const octave_idx_type *rd = rr.raw ();
408  const octave_idx_type *cd = cc.raw ();
410  ci[0] = 0;
411  // Bin counts of column indices.
412  for (octave_idx_type i = 0; i < n; i++)
413  ci[cd[i]+1]++;
414  // Make them cumulative, shifted one to right.
415  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
416  {
417  octave_idx_type s1 = s + ci[i];
418  ci[i] = s;
419  s = s1;
420  }
421 
422  octave_quit ();
423 
424  // Bucket sort.
426  for (octave_idx_type i = 0; i < n; i++)
427  if (rl == 1)
428  sidx[ci[cd[i]+1]++] = rd[0];
429  else
430  sidx[ci[cd[i]+1]++] = rd[i];
431 
432  // Subsorts. We don't need a stable sort, all values are equal.
433  xcidx (0) = 0;
434  for (octave_idx_type j = 0; j < nc; j++)
435  {
436  std::sort (sidx + ci[j], sidx + ci[j+1]);
437  octave_idx_type l = -1;
438  octave_idx_type nzj = 0;
439  // Count.
440  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
441  {
442  octave_idx_type k = sidx[i];
443  if (k != l)
444  {
445  l = k;
446  nzj++;
447  }
448  }
449  // Set column pointer.
450  xcidx (j+1) = xcidx (j) + nzj;
451  }
452 
453  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
454  octave_idx_type *rri = ridx ();
455  T *rrd = data ();
456 
457  // Fill-in data.
458  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
459  {
460  octave_quit ();
461  octave_idx_type l = -1;
462  if (sum_terms)
463  {
464  // Sum adjacent terms.
465  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
466  {
467  octave_idx_type k = sidx[i];
468  if (k != l)
469  {
470  l = k;
471  rrd[++jj] = a0;
472  rri[jj] = k;
473  }
474  else
475  rrd[jj] += a0;
476  }
477  }
478  else
479  {
480  // Use the last one.
481  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
482  {
483  octave_idx_type k = sidx[i];
484  if (k != l)
485  {
486  l = k;
487  rrd[++jj] = a0;
488  rri[jj] = k;
489  }
490  }
491  }
492  }
493  }
494  }
495  else if (cl == 1)
496  {
497  // Sparse column vector. Sort row indices.
499  idx_vector rs = r.sorted (rsi);
500 
501  octave_quit ();
502 
503  const octave_idx_type *rd = rs.raw ();
504  const octave_idx_type *rdi = rsi.data ();
505  // Count unique indices.
506  octave_idx_type new_nz = 1;
507  for (octave_idx_type i = 1; i < n; i++)
508  new_nz += rd[i-1] != rd[i];
509  // Allocate result.
510  change_capacity (nzm > new_nz ? nzm : new_nz);
511  xcidx (0) = 0;
512  xcidx (1) = new_nz;
513  octave_idx_type *rri = ridx ();
514  T *rrd = data ();
515 
516  octave_quit ();
517 
518  octave_idx_type k = 0;
519  rri[k] = rd[0];
520  rrd[k] = a(rdi[0]);
521 
522  if (sum_terms)
523  {
524  // Sum repeated indices.
525  for (octave_idx_type i = 1; i < n; i++)
526  {
527  if (rd[i] != rd[i-1])
528  {
529  rri[++k] = rd[i];
530  rrd[k] = a(rdi[i]);
531  }
532  else
533  rrd[k] += a(rdi[i]);
534  }
535  }
536  else
537  {
538  // Pick the last one.
539  for (octave_idx_type i = 1; i < n; i++)
540  {
541  if (rd[i] != rd[i-1])
542  rri[++k] = rd[i];
543  rrd[k] = a(rdi[i]);
544  }
545  }
546 
547  maybe_compress (true);
548  }
549  else
550  {
551  idx_vector rr = r;
552  idx_vector cc = c;
553  const octave_idx_type *rd = rr.raw ();
554  const octave_idx_type *cd = cc.raw ();
556  ci[0] = 0;
557  // Bin counts of column indices.
558  for (octave_idx_type i = 0; i < n; i++)
559  ci[cd[i]+1]++;
560  // Make them cumulative, shifted one to right.
561  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
562  {
563  octave_idx_type s1 = s + ci[i];
564  ci[i] = s;
565  s = s1;
566  }
567 
568  octave_quit ();
569 
570  typedef std::pair<octave_idx_type, octave_idx_type> idx_pair;
571  // Bucket sort.
572  OCTAVE_LOCAL_BUFFER (idx_pair, spairs, n);
573  for (octave_idx_type i = 0; i < n; i++)
574  {
575  idx_pair& p = spairs[ci[cd[i]+1]++];
576  if (rl == 1)
577  p.first = rd[0];
578  else
579  p.first = rd[i];
580  p.second = i;
581  }
582 
583  // Subsorts. We don't need a stable sort, the second index stabilizes it.
584  xcidx (0) = 0;
585  for (octave_idx_type j = 0; j < nc; j++)
586  {
587  std::sort (spairs + ci[j], spairs + ci[j+1]);
588  octave_idx_type l = -1;
589  octave_idx_type nzj = 0;
590  // Count.
591  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
592  {
593  octave_idx_type k = spairs[i].first;
594  if (k != l)
595  {
596  l = k;
597  nzj++;
598  }
599  }
600  // Set column pointer.
601  xcidx (j+1) = xcidx (j) + nzj;
602  }
603 
604  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
605  octave_idx_type *rri = ridx ();
606  T *rrd = data ();
607 
608  // Fill-in data.
609  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
610  {
611  octave_quit ();
612  octave_idx_type l = -1;
613  if (sum_terms)
614  {
615  // Sum adjacent terms.
616  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
617  {
618  octave_idx_type k = spairs[i].first;
619  if (k != l)
620  {
621  l = k;
622  rrd[++jj] = a(spairs[i].second);
623  rri[jj] = k;
624  }
625  else
626  rrd[jj] += a(spairs[i].second);
627  }
628  }
629  else
630  {
631  // Use the last one.
632  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
633  {
634  octave_idx_type k = spairs[i].first;
635  if (k != l)
636  {
637  l = k;
638  rri[++jj] = k;
639  }
640  rrd[jj] = a(spairs[i].second);
641  }
642  }
643  }
644 
645  maybe_compress (true);
646  }
647 }
648 
649 template <typename T>
651  : rep (0), dimensions (a.dims ())
652 {
653  if (dimensions.ndims () > 2)
655  ("Sparse::Sparse (const Array<T>&): dimension mismatch");
656 
657  octave_idx_type nr = rows ();
658  octave_idx_type nc = cols ();
659  octave_idx_type len = a.numel ();
660  octave_idx_type new_nzmx = 0;
661 
662  // First count the number of nonzero terms
663  for (octave_idx_type i = 0; i < len; i++)
664  if (a(i) != T ())
665  new_nzmx++;
666 
667  rep = new typename Sparse<T>::SparseRep (nr, nc, new_nzmx);
668 
669  octave_idx_type ii = 0;
670  xcidx (0) = 0;
671  for (octave_idx_type j = 0; j < nc; j++)
672  {
673  for (octave_idx_type i = 0; i < nr; i++)
674  if (a.elem (i,j) != T ())
675  {
676  xdata (ii) = a.elem (i,j);
677  xridx (ii++) = i;
678  }
679  xcidx (j+1) = ii;
680  }
681 }
682 
683 template <typename T>
685 {
686  if (--rep->count == 0)
687  delete rep;
688 }
689 
690 template <typename T>
691 Sparse<T>&
693 {
694  if (this != &a)
695  {
696  if (--rep->count == 0)
697  delete rep;
698 
699  rep = a.rep;
700  rep->count++;
701 
702  dimensions = a.dimensions;
703  }
704 
705  return *this;
706 }
707 
708 template <typename T>
711 {
712  octave_idx_type n = dimensions.ndims ();
713 
714  if (n <= 0 || n != ra_idx.numel ())
716  ("Sparse<T>::compute_index: invalid ra_idxing operation");
717 
718  octave_idx_type retval = -1;
719 
720  retval = ra_idx(--n);
721 
722  while (--n >= 0)
723  {
724  retval *= dimensions(n);
725  retval += ra_idx(n);
726  }
727 
728  return retval;
729 }
730 
731 template <typename T>
732 T
734 {
735  (*current_liboctave_error_handler) ("%s (%d): range error", fcn, n);
736 }
737 
738 template <typename T>
739 T&
741 {
742  (*current_liboctave_error_handler) ("%s (%d): range error", fcn, n);
743 }
744 
745 template <typename T>
746 T
748  octave_idx_type j) const
749 {
750  (*current_liboctave_error_handler) ("%s (%d, %d): range error", fcn, i, j);
751 }
752 
753 template <typename T>
754 T&
756 {
757  (*current_liboctave_error_handler) ("%s (%d, %d): range error", fcn, i, j);
758 }
759 
760 template <typename T>
761 T
763  const Array<octave_idx_type>& ra_idx) const
764 {
765  std::ostringstream buf;
766 
767  buf << fcn << " (";
768 
769  octave_idx_type n = ra_idx.numel ();
770 
771  if (n > 0)
772  buf << ra_idx(0);
773 
774  for (octave_idx_type i = 1; i < n; i++)
775  buf << ", " << ra_idx(i);
776 
777  buf << "): range error";
778 
779  std::string buf_str = buf.str ();
780 
781  (*current_liboctave_error_handler) (buf_str.c_str ());
782 }
783 
784 template <typename T>
785 T&
787 {
788  std::ostringstream buf;
789 
790  buf << fcn << " (";
791 
792  octave_idx_type n = ra_idx.numel ();
793 
794  if (n > 0)
795  buf << ra_idx(0);
796 
797  for (octave_idx_type i = 1; i < n; i++)
798  buf << ", " << ra_idx(i);
799 
800  buf << "): range error";
801 
802  std::string buf_str = buf.str ();
803 
804  (*current_liboctave_error_handler) (buf_str.c_str ());
805 }
806 
807 template <typename T>
808 Sparse<T>
809 Sparse<T>::reshape (const dim_vector& new_dims) const
810 {
812  dim_vector dims2 = new_dims;
813 
814  if (dims2.ndims () > 2)
815  {
816  (*current_liboctave_warning_with_id_handler)
817  ("Octave:reshape-smashes-dims",
818  "reshape: sparse reshape to N-D array smashes dims");
819 
820  for (octave_idx_type i = 2; i < dims2.ndims (); i++)
821  dims2(1) *= dims2(i);
822 
823  dims2.resize (2);
824  }
825 
826  if (dimensions != dims2)
827  {
828  if (dimensions.numel () == dims2.numel ())
829  {
830  octave_idx_type new_nnz = nnz ();
831  octave_idx_type new_nr = dims2 (0);
832  octave_idx_type new_nc = dims2 (1);
833  octave_idx_type old_nr = rows ();
834  octave_idx_type old_nc = cols ();
835  retval = Sparse<T> (new_nr, new_nc, new_nnz);
836 
837  octave_idx_type kk = 0;
838  retval.xcidx (0) = 0;
839  // Quotient and remainder of i * old_nr divided by new_nr.
840  // Track them individually to avoid overflow (bug #42850).
841  octave_idx_type i_old_qu = 0;
842  octave_idx_type i_old_rm = static_cast<octave_idx_type> (-old_nr);
843  for (octave_idx_type i = 0; i < old_nc; i++)
844  {
845  i_old_rm += old_nr;
846  if (i_old_rm >= new_nr)
847  {
848  i_old_qu += i_old_rm / new_nr;
849  i_old_rm = i_old_rm % new_nr;
850  }
851  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
852  {
853  octave_idx_type ii, jj;
854  ii = (i_old_rm + ridx (j)) % new_nr;
855  jj = i_old_qu + (i_old_rm + ridx (j)) / new_nr;
856 
857  // Original calculation subject to overflow
858  // ii = (i*old_nr + ridx (j)) % new_nr
859  // jj = (i*old_nr + ridx (j)) / new_nr
860 
861  for (octave_idx_type k = kk; k < jj; k++)
862  retval.xcidx (k+1) = j;
863  kk = jj;
864  retval.xdata (j) = data (j);
865  retval.xridx (j) = ii;
866  }
867  }
868  for (octave_idx_type k = kk; k < new_nc; k++)
869  retval.xcidx (k+1) = new_nnz;
870  }
871  else
872  {
873  std::string dimensions_str = dimensions.str ();
874  std::string new_dims_str = new_dims.str ();
875 
876  (*current_liboctave_error_handler)
877  ("reshape: can't reshape %s array to %s array",
878  dimensions_str.c_str (), new_dims_str.c_str ());
879  }
880  }
881  else
882  retval = *this;
883 
884  return retval;
885 }
886 
887 template <typename T>
888 Sparse<T>
889 Sparse<T>::permute (const Array<octave_idx_type>& perm_vec, bool) const
890 {
891  // The only valid permutations of a sparse array are [1, 2] and [2, 1].
892 
893  bool fail = false;
894  bool trans = false;
895 
896  if (perm_vec.numel () == 2)
897  {
898  if (perm_vec(0) == 0 && perm_vec(1) == 1)
899  /* do nothing */;
900  else if (perm_vec(0) == 1 && perm_vec(1) == 0)
901  trans = true;
902  else
903  fail = true;
904  }
905  else
906  fail = true;
907 
908  if (fail)
909  (*current_liboctave_error_handler)
910  ("permutation vector contains an invalid element");
911 
912  return trans ? this->transpose () : *this;
913 }
914 
915 template <typename T>
916 void
918 {
919  octave_idx_type nr = rows ();
920  octave_idx_type nc = cols ();
921 
922  if (nr == 0)
923  resize (1, std::max (nc, n));
924  else if (nc == 0)
925  resize (nr, (n + nr - 1) / nr); // Ain't it wicked?
926  else if (nr == 1)
927  resize (1, n);
928  else if (nc == 1)
929  resize (n, 1);
930  else
932 }
933 
934 template <typename T>
935 void
937 {
938  octave_idx_type n = dv.ndims ();
939 
940  if (n != 2)
941  (*current_liboctave_error_handler) ("sparse array must be 2-D");
942 
943  resize (dv(0), dv(1));
944 }
945 
946 template <typename T>
947 void
949 {
950  if (r < 0 || c < 0)
951  (*current_liboctave_error_handler) ("can't resize to negative dimension");
952 
953  if (r == dim1 () && c == dim2 ())
954  return;
955 
956  // This wouldn't be necessary for r >= rows () if nrows wasn't part of the
957  // Sparse rep. It is not good for anything in there.
958  make_unique ();
959 
960  if (r < rows ())
961  {
962  octave_idx_type i = 0;
963  octave_idx_type k = 0;
964  for (octave_idx_type j = 1; j <= rep->ncols; j++)
965  {
966  octave_idx_type u = xcidx (j);
967  for (; i < u; i++)
968  if (xridx (i) < r)
969  {
970  xdata (k) = xdata (i);
971  xridx (k++) = xridx (i);
972  }
973  xcidx (j) = k;
974  }
975  }
976 
977  rep->nrows = dimensions(0) = r;
978 
979  if (c != rep->ncols)
980  {
981  octave_idx_type *new_cidx = new octave_idx_type [c+1];
982  std::copy (rep->c, rep->c + std::min (c, rep->ncols) + 1, new_cidx);
983  delete [] rep->c;
984  rep->c = new_cidx;
985 
986  if (c > rep->ncols)
987  std::fill_n (rep->c + rep->ncols + 1, c - rep->ncols,
988  rep->c[rep->ncols]);
989  }
990 
991  rep->ncols = dimensions(1) = c;
992 
993  rep->change_length (rep->nnz ());
994 }
995 
996 template <typename T>
997 Sparse<T>&
999 {
1000  octave_idx_type a_rows = a.rows ();
1001  octave_idx_type a_cols = a.cols ();
1002  octave_idx_type nr = rows ();
1003  octave_idx_type nc = cols ();
1004 
1005  if (r < 0 || r + a_rows > rows () || c < 0 || c + a_cols > cols ())
1006  (*current_liboctave_error_handler) ("range error for insert");
1007 
1008  // First count the number of elements in the final array
1009  octave_idx_type nel = cidx (c) + a.nnz ();
1010 
1011  if (c + a_cols < nc)
1012  nel += cidx (nc) - cidx (c + a_cols);
1013 
1014  for (octave_idx_type i = c; i < c + a_cols; i++)
1015  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1016  if (ridx (j) < r || ridx (j) >= r + a_rows)
1017  nel++;
1018 
1019  Sparse<T> tmp (*this);
1020  --rep->count;
1021  rep = new typename Sparse<T>::SparseRep (nr, nc, nel);
1022 
1023  for (octave_idx_type i = 0; i < tmp.cidx (c); i++)
1024  {
1025  data (i) = tmp.data (i);
1026  ridx (i) = tmp.ridx (i);
1027  }
1028  for (octave_idx_type i = 0; i < c + 1; i++)
1029  cidx (i) = tmp.cidx (i);
1030 
1031  octave_idx_type ii = cidx (c);
1032 
1033  for (octave_idx_type i = c; i < c + a_cols; i++)
1034  {
1035  octave_quit ();
1036 
1037  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1038  if (tmp.ridx (j) < r)
1039  {
1040  data (ii) = tmp.data (j);
1041  ridx (ii++) = tmp.ridx (j);
1042  }
1043 
1044  octave_quit ();
1045 
1046  for (octave_idx_type j = a.cidx (i-c); j < a.cidx (i-c+1); j++)
1047  {
1048  data (ii) = a.data (j);
1049  ridx (ii++) = r + a.ridx (j);
1050  }
1051 
1052  octave_quit ();
1053 
1054  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1055  if (tmp.ridx (j) >= r + a_rows)
1056  {
1057  data (ii) = tmp.data (j);
1058  ridx (ii++) = tmp.ridx (j);
1059  }
1060 
1061  cidx (i+1) = ii;
1062  }
1063 
1064  for (octave_idx_type i = c + a_cols; i < nc; i++)
1065  {
1066  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1067  {
1068  data (ii) = tmp.data (j);
1069  ridx (ii++) = tmp.ridx (j);
1070  }
1071  cidx (i+1) = ii;
1072  }
1073 
1074  return *this;
1075 }
1076 
1077 template <typename T>
1078 Sparse<T>&
1080 {
1081 
1082  if (ra_idx.numel () != 2)
1083  (*current_liboctave_error_handler) ("range error for insert");
1084 
1085  return insert (a, ra_idx(0), ra_idx(1));
1086 }
1087 
1088 template <typename T>
1089 Sparse<T>
1091 {
1092  assert (ndims () == 2);
1093 
1094  octave_idx_type nr = rows ();
1095  octave_idx_type nc = cols ();
1096  octave_idx_type nz = nnz ();
1097  Sparse<T> retval (nc, nr, nz);
1098 
1099  for (octave_idx_type i = 0; i < nz; i++)
1100  retval.xcidx (ridx (i) + 1)++;
1101  // retval.xcidx[1:nr] holds the row degrees for rows 0:(nr-1)
1102  nz = 0;
1103  for (octave_idx_type i = 1; i <= nr; i++)
1104  {
1105  const octave_idx_type tmp = retval.xcidx (i);
1106  retval.xcidx (i) = nz;
1107  nz += tmp;
1108  }
1109  // retval.xcidx[1:nr] holds row entry *start* offsets for rows 0:(nr-1)
1110 
1111  for (octave_idx_type j = 0; j < nc; j++)
1112  for (octave_idx_type k = cidx (j); k < cidx (j+1); k++)
1113  {
1114  octave_idx_type q = retval.xcidx (ridx (k) + 1)++;
1115  retval.xridx (q) = j;
1116  retval.xdata (q) = data (k);
1117  }
1118  assert (nnz () == retval.xcidx (nr));
1119  // retval.xcidx[1:nr] holds row entry *end* offsets for rows 0:(nr-1)
1120  // and retval.xcidx[0:(nr-1)] holds their row entry *start* offsets
1121 
1122  return retval;
1123 }
1124 
1125 // Lower bound lookup. Could also use octave_sort, but that has upper bound
1126 // semantics, so requires some manipulation to set right. Uses a plain loop
1127 // for small columns.
1128 static octave_idx_type
1130  octave_idx_type ri)
1131 {
1132  if (nr <= 8)
1133  {
1134  octave_idx_type l;
1135  for (l = 0; l < nr; l++)
1136  if (ridx[l] >= ri)
1137  break;
1138  return l;
1139  }
1140  else
1141  return std::lower_bound (ridx, ridx + nr, ri) - ridx;
1142 }
1143 
1144 template <typename T>
1145 void
1147 {
1148  Sparse<T> retval;
1149 
1150  assert (ndims () == 2);
1151 
1152  octave_idx_type nr = dim1 ();
1153  octave_idx_type nc = dim2 ();
1154  octave_idx_type nz = nnz ();
1155 
1156  octave_idx_type nel = numel (); // Can throw.
1157 
1158  const dim_vector idx_dims = idx.orig_dimensions ();
1159 
1160  if (idx.extent (nel) > nel)
1161  octave::err_del_index_out_of_range (true, idx.extent (nel), nel);
1162 
1163  if (nc == 1)
1164  {
1165  // Sparse column vector.
1166  const Sparse<T> tmp = *this; // constant copy to prevent COW.
1167 
1168  octave_idx_type lb, ub;
1169 
1170  if (idx.is_cont_range (nel, lb, ub))
1171  {
1172  // Special-case a contiguous range.
1173  // Look-up indices first.
1174  octave_idx_type li = lblookup (tmp.ridx (), nz, lb);
1175  octave_idx_type ui = lblookup (tmp.ridx (), nz, ub);
1176  // Copy data and adjust indices.
1177  octave_idx_type nz_new = nz - (ui - li);
1178  *this = Sparse<T> (nr - (ub - lb), 1, nz_new);
1179  std::copy (tmp.data (), tmp.data () + li, data ());
1180  std::copy (tmp.ridx (), tmp.ridx () + li, xridx ());
1181  std::copy (tmp.data () + ui, tmp.data () + nz, xdata () + li);
1182  mx_inline_sub (nz - ui, xridx () + li, tmp.ridx () + ui, ub - lb);
1183  xcidx (1) = nz_new;
1184  }
1185  else
1186  {
1187  OCTAVE_LOCAL_BUFFER (octave_idx_type, ridx_new, nz);
1188  OCTAVE_LOCAL_BUFFER (T, data_new, nz);
1189  idx_vector sidx = idx.sorted (true);
1190  const octave_idx_type *sj = sidx.raw ();
1191  octave_idx_type sl = sidx.length (nel);
1192  octave_idx_type nz_new = 0;
1193  octave_idx_type j = 0;
1194  for (octave_idx_type i = 0; i < nz; i++)
1195  {
1196  octave_idx_type r = tmp.ridx (i);
1197  for (; j < sl && sj[j] < r; j++) ;
1198  if (j == sl || sj[j] > r)
1199  {
1200  data_new[nz_new] = tmp.data (i);
1201  ridx_new[nz_new++] = r - j;
1202  }
1203  }
1204 
1205  *this = Sparse<T> (nr - sl, 1, nz_new);
1206  std::copy (ridx_new, ridx_new + nz_new, ridx ());
1207  std::copy (data_new, data_new + nz_new, xdata ());
1208  xcidx (1) = nz_new;
1209  }
1210  }
1211  else if (nr == 1)
1212  {
1213  // Sparse row vector.
1214  octave_idx_type lb, ub;
1215  if (idx.is_cont_range (nc, lb, ub))
1216  {
1217  const Sparse<T> tmp = *this;
1218  octave_idx_type lbi = tmp.cidx (lb);
1219  octave_idx_type ubi = tmp.cidx (ub);
1220  octave_idx_type new_nz = nz - (ubi - lbi);
1221  *this = Sparse<T> (1, nc - (ub - lb), new_nz);
1222  std::copy (tmp.data (), tmp.data () + lbi, data ());
1223  std::copy (tmp.data () + ubi, tmp.data () + nz , xdata () + lbi);
1224  std::fill_n (ridx (), new_nz, static_cast<octave_idx_type> (0));
1225  std::copy (tmp.cidx () + 1, tmp.cidx () + 1 + lb, cidx () + 1);
1226  mx_inline_sub (nc - ub, xcidx () + 1, tmp.cidx () + ub + 1,
1227  ubi - lbi);
1228  }
1229  else
1230  *this = index (idx.complement (nc));
1231  }
1232  else if (idx.length (nel) != 0)
1233  {
1234  if (idx.is_colon_equiv (nel))
1235  *this = Sparse<T> ();
1236  else
1237  {
1238  *this = index (idx_vector::colon);
1239  delete_elements (idx);
1240  *this = transpose (); // We want a row vector.
1241  }
1242  }
1243 }
1244 
1245 template <typename T>
1246 void
1248 {
1249  assert (ndims () == 2);
1250 
1251  octave_idx_type nr = dim1 ();
1252  octave_idx_type nc = dim2 ();
1253  octave_idx_type nz = nnz ();
1254 
1255  if (idx_i.is_colon ())
1256  {
1257  // Deleting columns.
1258  octave_idx_type lb, ub;
1259  if (idx_j.extent (nc) > nc)
1260  octave::err_del_index_out_of_range (false, idx_j.extent (nc), nc);
1261  else if (idx_j.is_cont_range (nc, lb, ub))
1262  {
1263  if (lb == 0 && ub == nc)
1264  {
1265  // Delete all rows and columns.
1266  *this = Sparse<T> (nr, 0);
1267  }
1268  else if (nz == 0)
1269  {
1270  // No elements to preserve; adjust dimensions.
1271  *this = Sparse<T> (nr, nc - (ub - lb));
1272  }
1273  else
1274  {
1275  const Sparse<T> tmp = *this;
1276  octave_idx_type lbi = tmp.cidx (lb);
1277  octave_idx_type ubi = tmp.cidx (ub);
1278  octave_idx_type new_nz = nz - (ubi - lbi);
1279 
1280  *this = Sparse<T> (nr, nc - (ub - lb), new_nz);
1281  std::copy (tmp.data (), tmp.data () + lbi, data ());
1282  std::copy (tmp.ridx (), tmp.ridx () + lbi, ridx ());
1283  std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1284  std::copy (tmp.ridx () + ubi, tmp.ridx () + nz, xridx () + lbi);
1285  std::copy (tmp.cidx () + 1, tmp.cidx () + 1 + lb, cidx () + 1);
1286  mx_inline_sub (nc - ub, xcidx () + lb + 1,
1287  tmp.cidx () + ub + 1, ubi - lbi);
1288  }
1289  }
1290  else
1291  *this = index (idx_i, idx_j.complement (nc));
1292  }
1293  else if (idx_j.is_colon ())
1294  {
1295  // Deleting rows.
1296  octave_idx_type lb, ub;
1297  if (idx_i.extent (nr) > nr)
1298  octave::err_del_index_out_of_range (false, idx_i.extent (nr), nr);
1299  else if (idx_i.is_cont_range (nr, lb, ub))
1300  {
1301  if (lb == 0 && ub == nr)
1302  {
1303  // Delete all rows and columns.
1304  *this = Sparse<T> (0, nc);
1305  }
1306  else if (nz == 0)
1307  {
1308  // No elements to preserve; adjust dimensions.
1309  *this = Sparse<T> (nr - (ub - lb), nc);
1310  }
1311  else
1312  {
1313  // This is more memory-efficient than the approach below.
1314  const Sparse<T> tmpl = index (idx_vector (0, lb), idx_j);
1315  const Sparse<T> tmpu = index (idx_vector (ub, nr), idx_j);
1316  *this = Sparse<T> (nr - (ub - lb), nc,
1317  tmpl.nnz () + tmpu.nnz ());
1318  for (octave_idx_type j = 0, k = 0; j < nc; j++)
1319  {
1320  for (octave_idx_type i = tmpl.cidx (j); i < tmpl.cidx (j+1);
1321  i++)
1322  {
1323  xdata (k) = tmpl.data (i);
1324  xridx (k++) = tmpl.ridx (i);
1325  }
1326  for (octave_idx_type i = tmpu.cidx (j); i < tmpu.cidx (j+1);
1327  i++)
1328  {
1329  xdata (k) = tmpu.data (i);
1330  xridx (k++) = tmpu.ridx (i) + lb;
1331  }
1332 
1333  xcidx (j+1) = k;
1334  }
1335  }
1336  }
1337  else
1338  {
1339  // This is done by transposing, deleting columns, then transposing
1340  // again.
1341  Sparse<T> tmp = transpose ();
1342  tmp.delete_elements (idx_j, idx_i);
1343  *this = tmp.transpose ();
1344  }
1345  }
1346  else
1347  {
1348  // Empty assignment (no elements to delete) is OK if there is at
1349  // least one zero-length index and at most one other index that is
1350  // non-colon (or equivalent) index. Since we only have two
1351  // indices, we just need to check that we have at least one zero
1352  // length index. Matlab considers "[]" to be an empty index but
1353  // not "false". We accept both.
1354 
1355  bool empty_assignment
1356  = (idx_i.length (nr) == 0 || idx_j.length (nc) == 0);
1357 
1358  if (! empty_assignment)
1359  (*current_liboctave_error_handler)
1360  ("a null assignment can only have one non-colon index");
1361  }
1362 }
1363 
1364 template <typename T>
1365 void
1367 {
1368  if (dim == 0)
1369  delete_elements (idx, idx_vector::colon);
1370  else if (dim == 1)
1371  delete_elements (idx_vector::colon, idx);
1372  else
1373  (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1374 }
1375 
// Extract A(idx) for a single (linear) index vector IDX applied to this
// 2-D sparse matrix (stored in compressed-sparse-column form: data/ridx
// hold nonzeros and their row numbers, cidx holds per-column offsets).
// If resize_ok is true, an out-of-bound index implicitly zero-pads the
// matrix first instead of raising an error.
template <typename T>
Sparse<T>
Sparse<T>::index (const idx_vector& idx, bool resize_ok) const
{
  Sparse<T> retval;

  assert (ndims () == 2);

  octave_idx_type nr = dim1 ();
  octave_idx_type nc = dim2 ();
  octave_idx_type nz = nnz ();

  octave_idx_type nel = numel ();  // Can throw.

  const dim_vector idx_dims = idx.orig_dimensions ().redim (2);

  if (idx.is_colon ())
    {
      if (nc == 1)
        retval = *this;
      else
        {
          // Fast magic colon processing.
          // A(:) is the column vector of all elements; each nonzero keeps
          // its data and gets the linearized row index ridx + i*nr.
          retval = Sparse<T> (nel, 1, nz);

          for (octave_idx_type i = 0; i < nc; i++)
            {
              for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
                {
                  retval.xdata (j) = data (j);
                  retval.xridx (j) = ridx (j) + i * nr;
                }
            }

          retval.xcidx (0) = 0;
          retval.xcidx (1) = nz;
        }
    }
  else if (idx.extent (nel) > nel)
    {
      if (! resize_ok)
        octave::err_index_out_of_range (1, 1, idx.extent (nel), nel, dims ());

      // resize_ok is completely handled here.
      // Grow a copy to cover the index extent, then index normally.
      octave_idx_type ext = idx.extent (nel);
      Sparse<T> tmp = *this;
      tmp.resize1 (ext);
      retval = tmp.index (idx);
    }
  else if (nr == 1 && nc == 1)
    {
      // You have to be pretty sick to get to this bit of code,
      // since you have a scalar stored as a sparse matrix, and
      // then want to make a dense matrix with sparse representation.
      // Ok, we'll do it, but you deserve what you get!!
      retval = Sparse<T> (idx_dims(0), idx_dims(1), nz ? data (0) : T ());
    }
  else if (nc == 1)
    {
      // Sparse column vector.
      octave_idx_type lb, ub;

      if (idx.is_scalar ())
        {
          // Scalar index - just a binary lookup.
          octave_idx_type i = lblookup (ridx (), nz, idx(0));
          if (i < nz && ridx (i) == idx(0))
            retval = Sparse (1, 1, data (i));
          else
            retval = Sparse (1, 1);
        }
      else if (idx.is_cont_range (nel, lb, ub))
        {
          // Special-case a contiguous range.
          // Look-up indices first.
          octave_idx_type li = lblookup (ridx (), nz, lb);
          octave_idx_type ui = lblookup (ridx (), nz, ub);
          // Copy data and adjust indices.
          octave_idx_type nz_new = ui - li;
          retval = Sparse<T> (ub - lb, 1, nz_new);
          std::copy (data () + li, data () + li + nz_new, retval.data ());
          // Shift the copied row indices down by lb.
          mx_inline_sub (nz_new, retval.xridx (), ridx () + li, lb);
          retval.xcidx (1) = nz_new;
        }
      else if (idx.is_permutation (nel) && idx.is_vector ())
        {
          if (idx.is_range () && idx.increment () == -1)
            {
              // idx is nel:-1:1 — a pure reversal of the vector.
              retval = Sparse<T> (nr, 1, nz);

              for (octave_idx_type j = 0; j < nz; j++)
                retval.ridx (j) = nr - ridx (nz - j - 1) - 1;

              std::copy (cidx (), cidx () + 2, retval.cidx ());
              std::reverse_copy (data (), data () + nz, retval.data ());
            }
          else
            {
              // General permutation: go through a dense copy.
              Array<T> tmp = array_value ();
              tmp = tmp.index (idx);
              retval = Sparse<T> (tmp);
            }
        }
      else
        {
          // If indexing a sparse column vector by a vector, the result is a
          // sparse column vector, otherwise it inherits the shape of index.
          // Vector transpose is cheap, so do it right here.

          Array<octave_idx_type> tmp_idx = idx.as_array ().as_matrix ();

          const Array<octave_idx_type> idxa = (idx_dims(0) == 1
                                               ? tmp_idx.transpose ()
                                               : tmp_idx);

          octave_idx_type new_nr = idxa.rows ();
          octave_idx_type new_nc = idxa.cols ();

          // Lookup.
          // FIXME: Could specialize for sorted idx?
          NoAlias< Array<octave_idx_type> > lidx (dim_vector (new_nr, new_nc));
          for (octave_idx_type i = 0; i < new_nr*new_nc; i++)
            lidx(i) = lblookup (ridx (), nz, idxa(i));

          // Count matches.  A miss is marked by setting lidx to nz so the
          // copy loop below can skip it.
          retval = Sparse<T> (idxa.rows (), idxa.cols ());
          for (octave_idx_type j = 0; j < new_nc; j++)
            {
              octave_idx_type nzj = 0;
              for (octave_idx_type i = 0; i < new_nr; i++)
                {
                  octave_idx_type l = lidx(i, j);
                  if (l < nz && ridx (l) == idxa(i, j))
                    nzj++;
                  else
                    lidx(i, j) = nz;
                }
              retval.xcidx (j+1) = retval.xcidx (j) + nzj;
            }

          retval.change_capacity (retval.xcidx (new_nc));

          // Copy data and set row indices.
          octave_idx_type k = 0;
          for (octave_idx_type j = 0; j < new_nc; j++)
            for (octave_idx_type i = 0; i < new_nr; i++)
              {
                octave_idx_type l = lidx(i, j);
                if (l < nz)
                  {
                    retval.data (k) = data (l);
                    retval.xridx (k++) = i;
                  }
              }
        }
    }
  else if (nr == 1)
    {
      // Sparse row vector: each element lives in its own column, so a
      // contiguous element range is a contiguous column range.
      octave_idx_type lb, ub;
      if (idx.is_scalar ())
        retval = Sparse<T> (1, 1, elem (0, idx(0)));
      else if (idx.is_cont_range (nel, lb, ub))
        {
          // Special-case a contiguous range.
          octave_idx_type lbi = cidx (lb);
          octave_idx_type ubi = cidx (ub);
          octave_idx_type new_nz = ubi - lbi;
          retval = Sparse<T> (1, ub - lb, new_nz);
          std::copy (data () + lbi, data () + lbi + new_nz, retval.data ());
          std::fill_n (retval.ridx (), new_nz, static_cast<octave_idx_type> (0));
          mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
        }
      else
        {
          // Sparse row vectors occupy O(nr) storage anyway, so let's just
          // convert the matrix to full, index, and sparsify the result.
          retval = Sparse<T> (array_value ().index (idx));
        }
    }
  else
    {
      if (nr != 0 && idx.is_scalar ())
        retval = Sparse<T> (1, 1, elem (idx(0) % nr, idx(0) / nr));
      else
        {
          // Indexing a non-vector sparse matrix by linear indexing.
          // I suppose this is rare (and it may easily overflow), so let's take
          // the easy way, and reshape first to column vector, which is already
          // handled above.
          retval = index (idx_vector::colon).index (idx);
          // In this case we're supposed to always inherit the shape, but
          // column(row) doesn't do it, so we'll do it instead.
          if (idx_dims(0) == 1 && idx_dims(1) != 1)
            retval = retval.transpose ();
        }
    }

  return retval;
}
1575 
1576 template <typename T>
1577 Sparse<T>
1578 Sparse<T>::index (const idx_vector& idx_i, const idx_vector& idx_j,
1579  bool resize_ok) const
1580 {
1581  Sparse<T> retval;
1582 
1583  assert (ndims () == 2);
1584 
1585  octave_idx_type nr = dim1 ();
1586  octave_idx_type nc = dim2 ();
1587 
1588  octave_idx_type n = idx_i.length (nr);
1589  octave_idx_type m = idx_j.length (nc);
1590 
1591  octave_idx_type lb, ub;
1592 
1593  if (idx_i.extent (nr) > nr || idx_j.extent (nc) > nc)
1594  {
1595  // resize_ok is completely handled here.
1596  if (resize_ok)
1597  {
1598  octave_idx_type ext_i = idx_i.extent (nr);
1599  octave_idx_type ext_j = idx_j.extent (nc);
1600  Sparse<T> tmp = *this;
1601  tmp.resize (ext_i, ext_j);
1602  retval = tmp.index (idx_i, idx_j);
1603  }
1604  else if (idx_i.extent (nr) > nr)
1605  octave::err_index_out_of_range (2, 1, idx_i.extent (nr), nr, dims ());
1606  else
1607  octave::err_index_out_of_range (2, 2, idx_j.extent (nc), nc, dims ());
1608  }
1609  else if (nr == 1 && nc == 1)
1610  {
1611  // Scalars stored as sparse matrices occupy more memory than
1612  // a scalar, so let's just convert the matrix to full, index,
1613  // and sparsify the result.
1614 
1615  retval = Sparse<T> (array_value ().index (idx_i, idx_j));
1616  }
1617  else if (idx_i.is_colon ())
1618  {
1619  // Great, we're just manipulating columns. This is going to be quite
1620  // efficient, because the columns can stay compressed as they are.
1621  if (idx_j.is_colon ())
1622  retval = *this; // Shallow copy.
1623  else if (idx_j.is_cont_range (nc, lb, ub))
1624  {
1625  // Special-case a contiguous range.
1626  octave_idx_type lbi = cidx (lb);
1627  octave_idx_type ubi = cidx (ub);
1628  octave_idx_type new_nz = ubi - lbi;
1629  retval = Sparse<T> (nr, ub - lb, new_nz);
1630  std::copy (data () + lbi, data () + lbi + new_nz, retval.data ());
1631  std::copy (ridx () + lbi, ridx () + lbi + new_nz, retval.ridx ());
1632  mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1633  }
1634  else
1635  {
1636  // Count new nonzero elements.
1637  retval = Sparse<T> (nr, m);
1638  for (octave_idx_type j = 0; j < m; j++)
1639  {
1640  octave_idx_type jj = idx_j(j);
1641  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1642  }
1643 
1644  retval.change_capacity (retval.xcidx (m));
1645 
1646  // Copy data & indices.
1647  for (octave_idx_type j = 0; j < m; j++)
1648  {
1649  octave_idx_type ljj = cidx (idx_j(j));
1650  octave_idx_type lj = retval.xcidx (j);
1651  octave_idx_type nzj = retval.xcidx (j+1) - lj;
1652 
1653  std::copy (data () + ljj, data () + ljj + nzj, retval.data () + lj);
1654  std::copy (ridx () + ljj, ridx () + ljj + nzj, retval.ridx () + lj);
1655  }
1656  }
1657  }
1658  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.is_vector ())
1659  {
1660  // It's actually vector indexing. The 1D index is specialized for that.
1661  retval = index (idx_i);
1662 
1663  // If nr == 1 then the vector indexing will return a column vector!!
1664  if (nr == 1)
1665  retval.transpose ();
1666  }
1667  else if (idx_i.is_scalar ())
1668  {
1669  octave_idx_type ii = idx_i(0);
1670  retval = Sparse<T> (1, m);
1672  for (octave_idx_type j = 0; j < m; j++)
1673  {
1674  octave_quit ();
1675  octave_idx_type jj = idx_j(j);
1676  octave_idx_type lj = cidx (jj);
1677  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1678 
1679  // Scalar index - just a binary lookup.
1680  octave_idx_type i = lblookup (ridx () + lj, nzj, ii);
1681  if (i < nzj && ridx (i+lj) == ii)
1682  {
1683  ij[j] = i + lj;
1684  retval.xcidx (j+1) = retval.xcidx (j) + 1;
1685  }
1686  else
1687  retval.xcidx (j+1) = retval.xcidx (j);
1688  }
1689 
1690  retval.change_capacity (retval.xcidx (m));
1691 
1692  // Copy data, adjust row indices.
1693  for (octave_idx_type j = 0; j < m; j++)
1694  {
1695  octave_idx_type i = retval.xcidx (j);
1696  if (retval.xcidx (j+1) > i)
1697  {
1698  retval.xridx (i) = 0;
1699  retval.xdata (i) = data (ij[j]);
1700  }
1701  }
1702  }
1703  else if (idx_i.is_cont_range (nr, lb, ub))
1704  {
1705  retval = Sparse<T> (n, m);
1708  for (octave_idx_type j = 0; j < m; j++)
1709  {
1710  octave_quit ();
1711  octave_idx_type jj = idx_j(j);
1712  octave_idx_type lj = cidx (jj);
1713  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1714  octave_idx_type lij, uij;
1715 
1716  // Lookup indices.
1717  li[j] = lij = lblookup (ridx () + lj, nzj, lb) + lj;
1718  ui[j] = uij = lblookup (ridx () + lj, nzj, ub) + lj;
1719  retval.xcidx (j+1) = retval.xcidx (j) + ui[j] - li[j];
1720  }
1721 
1722  retval.change_capacity (retval.xcidx (m));
1723 
1724  // Copy data, adjust row indices.
1725  for (octave_idx_type j = 0, k = 0; j < m; j++)
1726  {
1727  octave_quit ();
1728  for (octave_idx_type i = li[j]; i < ui[j]; i++)
1729  {
1730  retval.xdata (k) = data (i);
1731  retval.xridx (k++) = ridx (i) - lb;
1732  }
1733  }
1734  }
1735  else if (idx_i.is_permutation (nr))
1736  {
1737  // The columns preserve their length, just need to renumber and sort them.
1738  // Count new nonzero elements.
1739  retval = Sparse<T> (nr, m);
1740  for (octave_idx_type j = 0; j < m; j++)
1741  {
1742  octave_idx_type jj = idx_j(j);
1743  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1744  }
1745 
1746  retval.change_capacity (retval.xcidx (m));
1747 
1748  octave_quit ();
1749 
1750  if (idx_i.is_range () && idx_i.increment () == -1)
1751  {
1752  // It's nr:-1:1. Just flip all columns.
1753  for (octave_idx_type j = 0; j < m; j++)
1754  {
1755  octave_quit ();
1756  octave_idx_type jj = idx_j(j);
1757  octave_idx_type lj = cidx (jj);
1758  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1759  octave_idx_type li = retval.xcidx (j);
1760  octave_idx_type uj = lj + nzj - 1;
1761  for (octave_idx_type i = 0; i < nzj; i++)
1762  {
1763  retval.xdata (li + i) = data (uj - i); // Copy in reverse order.
1764  retval.xridx (li + i) = nr - 1 - ridx (uj - i); // Ditto with transform.
1765  }
1766  }
1767  }
1768  else
1769  {
1770  // Get inverse permutation.
1771  idx_vector idx_iinv = idx_i.inverse_permutation (nr);
1772  const octave_idx_type *iinv = idx_iinv.raw ();
1773 
1774  // Scatter buffer.
1775  OCTAVE_LOCAL_BUFFER (T, scb, nr);
1776  octave_idx_type *rri = retval.ridx ();
1777 
1778  for (octave_idx_type j = 0; j < m; j++)
1779  {
1780  octave_quit ();
1781  octave_idx_type jj = idx_j(j);
1782  octave_idx_type lj = cidx (jj);
1783  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1784  octave_idx_type li = retval.xcidx (j);
1785  // Scatter the column, transform indices.
1786  for (octave_idx_type i = 0; i < nzj; i++)
1787  scb[rri[li + i] = iinv[ridx (lj + i)]] = data (lj + i);
1788 
1789  octave_quit ();
1790 
1791  // Sort them.
1792  std::sort (rri + li, rri + li + nzj);
1793 
1794  // Gather.
1795  for (octave_idx_type i = 0; i < nzj; i++)
1796  retval.xdata (li + i) = scb[rri[li + i]];
1797  }
1798  }
1799 
1800  }
1801  else if (idx_j.is_colon ())
1802  {
1803  // This requires uncompressing columns, which is generally costly,
1804  // so we rely on the efficient transpose to handle this.
1805  // It may still make sense to optimize some cases here.
1806  retval = transpose ();
1807  retval = retval.index (idx_vector::colon, idx_i);
1808  retval = retval.transpose ();
1809  }
1810  else
1811  {
1812  // A(I, J) is decomposed into A(:, J)(I, :).
1813  retval = index (idx_vector::colon, idx_j);
1814  retval = retval.index (idx_i, idx_vector::colon);
1815  }
1816 
1817  return retval;
1818 }
1819 
1820 template <typename T>
1821 void
1822 Sparse<T>::assign (const idx_vector& idx, const Sparse<T>& rhs)
1823 {
1824  Sparse<T> retval;
1825 
1826  assert (ndims () == 2);
1827 
1828  octave_idx_type nr = dim1 ();
1829  octave_idx_type nc = dim2 ();
1830  octave_idx_type nz = nnz ();
1831 
1832  octave_idx_type n = numel (); // Can throw.
1833 
1834  octave_idx_type rhl = rhs.numel ();
1835 
1836  if (idx.length (n) == rhl)
1837  {
1838  if (rhl == 0)
1839  return;
1840 
1841  octave_idx_type nx = idx.extent (n);
1842  // Try to resize first if necessary.
1843  if (nx != n)
1844  {
1845  resize1 (nx);
1846  n = numel ();
1847  nr = rows ();
1848  nc = cols ();
1849  // nz is preserved.
1850  }
1851 
1852  if (idx.is_colon ())
1853  {
1854  *this = rhs.reshape (dimensions);
1855  }
1856  else if (nc == 1 && rhs.cols () == 1)
1857  {
1858  // Sparse column vector to sparse column vector assignment.
1859 
1860  octave_idx_type lb, ub;
1861  if (idx.is_cont_range (nr, lb, ub))
1862  {
1863  // Special-case a contiguous range.
1864  // Look-up indices first.
1865  octave_idx_type li = lblookup (ridx (), nz, lb);
1866  octave_idx_type ui = lblookup (ridx (), nz, ub);
1867  octave_idx_type rnz = rhs.nnz ();
1868  octave_idx_type new_nz = nz - (ui - li) + rnz;
1869 
1870  if (new_nz >= nz && new_nz <= nzmax ())
1871  {
1872  // Adding/overwriting elements, enough capacity allocated.
1873 
1874  if (new_nz > nz)
1875  {
1876  // Make room first.
1877  std::copy_backward (data () + ui, data () + nz,
1878  data () + nz + rnz);
1879  std::copy_backward (ridx () + ui, ridx () + nz,
1880  ridx () + nz + rnz);
1881  }
1882 
1883  // Copy data and adjust indices from rhs.
1884  std::copy (rhs.data (), rhs.data () + rnz, data () + li);
1885  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1886  }
1887  else
1888  {
1889  // Clearing elements or exceeding capacity, allocate afresh
1890  // and paste pieces.
1891  const Sparse<T> tmp = *this;
1892  *this = Sparse<T> (nr, 1, new_nz);
1893 
1894  // Head ...
1895  std::copy (tmp.data (), tmp.data () + li, data ());
1896  std::copy (tmp.ridx (), tmp.ridx () + li, ridx ());
1897 
1898  // new stuff ...
1899  std::copy (rhs.data (), rhs.data () + rnz, data () + li);
1900  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1901 
1902  // ...tail
1903  std::copy (tmp.data () + ui, tmp.data () + nz,
1904  data () + li + rnz);
1905  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
1906  ridx () + li + rnz);
1907  }
1908 
1909  cidx (1) = new_nz;
1910  }
1911  else if (idx.is_range () && idx.increment () == -1)
1912  {
1913  // It's s(u:-1:l) = r. Reverse the assignment.
1914  assign (idx.sorted (), rhs.index (idx_vector (rhl - 1, 0, -1)));
1915  }
1916  else if (idx.is_permutation (n))
1917  {
1918  *this = rhs.index (idx.inverse_permutation (n));
1919  }
1920  else if (rhs.nnz () == 0)
1921  {
1922  // Elements are being zeroed.
1923  octave_idx_type *ri = ridx ();
1924  for (octave_idx_type i = 0; i < rhl; i++)
1925  {
1926  octave_idx_type iidx = idx(i);
1927  octave_idx_type li = lblookup (ri, nz, iidx);
1928  if (li != nz && ri[li] == iidx)
1929  xdata (li) = T ();
1930  }
1931 
1932  maybe_compress (true);
1933  }
1934  else
1935  {
1936  const Sparse<T> tmp = *this;
1937  octave_idx_type new_nz = nz + rhl;
1938  // Disassembly our matrix...
1939  Array<octave_idx_type> new_ri (dim_vector (new_nz, 1));
1940  Array<T> new_data (dim_vector (new_nz, 1));
1941  std::copy (tmp.ridx (), tmp.ridx () + nz, new_ri.fortran_vec ());
1942  std::copy (tmp.data (), tmp.data () + nz, new_data.fortran_vec ());
1943  // ... insert new data (densified) ...
1944  idx.copy_data (new_ri.fortran_vec () + nz);
1945  new_data.assign (idx_vector (nz, new_nz), rhs.array_value ());
1946  // ... reassembly.
1947  *this = Sparse<T> (new_data, new_ri,
1948  static_cast<octave_idx_type> (0),
1949  nr, nc, false);
1950  }
1951  }
1952  else
1953  {
1954  dim_vector save_dims = dimensions;
1955  *this = index (idx_vector::colon);
1956  assign (idx, rhs.index (idx_vector::colon));
1957  *this = reshape (save_dims);
1958  }
1959  }
1960  else if (rhl == 1)
1961  {
1962  rhl = idx.length (n);
1963  if (rhs.nnz () != 0)
1964  assign (idx, Sparse<T> (rhl, 1, rhs.data (0)));
1965  else
1966  assign (idx, Sparse<T> (rhl, 1));
1967  }
1968  else
1969  octave::err_nonconformant ("=", dim_vector(idx.length (n),1), rhs.dims());
1970 }
1971 
1972 template <typename T>
1973 void
1975  const idx_vector& idx_j, const Sparse<T>& rhs)
1976 {
1977  Sparse<T> retval;
1978 
1979  assert (ndims () == 2);
1980 
1981  octave_idx_type nr = dim1 ();
1982  octave_idx_type nc = dim2 ();
1983  octave_idx_type nz = nnz ();
1984 
1985  octave_idx_type n = rhs.rows ();
1986  octave_idx_type m = rhs.columns ();
1987 
1988  // FIXME: this should probably be written more like the
1989  // Array<T>::assign function...
1990 
1991  bool orig_zero_by_zero = (nr == 0 && nc == 0);
1992 
1993  if (orig_zero_by_zero || (idx_i.length (nr) == n && idx_j.length (nc) == m))
1994  {
1995  octave_idx_type nrx;
1996  octave_idx_type ncx;
1997 
1998  if (orig_zero_by_zero)
1999  {
2000  if (idx_i.is_colon ())
2001  {
2002  nrx = n;
2003 
2004  if (idx_j.is_colon ())
2005  ncx = m;
2006  else
2007  ncx = idx_j.extent (nc);
2008  }
2009  else if (idx_j.is_colon ())
2010  {
2011  nrx = idx_i.extent (nr);
2012  ncx = m;
2013  }
2014  else
2015  {
2016  nrx = idx_i.extent (nr);
2017  ncx = idx_j.extent (nc);
2018  }
2019  }
2020  else
2021  {
2022  nrx = idx_i.extent (nr);
2023  ncx = idx_j.extent (nc);
2024  }
2025 
2026  // Try to resize first if necessary.
2027  if (nrx != nr || ncx != nc)
2028  {
2029  resize (nrx, ncx);
2030  nr = rows ();
2031  nc = cols ();
2032  // nz is preserved.
2033  }
2034 
2035  if (n == 0 || m == 0)
2036  return;
2037 
2038  if (idx_i.is_colon ())
2039  {
2040  octave_idx_type lb, ub;
2041  // Great, we're just manipulating columns. This is going to be quite
2042  // efficient, because the columns can stay compressed as they are.
2043  if (idx_j.is_colon ())
2044  *this = rhs; // Shallow copy.
2045  else if (idx_j.is_cont_range (nc, lb, ub))
2046  {
2047  // Special-case a contiguous range.
2048  octave_idx_type li = cidx (lb);
2049  octave_idx_type ui = cidx (ub);
2050  octave_idx_type rnz = rhs.nnz ();
2051  octave_idx_type new_nz = nz - (ui - li) + rnz;
2052 
2053  if (new_nz >= nz && new_nz <= nzmax ())
2054  {
2055  // Adding/overwriting elements, enough capacity allocated.
2056 
2057  if (new_nz > nz)
2058  {
2059  // Make room first.
2060  std::copy_backward (data () + ui, data () + nz,
2061  data () + new_nz);
2062  std::copy_backward (ridx () + ui, ridx () + nz,
2063  ridx () + new_nz);
2064  mx_inline_add2 (nc - ub, cidx () + ub + 1, new_nz - nz);
2065  }
2066 
2067  // Copy data and indices from rhs.
2068  std::copy (rhs.data (), rhs.data () + rnz, data () + li);
2069  std::copy (rhs.ridx (), rhs.ridx () + rnz, ridx () + li);
2070  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2071  li);
2072 
2073  assert (nnz () == new_nz);
2074  }
2075  else
2076  {
2077  // Clearing elements or exceeding capacity, allocate afresh
2078  // and paste pieces.
2079  const Sparse<T> tmp = *this;
2080  *this = Sparse<T> (nr, nc, new_nz);
2081 
2082  // Head...
2083  std::copy (tmp.data (), tmp.data () + li, data ());
2084  std::copy (tmp.ridx (), tmp.ridx () + li, ridx ());
2085  std::copy (tmp.cidx () + 1, tmp.cidx () + 1 + lb, cidx () + 1);
2086 
2087  // new stuff...
2088  std::copy (rhs.data (), rhs.data () + rnz, data () + li);
2089  std::copy (rhs.ridx (), rhs.ridx () + rnz, ridx () + li);
2090  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2091  li);
2092 
2093  // ...tail.
2094  std::copy (tmp.data () + ui, tmp.data () + nz,
2095  data () + li + rnz);
2096  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
2097  ridx () + li + rnz);
2098  mx_inline_add (nc - ub, cidx () + ub + 1,
2099  tmp.cidx () + ub + 1, new_nz - nz);
2100 
2101  assert (nnz () == new_nz);
2102  }
2103  }
2104  else if (idx_j.is_range () && idx_j.increment () == -1)
2105  {
2106  // It's s(:,u:-1:l) = r. Reverse the assignment.
2107  assign (idx_i, idx_j.sorted (),
2108  rhs.index (idx_i, idx_vector (m - 1, 0, -1)));
2109  }
2110  else if (idx_j.is_permutation (nc))
2111  {
2112  *this = rhs.index (idx_i, idx_j.inverse_permutation (nc));
2113  }
2114  else
2115  {
2116  const Sparse<T> tmp = *this;
2117  *this = Sparse<T> (nr, nc);
2118  OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, jsav, nc, -1);
2119 
2120  // Assemble column lengths.
2121  for (octave_idx_type i = 0; i < nc; i++)
2122  xcidx (i+1) = tmp.cidx (i+1) - tmp.cidx (i);
2123 
2124  for (octave_idx_type i = 0; i < m; i++)
2125  {
2126  octave_idx_type j =idx_j(i);
2127  jsav[j] = i;
2128  xcidx (j+1) = rhs.cidx (i+1) - rhs.cidx (i);
2129  }
2130 
2131  // Make cumulative.
2132  for (octave_idx_type i = 0; i < nc; i++)
2133  xcidx (i+1) += xcidx (i);
2134 
2135  change_capacity (nnz ());
2136 
2137  // Merge columns.
2138  for (octave_idx_type i = 0; i < nc; i++)
2139  {
2140  octave_idx_type l = xcidx (i);
2141  octave_idx_type u = xcidx (i+1);
2142  octave_idx_type j = jsav[i];
2143  if (j >= 0)
2144  {
2145  // from rhs
2146  octave_idx_type k = rhs.cidx (j);
2147  std::copy (rhs.data () + k, rhs.data () + k + u - l,
2148  xdata () + l);
2149  std::copy (rhs.ridx () + k, rhs.ridx () + k + u - l,
2150  xridx () + l);
2151  }
2152  else
2153  {
2154  // original
2155  octave_idx_type k = tmp.cidx (i);
2156  std::copy (tmp.data () + k, tmp.data () + k + u - l,
2157  xdata () + l);
2158  std::copy (tmp.ridx () + k, tmp.ridx () + k + u - l,
2159  xridx () + l);
2160  }
2161  }
2162 
2163  }
2164  }
2165  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.is_vector ())
2166  {
2167  // It's just vector indexing. The 1D assign is specialized for that.
2168  assign (idx_i, rhs);
2169  }
2170  else if (idx_j.is_colon ())
2171  {
2172  if (idx_i.is_permutation (nr))
2173  {
2174  *this = rhs.index (idx_i.inverse_permutation (nr), idx_j);
2175  }
2176  else
2177  {
2178  // FIXME: optimize more special cases?
2179  // In general this requires unpacking the columns, which is slow,
2180  // especially for many small columns. OTOH, transpose is an
2181  // efficient O(nr+nc+nnz) operation.
2182  *this = transpose ();
2183  assign (idx_vector::colon, idx_i, rhs.transpose ());
2184  *this = transpose ();
2185  }
2186  }
2187  else
2188  {
2189  // Split it into 2 assignments and one indexing.
2190  Sparse<T> tmp = index (idx_vector::colon, idx_j);
2191  tmp.assign (idx_i, idx_vector::colon, rhs);
2192  assign (idx_vector::colon, idx_j, tmp);
2193  }
2194  }
2195  else if (m == 1 && n == 1)
2196  {
2197  n = idx_i.length (nr);
2198  m = idx_j.length (nc);
2199  if (rhs.nnz () != 0)
2200  assign (idx_i, idx_j, Sparse<T> (n, m, rhs.data (0)));
2201  else
2202  assign (idx_i, idx_j, Sparse<T> (n, m));
2203  }
2204  else if (idx_i.length (nr) == m && idx_j.length (nc) == n
2205  && (n == 1 || m == 1))
2206  {
2207  assign (idx_i, idx_j, rhs.transpose ());
2208  }
2209  else
2210  octave::err_nonconformant ("=", idx_i.length (nr), idx_j.length (nc), n, m);
2211 }
2212 
// Can't use the versions of these in Array.cc due to duplication of the
// instantiations for Array<double> and Sparse<double>, etc.
2215 template <typename T>
2216 bool
2218  typename ref_param<T>::type b)
2219 {
2220  return (a < b);
2221 }
2222 
2223 template <typename T>
2224 bool
2226  typename ref_param<T>::type b)
2227 {
2228  return (a > b);
2229 }
2230 
2231 template <typename T>
2232 Sparse<T>
2234 {
2235  Sparse<T> m = *this;
2236 
2237  octave_idx_type nr = m.rows ();
2238  octave_idx_type nc = m.columns ();
2239 
2240  if (m.numel () < 1 || dim > 1)
2241  return m;
2242 
2243  if (dim > 0)
2244  {
2245  m = m.transpose ();
2246  nr = m.rows ();
2247  nc = m.columns ();
2248  }
2249 
2250  octave_sort<T> lsort;
2251 
2252  if (mode == ASCENDING)
2253  lsort.set_compare (sparse_ascending_compare<T>);
2254  else if (mode == DESCENDING)
2255  lsort.set_compare (sparse_descending_compare<T>);
2256  else
2257  abort ();
2258 
2259  T *v = m.data ();
2260  octave_idx_type *mcidx = m.cidx ();
2261  octave_idx_type *mridx = m.ridx ();
2262 
2263  for (octave_idx_type j = 0; j < nc; j++)
2264  {
2265  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2266  lsort.sort (v, ns);
2267 
2269  if (mode == ASCENDING)
2270  {
2271  for (i = 0; i < ns; i++)
2272  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2273  break;
2274  }
2275  else
2276  {
2277  for (i = 0; i < ns; i++)
2278  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2279  break;
2280  }
2281  for (octave_idx_type k = 0; k < i; k++)
2282  mridx[k] = k;
2283  for (octave_idx_type k = i; k < ns; k++)
2284  mridx[k] = k - ns + nr;
2285 
2286  v += ns;
2287  mridx += ns;
2288  }
2289 
2290  if (dim > 0)
2291  m = m.transpose ();
2292 
2293  return m;
2294 }
2295 
2296 template <typename T>
2297 Sparse<T>
2299  sortmode mode) const
2300 {
2301  Sparse<T> m = *this;
2302 
2303  octave_idx_type nr = m.rows ();
2304  octave_idx_type nc = m.columns ();
2305 
2306  if (m.numel () < 1 || dim > 1)
2307  {
2308  sidx = Array<octave_idx_type> (dim_vector (nr, nc), 1);
2309  return m;
2310  }
2311 
2312  if (dim > 0)
2313  {
2314  m = m.transpose ();
2315  nr = m.rows ();
2316  nc = m.columns ();
2317  }
2318 
2319  octave_sort<T> indexed_sort;
2320 
2321  if (mode == ASCENDING)
2322  indexed_sort.set_compare (sparse_ascending_compare<T>);
2323  else if (mode == DESCENDING)
2324  indexed_sort.set_compare (sparse_descending_compare<T>);
2325  else
2326  abort ();
2327 
2328  T *v = m.data ();
2329  octave_idx_type *mcidx = m.cidx ();
2330  octave_idx_type *mridx = m.ridx ();
2331 
2332  sidx = Array<octave_idx_type> (dim_vector (nr, nc));
2334 
2335  for (octave_idx_type j = 0; j < nc; j++)
2336  {
2337  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2338  octave_idx_type offset = j * nr;
2339 
2340  if (ns == 0)
2341  {
2342  for (octave_idx_type k = 0; k < nr; k++)
2343  sidx (offset + k) = k;
2344  }
2345  else
2346  {
2347  for (octave_idx_type i = 0; i < ns; i++)
2348  vi[i] = mridx[i];
2349 
2350  indexed_sort.sort (v, vi, ns);
2351 
2353  if (mode == ASCENDING)
2354  {
2355  for (i = 0; i < ns; i++)
2356  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2357  break;
2358  }
2359  else
2360  {
2361  for (i = 0; i < ns; i++)
2362  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2363  break;
2364  }
2365 
2366  octave_idx_type ii = 0;
2367  octave_idx_type jj = i;
2368  for (octave_idx_type k = 0; k < nr; k++)
2369  {
2370  if (ii < ns && mridx[ii] == k)
2371  ii++;
2372  else
2373  sidx (offset + jj++) = k;
2374  }
2375 
2376  for (octave_idx_type k = 0; k < i; k++)
2377  {
2378  sidx (k + offset) = vi[k];
2379  mridx[k] = k;
2380  }
2381 
2382  for (octave_idx_type k = i; k < ns; k++)
2383  {
2384  sidx (k - ns + nr + offset) = vi[k];
2385  mridx[k] = k - ns + nr;
2386  }
2387 
2388  v += ns;
2389  mridx += ns;
2390  }
2391  }
2392 
2393  if (dim > 0)
2394  {
2395  m = m.transpose ();
2396  sidx = sidx.transpose ();
2397  }
2398 
2399  return m;
2400 }
2401 
template <typename T>
Sparse<T>
Sparse<T>::diag (octave_idx_type k) const
{
  // Mimic Matlab's diag():
  //   * matrix input  -> extract the k-th diagonal as a sparse column vector
  //   * vector input  -> build an n-by-n sparse matrix with the vector on
  //                      the k-th diagonal
  // k > 0 selects superdiagonals, k < 0 subdiagonals, k == 0 the main one.

  octave_idx_type nnr = rows ();
  octave_idx_type nnc = cols ();
  Sparse<T> d;

  if (nnr == 0 || nnc == 0)
    ; // do nothing
  else if (nnr != 1 && nnc != 1)
    {
      // Matrix input: shrink the effective dimensions so that ndiag below
      // is the length of the requested diagonal.
      if (k > 0)
        nnc -= k;
      else if (k < 0)
        nnr += k;

      if (nnr > 0 && nnc > 0)
        {
          octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;

          // Count the number of nonzero elements
          // First pass: count the nonzeros on the diagonal so the result
          // can be allocated exactly (elem() does a per-element lookup).
          octave_idx_type nel = 0;
          if (k > 0)
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                if (elem (i, i+k) != 0.)
                  nel++;
            }
          else if (k < 0)
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                if (elem (i-k, i) != 0.)
                  nel++;
            }
          else
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                if (elem (i, i) != 0.)
                  nel++;
            }

          // Result is a single sparse column: cidx has just two entries.
          d = Sparse<T> (ndiag, 1, nel);
          d.xcidx (0) = 0;
          d.xcidx (1) = nel;

          // Second pass: copy the nonzero diagonal values in order.
          octave_idx_type ii = 0;
          if (k > 0)
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                {
                  T tmp = elem (i, i+k);
                  if (tmp != 0.)
                    {
                      d.xdata (ii) = tmp;
                      d.xridx (ii++) = i;
                    }
                }
            }
          else if (k < 0)
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                {
                  T tmp = elem (i-k, i);
                  if (tmp != 0.)
                    {
                      d.xdata (ii) = tmp;
                      d.xridx (ii++) = i;
                    }
                }
            }
          else
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                {
                  T tmp = elem (i, i);
                  if (tmp != 0.)
                    {
                      d.xdata (ii) = tmp;
                      d.xridx (ii++) = i;
                    }
                }
            }
        }
      else
        {
          // Matlab returns [] 0x1 for out-of-range diagonal

          octave_idx_type nr = 0;
          octave_idx_type nc = 1;
          octave_idx_type nz = 0;

          d = Sparse<T> (nr, nc, nz);
        }
    }
  else if (nnr != 0 && nnc != 0)
    {
      // Vector input: place the vector on the k-th diagonal of an n-by-n
      // sparse matrix.  roff/coff are the row/column offsets of the first
      // diagonal element.
      octave_idx_type roff = 0;
      octave_idx_type coff = 0;
      if (k > 0)
        {
          roff = 0;
          coff = k;
        }
      else if (k < 0)
        {
          roff = -k;
          coff = 0;
        }

      if (nnr == 1)
        {
          // Row vector: element j of the vector (stored in column j) lands
          // in column j + coff of the result, so the CSC arrays can be
          // copied almost verbatim with shifted column pointers.
          octave_idx_type n = nnc + std::abs (k);
          octave_idx_type nz = nnz ();

          d = Sparse<T> (n, n, nz);

          if (nnz () > 0)
            {
              // Columns before the diagonal start are empty.
              for (octave_idx_type i = 0; i < coff+1; i++)
                d.xcidx (i) = 0;

              for (octave_idx_type j = 0; j < nnc; j++)
                {
                  for (octave_idx_type i = cidx (j); i < cidx (j+1); i++)
                    {
                      d.xdata (i) = data (i);
                      d.xridx (i) = j + roff;
                    }
                  d.xcidx (j + coff + 1) = cidx (j+1);
                }

              // Trailing columns past the diagonal are empty too.
              for (octave_idx_type i = nnc + coff + 1; i < n + 1; i++)
                d.xcidx (i) = nz;
            }
        }
      else
        {
          // Column vector: walk the single column's row indices; the
          // element at row i goes to position (i + roff, i + coff).
          octave_idx_type n = nnr + std::abs (k);
          octave_idx_type nz = nnz ();

          d = Sparse<T> (n, n, nz);

          if (nnz () > 0)
            {
              octave_idx_type ii = 0;
              octave_idx_type ir = ridx (0);

              for (octave_idx_type i = 0; i < coff+1; i++)
                d.xcidx (i) = 0;

              for (octave_idx_type i = 0; i < nnr; i++)
                {
                  if (ir == i)
                    {
                      d.xdata (ii) = data (ii);
                      d.xridx (ii++) = ir + roff;

                      // Advance to the next stored row index, guarding
                      // against reading past the last nonzero.
                      if (ii != nz)
                        ir = ridx (ii);
                    }
                  d.xcidx (i + coff + 1) = ii;
                }

              for (octave_idx_type i = nnr + coff + 1; i < n+1; i++)
                d.xcidx (i) = nz;
            }
        }
    }

  return d;
}
2574 
template <typename T>
Sparse<T>
Sparse<T>::cat (int dim, octave_idx_type n, const Sparse<T> *sparse_list)
{
  // Concatenate N sparse matrices along dimension DIM (0 = vertical,
  // 1 = horizontal).  DIM == -1/-2 selects the relaxed hvcat rule used by
  // the [] operator, which is more permissive about empty operands.

  // Default concatenation.
  bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;

  if (dim == -1 || dim == -2)
    {
      concat_rule = &dim_vector::hvcat;
      dim = -dim - 1;
    }
  else if (dim < 0)
    (*current_liboctave_error_handler) ("cat: invalid dimension");

  dim_vector dv;
  octave_idx_type total_nz = 0;
  if (dim != 0 && dim != 1)
    (*current_liboctave_error_handler)
      ("cat: invalid dimension for sparse concatenation");

  if (n == 1)
    return sparse_list[0];

  // Accumulate the result dimensions and the total nonzero count so the
  // result can be allocated in one shot.
  for (octave_idx_type i = 0; i < n; i++)
    {
      if (! (dv.*concat_rule) (sparse_list[i].dims (), dim))
        (*current_liboctave_error_handler) ("cat: dimension mismatch");

      total_nz += sparse_list[i].nnz ();
    }

  Sparse<T> retval (dv, total_nz);

  if (retval.is_empty ())
    return retval;

  switch (dim)
    {
    case 0:
      {
        // sparse vertcat.  This is not efficiently handled by assignment,
        // so we'll do it directly.
        octave_idx_type l = 0;
        for (octave_idx_type j = 0; j < dv(1); j++)
          {
            octave_quit ();

            // rcum is the row offset of the current operand within the
            // stacked result; l indexes the result's data/ridx arrays.
            octave_idx_type rcum = 0;
            for (octave_idx_type i = 0; i < n; i++)
              {
                const Sparse<T>& spi = sparse_list[i];
                // Skipping empty matrices.  See the comment in Array.cc.
                if (spi.is_empty ())
                  continue;

                octave_idx_type kl = spi.cidx (j);
                octave_idx_type ku = spi.cidx (j+1);
                for (octave_idx_type k = kl; k < ku; k++, l++)
                  {
                    retval.xridx (l) = spi.ridx (k) + rcum;
                    retval.xdata (l) = spi.data (k);
                  }

                rcum += spi.rows ();
              }

            retval.xcidx (j+1) = l;
          }

        break;
      }
    case 1:
      {
        // sparse horzcat: assign each operand into its column range; this
        // is efficient because whole columns are copied contiguously.
        octave_idx_type l = 0;
        for (octave_idx_type i = 0; i < n; i++)
          {
            octave_quit ();

            // Skipping empty matrices.  See the comment in Array.cc.
            if (sparse_list[i].is_empty ())
              continue;

            octave_idx_type u = l + sparse_list[i].columns ();
            retval.assign (idx_vector::colon, idx_vector (l, u),
                           sparse_list[i]);
            l = u;
          }

        break;
      }
    default:
      assert (false);
    }

  return retval;
}
2672 
template <typename T>
Array<T>
Sparse<T>::array_value (void) const
{
  // Convert to a dense Array of the same dimensions; unstored entries
  // become T () (zero).
  NoAlias< Array<T> > retval (dims (), T ());
  if (rows () == 1)
    {
      // Row-vector fast path: every column holds at most one nonzero, so a
      // column's population is detected by cidx (j+1) advancing past the
      // running data cursor i.
      octave_idx_type i = 0;
      for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
        {
          if (cidx (j+1) > i)
            retval(j) = data (i++);
        }
    }
  else
    {
      // General case: scatter each stored (row, col, value) triple.
      for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
        for (octave_idx_type i = cidx (j), iu = cidx (j+1); i < iu; i++)
          retval(ridx (i), j) = data (i);
    }

  return retval;
}
2696 
2697 template <typename T>
2698 std::istream&
2700  T (*read_fcn) (std::istream&))
2701 {
2702  octave_idx_type nr = a.rows ();
2703  octave_idx_type nc = a.cols ();
2704  octave_idx_type nz = a.nzmax ();
2705 
2706  if (nr > 0 && nc > 0)
2707  {
2708  octave_idx_type itmp;
2709  octave_idx_type jtmp;
2710  octave_idx_type iold = 0;
2711  octave_idx_type jold = 0;
2712  octave_idx_type ii = 0;
2713  T tmp;
2714 
2715  a.cidx (0) = 0;
2716  for (octave_idx_type i = 0; i < nz; i++)
2717  {
2718  itmp = 0; jtmp = 0;
2719  is >> itmp;
2720  itmp--;
2721 
2722  is >> jtmp;
2723  jtmp--;
2724 
2725  if (is.fail ())
2726  {
2727  is.clear();
2728  std::string err_field;
2729  is >> err_field;
2730  (*current_liboctave_error_handler)
2731  ("invalid sparse matrix: element %d: "
2732  "Symbols '%s' is not an integer format",
2733  i+1, err_field.c_str ());
2734  }
2735 
2736  if (itmp < 0 || itmp >= nr)
2737  {
2738  is.setstate (std::ios::failbit);
2739 
2740  (*current_liboctave_error_handler)
2741  ("invalid sparse matrix: element %d: "
2742  "row index = %d out of range",
2743  i+1, itmp + 1);
2744  }
2745 
2746  if (jtmp < 0 || jtmp >= nc)
2747  {
2748  is.setstate (std::ios::failbit);
2749 
2750  (*current_liboctave_error_handler)
2751  ("invalid sparse matrix: element %d: "
2752  "column index = %d out of range",
2753  i+1, jtmp + 1);
2754  }
2755 
2756  if (jtmp < jold)
2757  {
2758  is.setstate (std::ios::failbit);
2759 
2760  (*current_liboctave_error_handler)
2761  ("invalid sparse matrix: element %d:"
2762  "column indices must appear in ascending order (%d < %d)",
2763  i+1, jtmp, jold);
2764  }
2765  else if (jtmp > jold)
2766  {
2767  for (octave_idx_type j = jold; j < jtmp; j++)
2768  a.cidx (j+1) = ii;
2769  }
2770  else if (itmp < iold)
2771  {
2772  is.setstate (std::ios::failbit);
2773 
2774  (*current_liboctave_error_handler)
2775  ("invalid sparse matrix: element %d: "
2776  "row indices must appear in ascending order in each column "
2777  "(%d < %d)",
2778  i+1, iold, itmp);
2779  }
2780 
2781  iold = itmp;
2782  jold = jtmp;
2783 
2784  tmp = read_fcn (is);
2785 
2786  if (! is)
2787  return is; // Problem, return is in error state
2788 
2789  a.data (ii) = tmp;
2790  a.ridx (ii++) = itmp;
2791  }
2792 
2793  for (octave_idx_type j = jold; j < nc; j++)
2794  a.cidx (j+1) = ii;
2795  }
2796 
2797  return is;
2798 }
2799 
2800 /*
2801  * Tests
2802  *
2803 
2804 %!function x = set_slice (x, dim, slice, arg)
2805 %! switch (dim)
2806 %! case 11
2807 %! x(slice) = 2;
2808 %! case 21
2809 %! x(slice, :) = 2;
2810 %! case 22
2811 %! x(:, slice) = 2;
2812 %! otherwise
2813 %! error ("invalid dim, '%d'", dim);
2814 %! endswitch
2815 %!endfunction
2816 
2817 %!function x = set_slice2 (x, dim, slice)
2818 %! switch (dim)
2819 %! case 11
2820 %! x(slice) = 2 * ones (size (slice));
2821 %! case 21
2822 %! x(slice, :) = 2 * ones (length (slice), columns (x));
2823 %! case 22
2824 %! x(:, slice) = 2 * ones (rows (x), length (slice));
2825 %! otherwise
2826 %! error ("invalid dim, '%d'", dim);
2827 %! endswitch
2828 %!endfunction
2829 
2830 %!function test_sparse_slice (size, dim, slice)
2831 %! x = ones (size);
2832 %! s = set_slice (sparse (x), dim, slice);
2833 %! f = set_slice (x, dim, slice);
2834 %! assert (nnz (s), nnz (f));
2835 %! assert (full (s), f);
2836 %! s = set_slice2 (sparse (x), dim, slice);
2837 %! f = set_slice2 (x, dim, slice);
2838 %! assert (nnz (s), nnz (f));
2839 %! assert (full (s), f);
2840 %!endfunction
2841 
2842 #### 1d indexing
2843 
2844 ## size = [2 0]
2845 %!test test_sparse_slice ([2 0], 11, []);
2846 %!assert (set_slice (sparse (ones ([2 0])), 11, 1), sparse ([2 0]')) # sparse different from full
2847 %!assert (set_slice (sparse (ones ([2 0])), 11, 2), sparse ([0 2]')) # sparse different from full
2848 %!assert (set_slice (sparse (ones ([2 0])), 11, 3), sparse ([0 0; 2 0]')) # sparse different from full
2849 %!assert (set_slice (sparse (ones ([2 0])), 11, 4), sparse ([0 0; 0 2]')) # sparse different from full
2850 
2851 ## size = [0 2]
2852 %!test test_sparse_slice ([0 2], 11, []);
2853 %!assert (set_slice (sparse (ones ([0 2])), 11, 1), sparse ([2 0])) # sparse different from full
2854 %!test test_sparse_slice ([0 2], 11, 2);
2855 %!test test_sparse_slice ([0 2], 11, 3);
2856 %!test test_sparse_slice ([0 2], 11, 4);
2857 %!test test_sparse_slice ([0 2], 11, [4, 4]);
2858 
2859 ## size = [2 1]
2860 %!test test_sparse_slice ([2 1], 11, []);
2861 %!test test_sparse_slice ([2 1], 11, 1);
2862 %!test test_sparse_slice ([2 1], 11, 2);
2863 %!test test_sparse_slice ([2 1], 11, 3);
2864 %!test test_sparse_slice ([2 1], 11, 4);
2865 %!test test_sparse_slice ([2 1], 11, [4, 4]);
2866 
2867 ## size = [1 2]
2868 %!test test_sparse_slice ([1 2], 11, []);
2869 %!test test_sparse_slice ([1 2], 11, 1);
2870 %!test test_sparse_slice ([1 2], 11, 2);
2871 %!test test_sparse_slice ([1 2], 11, 3);
2872 %!test test_sparse_slice ([1 2], 11, 4);
2873 %!test test_sparse_slice ([1 2], 11, [4, 4]);
2874 
2875 ## size = [2 2]
2876 %!test test_sparse_slice ([2 2], 11, []);
2877 %!test test_sparse_slice ([2 2], 11, 1);
2878 %!test test_sparse_slice ([2 2], 11, 2);
2879 %!test test_sparse_slice ([2 2], 11, 3);
2880 %!test test_sparse_slice ([2 2], 11, 4);
2881 %!test test_sparse_slice ([2 2], 11, [4, 4]);
2882 # These 2 errors are the same as in the full case
2883 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 5)
2884 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 6)
2885 
2886 #### 2d indexing
2887 
2888 ## size = [2 0]
2889 %!test test_sparse_slice ([2 0], 21, []);
2890 %!test test_sparse_slice ([2 0], 21, 1);
2891 %!test test_sparse_slice ([2 0], 21, 2);
2892 %!test test_sparse_slice ([2 0], 21, [2,2]);
2893 %!assert (set_slice (sparse (ones ([2 0])), 21, 3), sparse (3,0))
2894 %!assert (set_slice (sparse (ones ([2 0])), 21, 4), sparse (4,0))
2895 %!test test_sparse_slice ([2 0], 22, []);
2896 %!test test_sparse_slice ([2 0], 22, 1);
2897 %!test test_sparse_slice ([2 0], 22, 2);
2898 %!test test_sparse_slice ([2 0], 22, [2,2]);
2899 %!assert (set_slice (sparse (ones ([2 0])), 22, 3), sparse ([0 0 2;0 0 2])) # sparse different from full
2900 %!assert (set_slice (sparse (ones ([2 0])), 22, 4), sparse ([0 0 0 2;0 0 0 2])) # sparse different from full
2901 
2902 ## size = [0 2]
2903 %!test test_sparse_slice ([0 2], 21, []);
2904 %!test test_sparse_slice ([0 2], 21, 1);
2905 %!test test_sparse_slice ([0 2], 21, 2);
2906 %!test test_sparse_slice ([0 2], 21, [2,2]);
2907 %!assert (set_slice (sparse (ones ([0 2])), 21, 3), sparse ([0 0;0 0;2 2])) # sparse different from full
2908 %!assert (set_slice (sparse (ones ([0 2])), 21, 4), sparse ([0 0;0 0;0 0;2 2])) # sparse different from full
2909 %!test test_sparse_slice ([0 2], 22, []);
2910 %!test test_sparse_slice ([0 2], 22, 1);
2911 %!test test_sparse_slice ([0 2], 22, 2);
2912 %!test test_sparse_slice ([0 2], 22, [2,2]);
2913 %!assert (set_slice (sparse (ones ([0 2])), 22, 3), sparse (0,3))
2914 %!assert (set_slice (sparse (ones ([0 2])), 22, 4), sparse (0,4))
2915 
2916 ## size = [2 1]
2917 %!test test_sparse_slice ([2 1], 21, []);
2918 %!test test_sparse_slice ([2 1], 21, 1);
2919 %!test test_sparse_slice ([2 1], 21, 2);
2920 %!test test_sparse_slice ([2 1], 21, [2,2]);
2921 %!test test_sparse_slice ([2 1], 21, 3);
2922 %!test test_sparse_slice ([2 1], 21, 4);
2923 %!test test_sparse_slice ([2 1], 22, []);
2924 %!test test_sparse_slice ([2 1], 22, 1);
2925 %!test test_sparse_slice ([2 1], 22, 2);
2926 %!test test_sparse_slice ([2 1], 22, [2,2]);
2927 %!test test_sparse_slice ([2 1], 22, 3);
2928 %!test test_sparse_slice ([2 1], 22, 4);
2929 
2930 ## size = [1 2]
2931 %!test test_sparse_slice ([1 2], 21, []);
2932 %!test test_sparse_slice ([1 2], 21, 1);
2933 %!test test_sparse_slice ([1 2], 21, 2);
2934 %!test test_sparse_slice ([1 2], 21, [2,2]);
2935 %!test test_sparse_slice ([1 2], 21, 3);
2936 %!test test_sparse_slice ([1 2], 21, 4);
2937 %!test test_sparse_slice ([1 2], 22, []);
2938 %!test test_sparse_slice ([1 2], 22, 1);
2939 %!test test_sparse_slice ([1 2], 22, 2);
2940 %!test test_sparse_slice ([1 2], 22, [2,2]);
2941 %!test test_sparse_slice ([1 2], 22, 3);
2942 %!test test_sparse_slice ([1 2], 22, 4);
2943 
2944 ## size = [2 2]
2945 %!test test_sparse_slice ([2 2], 21, []);
2946 %!test test_sparse_slice ([2 2], 21, 1);
2947 %!test test_sparse_slice ([2 2], 21, 2);
2948 %!test test_sparse_slice ([2 2], 21, [2,2]);
2949 %!test test_sparse_slice ([2 2], 21, 3);
2950 %!test test_sparse_slice ([2 2], 21, 4);
2951 %!test test_sparse_slice ([2 2], 22, []);
2952 %!test test_sparse_slice ([2 2], 22, 1);
2953 %!test test_sparse_slice ([2 2], 22, 2);
2954 %!test test_sparse_slice ([2 2], 22, [2,2]);
2955 %!test test_sparse_slice ([2 2], 22, 3);
2956 %!test test_sparse_slice ([2 2], 22, 4);
2957 
2958 %!assert <35570> (speye (3,1)(3:-1:1), sparse ([0; 0; 1]))
2959 
2960 ## Test removing columns
2961 %!test <36656>
2962 %! s = sparse (magic (5));
2963 %! s(:,2:4) = [];
2964 %! assert (s, sparse (magic (5)(:, [1,5])));
2965 
2966 %!test
2967 %! s = sparse ([], [], [], 1, 1);
2968 %! s(1,:) = [];
2969 %! assert (s, sparse ([], [], [], 0, 1));
2970 
2971 ## Test (bug #37321)
2972 %!test <37321> a=sparse (0,0); assert (all (a) == sparse ([1]));
2973 %!test <37321> a=sparse (0,1); assert (all (a) == sparse ([1]));
2974 %!test <37321> a=sparse (1,0); assert (all (a) == sparse ([1]));
2975 %!test <37321> a=sparse (1,0); assert (all (a,2) == sparse ([1]));
2976 %!test <37321> a=sparse (1,0); assert (size (all (a,1)), [1 0]);
2977 %!test <37321> a=sparse (1,1);
2978 %! assert (all (a) == sparse ([0]));
2979 %! assert (size (all (a)), [1 1]);
2980 %!test <37321> a=sparse (2,1);
2981 %! assert (all (a) == sparse ([0]));
2982 %! assert (size (all (a)), [1 1]);
2983 %!test <37321> a=sparse (1,2);
2984 %! assert (all (a) == sparse ([0]));
2985 %! assert (size (all (a)), [1 1]);
2986 %!test <37321> a=sparse (2,2); assert (isequal (all (a), sparse ([0 0])));
2987 
2988 ## Test assigning row to a column slice
2989 %!test <45589>
2990 %! a = sparse (magic (3));
2991 %! b = a;
2992 %! a(1,:) = 1:3;
2993 %! b(1,:) = (1:3)';
2994 %! assert (a, b);
2995 
2996 */
2997 
2998 template <typename T>
2999 void
3000 Sparse<T>::print_info (std::ostream& os, const std::string& prefix) const
3001 {
3002  os << prefix << "rep address: " << rep << "\n"
3003  << prefix << "rep->nzmx: " << rep->nzmx << "\n"
3004  << prefix << "rep->nrows: " << rep->nrows << "\n"
3005  << prefix << "rep->ncols: " << rep->ncols << "\n"
3006  << prefix << "rep->data: " << static_cast<void *> (rep->d) << "\n"
3007  << prefix << "rep->ridx: " << static_cast<void *> (rep->r) << "\n"
3008  << prefix << "rep->cidx: " << static_cast<void *> (rep->c) << "\n"
3009  << prefix << "rep->count: " << rep->count << "\n";
3010 }
3011 
// Instantiate the Sparse<T> class template and its coordinate-format
// stream reader for element type T.  API carries the platform-specific
// export/import decoration applied to the instantiated symbols.
#define INSTANTIATE_SPARSE(T, API) \
  template class API Sparse<T>; \
  template std::istream& \
  read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
                         T (*read_fcn) (std::istream&));
Array< T > as_matrix(void) const
Return the array as a matrix.
Definition: Array.h:387
T & elem(octave_idx_type _r, octave_idx_type _c)
Definition: Sparse.cc:83
octave_idx_type * xridx(void)
Definition: Sparse.h:536
void mx_inline_add2(size_t n, R *r, const X *x)
Definition: mx-inlines.cc:127
dim_vector dimensions
Definition: Sparse.h:170
octave_idx_type cols(void) const
Definition: Sparse.h:272
octave_idx_type length(octave_idx_type n=0) const
Definition: idx-vector.h:541
std::string str(char sep= 'x') const
Definition: dim-vector.cc:73
OCTAVE_EXPORT octave_value_list return the value of the option it must match the dimension of the state and the relative tolerance must also be a vector of the same length tem it must match the dimension of the state and the absolute tolerance must also be a vector of the same length The local error test applied at each integration step is xample roup calculate Y_a and Y _d item Given calculate Y nd enumerate In either initial values for the given components are and initial guesses for the unknown components must also be provided as input Set this option to to solve the first or to solve the second(the default is 0, so you must provide a set of initial conditions that are consistent).If this option is set to a nonzero value
T * data(void)
Definition: Sparse.h:521
octave_idx_type rows(void) const
Definition: Sparse.h:271
void change_length(octave_idx_type nz)
Definition: Sparse.cc:159
octave_idx_type numel(void) const
Definition: Sparse.h:257
const octave_base_value const Array< octave_idx_type > & ra_idx
Sparse< T > & operator=(const Sparse< T > &a)
Definition: Sparse.cc:692
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition: oct-locbuf.h:209
dim_vector dims(void) const
Definition: Sparse.h:291
octave_idx_type rows(void) const
Definition: PermMatrix.h:59
static const idx_vector colon
Definition: idx-vector.h:482
sortmode
Definition: oct-sort.h:105
octave_idx_type numel(void) const
Number of elements in the array.
Definition: Array.h:363
identity matrix If supplied two scalar respectively For allows like xample val
Definition: data.cc:5068
Return the CPU time used by your Octave session The first output is the total time spent executing your process and is equal to the sum of second and third which are the number of CPU seconds spent executing in user mode and the number of CPU seconds spent executing in system mode
Definition: data.cc:6386
OCTAVE_NORETURN liboctave_error_handler current_liboctave_error_handler
Definition: lo-error.c:38
bool isnan(double x)
Definition: lo-mappers.cc:347
void set_compare(compare_fcn_type comp)
Definition: oct-sort.h:122
octave_idx_type nzmx
Definition: Sparse.h:70
for large enough k
Definition: lu.cc:606
void resize(int n, int fill_value=0)
Definition: dim-vector.h:316
static void transpose(octave_idx_type N, const octave_idx_type *ridx, const octave_idx_type *cidx, octave_idx_type *ridx2, octave_idx_type *cidx2)
Definition: symrcm.cc:382
octave_idx_type * xcidx(void)
Definition: Sparse.h:549
void err_index_out_of_range(int nd, int dim, octave_idx_type idx, octave_idx_type ext)
static Sparse< T >::SparseRep * nil_rep(void)
Definition: Sparse.cc:57
Sparse< T > index(const idx_vector &i, bool resize_ok=false) const
Definition: Sparse.cc:1378
bool any_element_is_nan(void) const
Definition: Sparse.cc:197
Sparse< T > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Definition: Sparse.cc:889
bool sparse_indices_ok(octave_idx_type *r, octave_idx_type *c, octave_idx_type nrows, octave_idx_type ncols, octave_idx_type nnz)
Definition: sparse-util.cc:84
static Sparse< T > cat(int dim, octave_idx_type n, const Sparse< T > *sparse_list)
Definition: Sparse.cc:2577
octave_idx_type * r
Definition: Sparse.h:68
Sparse< T > & insert(const Sparse< T > &a, octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:998
octave_idx_type * cidx(void)
Definition: Sparse.h:543
T & elem(octave_idx_type n)
Definition: Array.h:482
octave_idx_type columns(void) const
Definition: Sparse.h:273
u
Definition: lu.cc:138
bool indices_ok(void) const
Definition: Sparse.cc:190
octave_idx_type * c
Definition: Sparse.h:69
s
Definition: file-io.cc:2682
octave_idx_type rows(void) const
Definition: Array.h:401
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition: dim-vector.h:389
octave_function * fcn
Definition: ov-class.cc:1743
OCTAVE_NORETURN T range_error(const char *fcn, octave_idx_type n) const
Definition: Sparse.cc:733
idx_vector inverse_permutation(octave_idx_type n) const
Definition: idx-vector.cc:1164
F77_RET_T F77_REAL &F77_RET_T F77_DBLE &F77_RET_T F77_REAL &F77_RET_T F77_DBLE &F77_RET_T F77_REAL &F77_RET_T F77_DBLE &F77_RET_T const F77_REAL const F77_REAL F77_REAL &F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE &F77_RET_T F77_REAL &F77_RET_T F77_DBLE &F77_RET_T F77_DBLE &F77_RET_T F77_REAL &F77_RET_T F77_REAL &F77_RET_T F77_DBLE &F77_RET_T const F77_DBLE F77_DBLE &F77_RET_T const F77_REAL F77_REAL &F77_RET_T F77_REAL F77_REAL &F77_RET_T F77_DBLE F77_DBLE &F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
octave_idx_type nnz(void) const
Actual number of nonzero terms.
Definition: Sparse.h:253
calling an anonymous function involves an overhead quite comparable to the overhead of an m file function Passing a handle to a built in function is because the interpreter is not involved in the internal loop For a
Definition: cellfun.cc:398
octave_idx_type nzmax(void) const
Amount of storage for nonzero elements.
Definition: Sparse.h:245
bool sparse_ascending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2217
bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
Definition: dim-vector.cc:187
void assign(const idx_vector &i, const Sparse< T > &rhs)
Definition: Sparse.cc:1822
void err_del_index_out_of_range(bool is1d, octave_idx_type idx, octave_idx_type ext)
void maybe_compress(bool remove_zeros)
Definition: Sparse.cc:135
Sparse< T >::SparseRep * rep
Definition: Sparse.h:168
dim_vector orig_dimensions(void) const
Definition: idx-vector.h:583
std::istream & read_sparse_matrix(std::istream &is, Sparse< T > &a, T(*read_fcn)(std::istream &))
Definition: Sparse.cc:2699
T celem(octave_idx_type _r, octave_idx_type _c) const
Definition: Sparse.cc:124
octave_idx_type safe_numel(void) const
Definition: dim-vector.cc:103
void copy_data(octave_idx_type *data) const
Definition: idx-vector.cc:1045
void mx_inline_sub(size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:109
idx_vector complement(octave_idx_type n) const
Definition: idx-vector.cc:1103
void change_capacity(octave_idx_type nz)
Definition: Sparse.h:505
bool sparse_descending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2225
const Array< octave_idx_type > & col_perm_vec(void) const
Definition: PermMatrix.h:79
nd deftypefn *octave_map m
Definition: ov-struct.cc:2058
octave_idx_type ncols
Definition: Sparse.h:72
Sparse< T > sort(octave_idx_type dim=0, sortmode mode=ASCENDING) const
Definition: Sparse.cc:2233
Sparse< T > reshape(const dim_vector &new_dims) const
Definition: Sparse.cc:809
Sparse< T > maybe_compress(bool remove_zeros=false)
Definition: Sparse.h:481
void resize(octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:948
Array< T > transpose(void) const
Definition: Array.cc:1616
const T * data(void) const
Definition: Array.h:582
static int elem
Definition: __contourc__.cc:50
bool is_vector(void) const
Definition: idx-vector.cc:1273
double tmp
Definition: data.cc:6300
void err_nonconformant(const char *op, octave_idx_type op1_len, octave_idx_type op2_len)
octave_value retval
Definition: data.cc:6294
dim_vector redim(int n) const
Definition: dim-vector.cc:275
bool is_cont_range(octave_idx_type n, octave_idx_type &l, octave_idx_type &u) const
Definition: idx-vector.cc:948
the sparsity preserving column transformation such that that defines the pivoting threshold can be given in which case it defines the c
Definition: lu.cc:138
bool is_permutation(octave_idx_type n) const
Definition: idx-vector.cc:1135
the exceeded dimensions are set to if fewer subscripts than dimensions are the exceeding dimensions are merged into the final requested dimension For consider the following dims
Definition: sub2ind.cc:255
Sparse(void)
Definition: Sparse.h:178
if_then_else< is_class_type< T >::no, T, T const & >::result type
Definition: lo-traits.h:118
void delete_elements(const idx_vector &i)
Definition: Sparse.cc:1146
Array< octave_idx_type > as_array(void) const
Definition: idx-vector.cc:1267
void err_invalid_resize(void)
bool is_colon_equiv(octave_idx_type n) const
Definition: idx-vector.h:574
charNDArray max(char d, const charNDArray &m)
Definition: chNDArray.cc:228
octave_idx_type extent(octave_idx_type n) const
Definition: idx-vector.h:544
bool is_range(void) const
Definition: idx-vector.h:571
T::size_type numel(const T &str)
Definition: oct-string.cc:61
void sort(T *data, octave_idx_type nel)
Definition: oct-sort.cc:1506
This is a simple wrapper template that will subclass an Array type or any later type derived from ...
Definition: Array.h:888
octave_idx_type * ridx(void)
Definition: Sparse.h:530
bool is_empty(void) const
Definition: Sparse.h:517
Compressed Column Sparse(rows=3, cols=4, nnz=2[17%])(1
void resize1(octave_idx_type n)
Definition: Sparse.cc:917
=val(i)}if ode{val(i)}occurs in table i
Definition: lookup.cc:239
octave_idx_type compute_index(const Array< octave_idx_type > &ra_idx) const
Definition: Sparse.cc:710
OCTAVE_EXPORT octave_value_list return the value of the option it must match the dimension of the state and the relative tolerance must also be a vector of the same length tem it must match the dimension of the state and the absolute tolerance must also be a vector of the same length The local error test applied at each integration step is xample roup abs(local error in x(i))<
p
Definition: lu.cc:138
T * xdata(void)
Definition: Sparse.h:523
bool is_scalar(void) const
Definition: idx-vector.h:568
octave_idx_type ndims(void) const
Number of dimensions.
Definition: dim-vector.h:301
b
Definition: cellfun.cc:398
bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
Definition: dim-vector.cc:250
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition: oct-locbuf.h:200
void assign(const idx_vector &i, const Array< T > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Definition: Array.cc:1133
virtual ~Sparse(void)
Definition: Sparse.cc:684
void mx_inline_add(size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:108
const T * fortran_vec(void) const
Definition: Array.h:584
octave_idx_type cols(void) const
Definition: Array.h:409
write the output to stdout if nargout is
Definition: load-save.cc:1576
Vector representing the dimensions (size) of an Array.
Definition: dim-vector.h:87
const octave_idx_type * raw(void)
Definition: idx-vector.cc:1032
If this string is the system will ring the terminal sometimes it is useful to be able to print the original representation of the string
Definition: utils.cc:854
octave_idx_type increment(void) const
Definition: idx-vector.cc:1002
Sparse< T > diag(octave_idx_type k=0) const
Definition: Sparse.cc:2404
void print_info(std::ostream &os, const std::string &prefix) const
Definition: Sparse.cc:3000
Sparse< T > transpose(void) const
Definition: Sparse.cc:1090
dim_vector dv
Definition: sub2ind.cc:263
static octave_idx_type lblookup(const octave_idx_type *ridx, octave_idx_type nr, octave_idx_type ri)
Definition: Sparse.cc:1129
idx_vector sorted(bool uniq=false) const
Definition: idx-vector.h:577
Array< T > index(const idx_vector &i) const
Indexing without resizing.
Definition: Array.cc:718
bool is_colon(void) const
Definition: idx-vector.h:565
charNDArray min(char d, const charNDArray &m)
Definition: chNDArray.cc:205
Array< T > array_value(void) const
Definition: Sparse.cc:2675