GNU Octave  4.4.1
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
Sparse.cc
Go to the documentation of this file.
1 /*
2 
3 Copyright (C) 2004-2018 David Bateman
4 Copyright (C) 1998-2004 Andy Adler
5 Copyright (C) 2010 VZLU Prague
6 
7 This file is part of Octave.
8 
9 Octave is free software: you can redistribute it and/or modify it
10 under the terms of the GNU General Public License as published by
11 the Free Software Foundation, either version 3 of the License, or
12 (at your option) any later version.
13 
14 Octave is distributed in the hope that it will be useful, but
15 WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18 
19 You should have received a copy of the GNU General Public License
20 along with Octave; see the file COPYING. If not, see
21 <https://www.gnu.org/licenses/>.
22 
23 */
24 
25 // This file should not include config.h. It is only included in other
26 // C++ source files that should have included config.h before including
27 // this file.
28 
29 #include <cassert>
30 
31 #include <algorithm>
32 #include <iostream>
33 #include <limits>
34 #include <sstream>
35 #include <vector>
36 
37 #include "Array.h"
38 #include "MArray.h"
39 #include "Array-util.h"
40 #include "Range.h"
41 #include "idx-vector.h"
42 #include "lo-error.h"
43 #include "quit.h"
44 #include "oct-locbuf.h"
45 
46 #include "Sparse.h"
47 #include "sparse-sort.h"
48 #include "sparse-util.h"
49 #include "oct-spparms.h"
50 #include "mx-inlines.cc"
51 
52 #include "PermMatrix.h"
53 
54 template <typename T>
55 typename Sparse<T>::SparseRep *
57 {
58  static typename Sparse<T>::SparseRep nr;
59  return &nr;
60 }
61 
62 template <typename T>
63 T&
65 {
67 
68  if (nzmx <= 0)
69  (*current_liboctave_error_handler)
70  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
71 
72  for (i = c[_c]; i < c[_c + 1]; i++)
73  if (r[i] == _r)
74  return d[i];
75  else if (r[i] > _r)
76  break;
77 
78  // Ok, If we've gotten here, we're in trouble. Have to create a
79  // new element in the sparse array. This' gonna be slow!!!
80  if (c[ncols] == nzmx)
81  (*current_liboctave_error_handler)
82  ("Sparse::SparseRep::elem (octave_idx_type, octave_idx_type): sparse matrix filled");
83 
84  octave_idx_type to_move = c[ncols] - i;
85  if (to_move != 0)
86  {
87  for (octave_idx_type j = c[ncols]; j > i; j--)
88  {
89  d[j] = d[j-1];
90  r[j] = r[j-1];
91  }
92  }
93 
94  for (octave_idx_type j = _c + 1; j < ncols + 1; j++)
95  c[j] = c[j] + 1;
96 
97  d[i] = 0.;
98  r[i] = _r;
99 
100  return d[i];
101 }
102 
103 template <typename T>
104 T
106 {
107  if (nzmx > 0)
108  for (octave_idx_type i = c[_c]; i < c[_c + 1]; i++)
109  if (r[i] == _r)
110  return d[i];
111  return T ();
112 }
113 
114 template <typename T>
115 void
117 {
118  if (remove_zeros)
119  {
120  octave_idx_type i = 0;
121  octave_idx_type k = 0;
122  for (octave_idx_type j = 1; j <= ncols; j++)
123  {
124  octave_idx_type u = c[j];
125  for (; i < u; i++)
126  if (d[i] != T ())
127  {
128  d[k] = d[i];
129  r[k++] = r[i];
130  }
131  c[j] = k;
132  }
133  }
134 
135  change_length (c[ncols]);
136 }
137 
138 template <typename T>
139 void
141 {
142  for (octave_idx_type j = ncols; j > 0 && c[j] > nz; j--)
143  c[j] = nz;
144 
145  // Skip reallocation if we have less than 1/frac extra elements to discard.
146  static const int frac = 5;
147  if (nz > nzmx || nz < nzmx - nzmx/frac)
148  {
149  // Reallocate.
150  octave_idx_type min_nzmx = std::min (nz, nzmx);
151 
152  octave_idx_type *new_ridx = new octave_idx_type [nz];
153  std::copy_n (r, min_nzmx, new_ridx);
154 
155  delete [] r;
156  r = new_ridx;
157 
158  T *new_data = new T [nz];
159  std::copy_n (d, min_nzmx, new_data);
160 
161  delete [] d;
162  d = new_data;
163 
164  nzmx = nz;
165  }
166 }
167 
168 template <typename T>
169 bool
171 {
172  return sparse_indices_ok (r, c, nrows, ncols, nnz ());
173 }
174 
175 template <typename T>
176 bool
178 {
179  octave_idx_type nz = nnz ();
180 
181  for (octave_idx_type i = 0; i < nz; i++)
182  if (octave::math::isnan (d[i]))
183  return true;
184 
185  return false;
186 }
187 
188 template <typename T>
190  : rep (nullptr), dimensions (dim_vector (nr, nc))
191 {
192  if (val != T ())
193  {
194  rep = new typename Sparse<T>::SparseRep (nr, nc, dimensions.safe_numel ());
195 
196  octave_idx_type ii = 0;
197  xcidx (0) = 0;
198  for (octave_idx_type j = 0; j < nc; j++)
199  {
200  for (octave_idx_type i = 0; i < nr; i++)
201  {
202  xdata (ii) = val;
203  xridx (ii++) = i;
204  }
205  xcidx (j+1) = ii;
206  }
207  }
208  else
209  {
210  rep = new typename Sparse<T>::SparseRep (nr, nc, 0);
211  for (octave_idx_type j = 0; j < nc+1; j++)
212  xcidx (j) = 0;
213  }
214 }
215 
216 template <typename T>
218  : rep (new typename Sparse<T>::SparseRep (a.rows (), a.cols (), a.rows ())),
219  dimensions (dim_vector (a.rows (), a.cols ()))
220 {
221  octave_idx_type n = a.rows ();
222  for (octave_idx_type i = 0; i <= n; i++)
223  cidx (i) = i;
224 
225  const Array<octave_idx_type> pv = a.col_perm_vec ();
226 
227  for (octave_idx_type i = 0; i < n; i++)
228  ridx (i) = pv(i);
229 
230  for (octave_idx_type i = 0; i < n; i++)
231  data (i) = 1.0;
232 }
233 
234 template <typename T>
236  : rep (nullptr), dimensions (dv)
237 {
238  if (dv.ndims () != 2)
240  ("Sparse::Sparse (const dim_vector&): dimension mismatch");
241 
242  rep = new typename Sparse<T>::SparseRep (dv(0), dv(1), 0);
243 }
244 
245 template <typename T>
247  : rep (nullptr), dimensions (dv)
248 {
249 
250  // Work in unsigned long long to avoid overflow issues with numel
251  unsigned long long a_nel = static_cast<unsigned long long>(a.rows ()) *
252  static_cast<unsigned long long>(a.cols ());
253  unsigned long long dv_nel = static_cast<unsigned long long>(dv(0)) *
254  static_cast<unsigned long long>(dv(1));
255 
256  if (a_nel != dv_nel)
257  (*current_liboctave_error_handler)
258  ("Sparse::Sparse (const Sparse&, const dim_vector&): dimension mismatch");
259 
260  dim_vector old_dims = a.dims ();
261  octave_idx_type new_nzmx = a.nnz ();
262  octave_idx_type new_nr = dv(0);
263  octave_idx_type new_nc = dv(1);
264  octave_idx_type old_nr = old_dims(0);
265  octave_idx_type old_nc = old_dims(1);
266 
267  rep = new typename Sparse<T>::SparseRep (new_nr, new_nc, new_nzmx);
268 
269  octave_idx_type kk = 0;
270  xcidx (0) = 0;
271  for (octave_idx_type i = 0; i < old_nc; i++)
272  for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++)
273  {
274  octave_idx_type tmp = i * old_nr + a.ridx (j);
275  octave_idx_type ii = tmp % new_nr;
276  octave_idx_type jj = (tmp - ii) / new_nr;
277  for (octave_idx_type k = kk; k < jj; k++)
278  xcidx (k+1) = j;
279  kk = jj;
280  xdata (j) = a.data (j);
281  xridx (j) = ii;
282  }
283  for (octave_idx_type k = kk; k < new_nc; k++)
284  xcidx (k+1) = new_nzmx;
285 }
286 
287 template <typename T>
289  const idx_vector& c, octave_idx_type nr,
290  octave_idx_type nc, bool sum_terms,
291  octave_idx_type nzm)
292  : rep (nullptr), dimensions ()
293 {
294  if (nr < 0)
295  nr = r.extent (0);
296  else if (r.extent (nr) > nr)
297  (*current_liboctave_error_handler) ("sparse: row index %d out of bound %d",
298  r.extent (nr), nr);
299 
300  if (nc < 0)
301  nc = c.extent (0);
302  else if (c.extent (nc) > nc)
304  ("sparse: column index %d out of bound %d", r.extent (nc), nc);
305 
306  dimensions = dim_vector (nr, nc);
307 
308  octave_idx_type n = a.numel ();
309  octave_idx_type rl = r.length (nr);
310  octave_idx_type cl = c.length (nc);
311  bool a_scalar = n == 1;
312  if (a_scalar)
313  {
314  if (rl != 1)
315  n = rl;
316  else if (cl != 1)
317  n = cl;
318  }
319 
320  if ((rl != 1 && rl != n) || (cl != 1 && cl != n))
321  (*current_liboctave_error_handler) ("sparse: dimension mismatch");
322 
323  // Only create rep after input validation to avoid memory leak.
324  rep = new typename Sparse<T>::SparseRep (nr, nc, (nzm > 0 ? nzm : 0));
325 
326  if (rl <= 1 && cl <= 1)
327  {
328  if (n == 1 && a(0) != T ())
329  {
330  change_capacity (nzm > 1 ? nzm : 1);
331  xridx (0) = r(0);
332  xdata (0) = a(0);
333  std::fill_n (xcidx () + c(0) + 1, nc - c(0), 1);
334  }
335  }
336  else if (a_scalar)
337  {
338  // This is completely specialized, because the sorts can be simplified.
339  T a0 = a(0);
340  if (a0 == T ())
341  {
342  // Do nothing, it's an empty matrix.
343  }
344  else if (cl == 1)
345  {
346  // Sparse column vector. Sort row indices.
347  idx_vector rs = r.sorted ();
348 
349  octave_quit ();
350 
351  const octave_idx_type *rd = rs.raw ();
352  // Count unique indices.
353  octave_idx_type new_nz = 1;
354  for (octave_idx_type i = 1; i < n; i++)
355  new_nz += rd[i-1] != rd[i];
356 
357  // Allocate result.
358  change_capacity (nzm > new_nz ? nzm : new_nz);
359  std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
360 
361  octave_idx_type *rri = ridx ();
362  T *rrd = data ();
363 
364  octave_quit ();
365 
366  octave_idx_type k = -1;
367  octave_idx_type l = -1;
368 
369  if (sum_terms)
370  {
371  // Sum repeated indices.
372  for (octave_idx_type i = 0; i < n; i++)
373  {
374  if (rd[i] != l)
375  {
376  l = rd[i];
377  rri[++k] = rd[i];
378  rrd[k] = a0;
379  }
380  else
381  rrd[k] += a0;
382  }
383  }
384  else
385  {
386  // Pick the last one.
387  for (octave_idx_type i = 0; i < n; i++)
388  {
389  if (rd[i] != l)
390  {
391  l = rd[i];
392  rri[++k] = rd[i];
393  rrd[k] = a0;
394  }
395  }
396  }
397 
398  }
399  else
400  {
401  idx_vector rr = r;
402  idx_vector cc = c;
403  const octave_idx_type *rd = rr.raw ();
404  const octave_idx_type *cd = cc.raw ();
406  ci[0] = 0;
407  // Bin counts of column indices.
408  for (octave_idx_type i = 0; i < n; i++)
409  ci[cd[i]+1]++;
410  // Make them cumulative, shifted one to right.
411  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
412  {
413  octave_idx_type s1 = s + ci[i];
414  ci[i] = s;
415  s = s1;
416  }
417 
418  octave_quit ();
419 
420  // Bucket sort.
422  for (octave_idx_type i = 0; i < n; i++)
423  if (rl == 1)
424  sidx[ci[cd[i]+1]++] = rd[0];
425  else
426  sidx[ci[cd[i]+1]++] = rd[i];
427 
428  // Subsorts. We don't need a stable sort, all values are equal.
429  xcidx (0) = 0;
430  for (octave_idx_type j = 0; j < nc; j++)
431  {
432  std::sort (sidx + ci[j], sidx + ci[j+1]);
433  octave_idx_type l = -1;
434  octave_idx_type nzj = 0;
435  // Count.
436  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
437  {
438  octave_idx_type k = sidx[i];
439  if (k != l)
440  {
441  l = k;
442  nzj++;
443  }
444  }
445  // Set column pointer.
446  xcidx (j+1) = xcidx (j) + nzj;
447  }
448 
449  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
450  octave_idx_type *rri = ridx ();
451  T *rrd = data ();
452 
453  // Fill-in data.
454  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
455  {
456  octave_quit ();
457  octave_idx_type l = -1;
458  if (sum_terms)
459  {
460  // Sum adjacent terms.
461  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
462  {
463  octave_idx_type k = sidx[i];
464  if (k != l)
465  {
466  l = k;
467  rrd[++jj] = a0;
468  rri[jj] = k;
469  }
470  else
471  rrd[jj] += a0;
472  }
473  }
474  else
475  {
476  // Use the last one.
477  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
478  {
479  octave_idx_type k = sidx[i];
480  if (k != l)
481  {
482  l = k;
483  rrd[++jj] = a0;
484  rri[jj] = k;
485  }
486  }
487  }
488  }
489  }
490  }
491  else if (cl == 1)
492  {
493  // Sparse column vector. Sort row indices.
495  idx_vector rs = r.sorted (rsi);
496 
497  octave_quit ();
498 
499  const octave_idx_type *rd = rs.raw ();
500  const octave_idx_type *rdi = rsi.data ();
501  // Count unique indices.
502  octave_idx_type new_nz = 1;
503  for (octave_idx_type i = 1; i < n; i++)
504  new_nz += rd[i-1] != rd[i];
505 
506  // Allocate result.
507  change_capacity (nzm > new_nz ? nzm : new_nz);
508  std::fill_n (xcidx () + c(0) + 1, nc - c(0), new_nz);
509 
510  octave_idx_type *rri = ridx ();
511  T *rrd = data ();
512 
513  octave_quit ();
514 
515  octave_idx_type k = 0;
516  rri[k] = rd[0];
517  rrd[k] = a(rdi[0]);
518 
519  if (sum_terms)
520  {
521  // Sum repeated indices.
522  for (octave_idx_type i = 1; i < n; i++)
523  {
524  if (rd[i] != rd[i-1])
525  {
526  rri[++k] = rd[i];
527  rrd[k] = a(rdi[i]);
528  }
529  else
530  rrd[k] += a(rdi[i]);
531  }
532  }
533  else
534  {
535  // Pick the last one.
536  for (octave_idx_type i = 1; i < n; i++)
537  {
538  if (rd[i] != rd[i-1])
539  rri[++k] = rd[i];
540  rrd[k] = a(rdi[i]);
541  }
542  }
543 
544  maybe_compress (true);
545  }
546  else
547  {
548  idx_vector rr = r;
549  idx_vector cc = c;
550  const octave_idx_type *rd = rr.raw ();
551  const octave_idx_type *cd = cc.raw ();
553  ci[0] = 0;
554  // Bin counts of column indices.
555  for (octave_idx_type i = 0; i < n; i++)
556  ci[cd[i]+1]++;
557  // Make them cumulative, shifted one to right.
558  for (octave_idx_type i = 1, s = 0; i <= nc; i++)
559  {
560  octave_idx_type s1 = s + ci[i];
561  ci[i] = s;
562  s = s1;
563  }
564 
565  octave_quit ();
566 
567  typedef std::pair<octave_idx_type, octave_idx_type> idx_pair;
568  // Bucket sort.
569  OCTAVE_LOCAL_BUFFER (idx_pair, spairs, n);
570  for (octave_idx_type i = 0; i < n; i++)
571  {
572  idx_pair& p = spairs[ci[cd[i]+1]++];
573  if (rl == 1)
574  p.first = rd[0];
575  else
576  p.first = rd[i];
577  p.second = i;
578  }
579 
580  // Subsorts. We don't need a stable sort, the second index stabilizes it.
581  xcidx (0) = 0;
582  for (octave_idx_type j = 0; j < nc; j++)
583  {
584  std::sort (spairs + ci[j], spairs + ci[j+1]);
585  octave_idx_type l = -1;
586  octave_idx_type nzj = 0;
587  // Count.
588  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
589  {
590  octave_idx_type k = spairs[i].first;
591  if (k != l)
592  {
593  l = k;
594  nzj++;
595  }
596  }
597  // Set column pointer.
598  xcidx (j+1) = xcidx (j) + nzj;
599  }
600 
601  change_capacity (nzm > xcidx (nc) ? nzm : xcidx (nc));
602  octave_idx_type *rri = ridx ();
603  T *rrd = data ();
604 
605  // Fill-in data.
606  for (octave_idx_type j = 0, jj = -1; j < nc; j++)
607  {
608  octave_quit ();
609  octave_idx_type l = -1;
610  if (sum_terms)
611  {
612  // Sum adjacent terms.
613  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
614  {
615  octave_idx_type k = spairs[i].first;
616  if (k != l)
617  {
618  l = k;
619  rrd[++jj] = a(spairs[i].second);
620  rri[jj] = k;
621  }
622  else
623  rrd[jj] += a(spairs[i].second);
624  }
625  }
626  else
627  {
628  // Use the last one.
629  for (octave_idx_type i = ci[j]; i < ci[j+1]; i++)
630  {
631  octave_idx_type k = spairs[i].first;
632  if (k != l)
633  {
634  l = k;
635  rri[++jj] = k;
636  }
637  rrd[jj] = a(spairs[i].second);
638  }
639  }
640  }
641 
642  maybe_compress (true);
643  }
644 }
645 
646 /*
647 %!assert <51880> (sparse (1:2, 2, 1:2, 2, 2), sparse ([0, 1; 0, 2]))
648 %!assert <51880> (sparse (1:2, 1, 1:2, 2, 2), sparse ([1, 0; 2, 0]))
649 %!assert <51880> (sparse (1:2, 2, 1:2, 2, 3), sparse ([0, 1, 0; 0, 2, 0]))
650 */
651 
652 template <typename T>
654  : rep (nullptr), dimensions (a.dims ())
655 {
656  if (dimensions.ndims () > 2)
658  ("Sparse::Sparse (const Array<T>&): dimension mismatch");
659 
660  octave_idx_type nr = rows ();
661  octave_idx_type nc = cols ();
662  octave_idx_type len = a.numel ();
663  octave_idx_type new_nzmx = 0;
664 
665  // First count the number of nonzero terms
666  for (octave_idx_type i = 0; i < len; i++)
667  if (a(i) != T ())
668  new_nzmx++;
669 
670  rep = new typename Sparse<T>::SparseRep (nr, nc, new_nzmx);
671 
672  octave_idx_type ii = 0;
673  xcidx (0) = 0;
674  for (octave_idx_type j = 0; j < nc; j++)
675  {
676  for (octave_idx_type i = 0; i < nr; i++)
677  if (a.elem (i,j) != T ())
678  {
679  xdata (ii) = a.elem (i,j);
680  xridx (ii++) = i;
681  }
682  xcidx (j+1) = ii;
683  }
684 }
685 
686 template <typename T>
688 {
689  if (--rep->count == 0)
690  delete rep;
691 }
692 
693 template <typename T>
694 Sparse<T>&
696 {
697  if (this != &a)
698  {
699  if (--rep->count == 0)
700  delete rep;
701 
702  rep = a.rep;
703  rep->count++;
704 
705  dimensions = a.dimensions;
706  }
707 
708  return *this;
709 }
710 
711 template <typename T>
714 {
715  octave_idx_type n = dimensions.ndims ();
716 
717  if (n <= 0 || n != ra_idx.numel ())
719  ("Sparse<T>::compute_index: invalid ra_idxing operation");
720 
721  octave_idx_type retval = -1;
722 
723  retval = ra_idx(--n);
724 
725  while (--n >= 0)
726  {
727  retval *= dimensions(n);
728  retval += ra_idx(n);
729  }
730 
731  return retval;
732 }
733 
734 template <typename T>
735 T
737 {
738  (*current_liboctave_error_handler) ("%s (%d): range error", fcn, n);
739 }
740 
741 template <typename T>
742 T&
744 {
745  (*current_liboctave_error_handler) ("%s (%d): range error", fcn, n);
746 }
747 
748 template <typename T>
749 T
751  octave_idx_type j) const
752 {
753  (*current_liboctave_error_handler) ("%s (%d, %d): range error", fcn, i, j);
754 }
755 
756 template <typename T>
757 T&
759 {
760  (*current_liboctave_error_handler) ("%s (%d, %d): range error", fcn, i, j);
761 }
762 
763 template <typename T>
764 T
766  const Array<octave_idx_type>& ra_idx) const
767 {
768  std::ostringstream buf;
769 
770  buf << fcn << " (";
771 
773 
774  if (n > 0)
775  buf << ra_idx(0);
776 
777  for (octave_idx_type i = 1; i < n; i++)
778  buf << ", " << ra_idx(i);
779 
780  buf << "): range error";
781 
782  std::string buf_str = buf.str ();
783 
784  (*current_liboctave_error_handler) (buf_str.c_str ());
785 }
786 
787 template <typename T>
788 T&
790 {
791  std::ostringstream buf;
792 
793  buf << fcn << " (";
794 
796 
797  if (n > 0)
798  buf << ra_idx(0);
799 
800  for (octave_idx_type i = 1; i < n; i++)
801  buf << ", " << ra_idx(i);
802 
803  buf << "): range error";
804 
805  std::string buf_str = buf.str ();
806 
807  (*current_liboctave_error_handler) (buf_str.c_str ());
808 }
809 
810 template <typename T>
811 Sparse<T>
812 Sparse<T>::reshape (const dim_vector& new_dims) const
813 {
815  dim_vector dims2 = new_dims;
816 
817  if (dims2.ndims () > 2)
818  {
819  (*current_liboctave_warning_with_id_handler)
820  ("Octave:reshape-smashes-dims",
821  "reshape: sparse reshape to N-D array smashes dims");
822 
823  for (octave_idx_type i = 2; i < dims2.ndims (); i++)
824  dims2(1) *= dims2(i);
825 
826  dims2.resize (2);
827  }
828 
829  if (dimensions != dims2)
830  {
831  if (dimensions.numel () == dims2.numel ())
832  {
833  octave_idx_type new_nnz = nnz ();
834  octave_idx_type new_nr = dims2 (0);
835  octave_idx_type new_nc = dims2 (1);
836  octave_idx_type old_nr = rows ();
837  octave_idx_type old_nc = cols ();
838  retval = Sparse<T> (new_nr, new_nc, new_nnz);
839 
840  octave_idx_type kk = 0;
841  retval.xcidx (0) = 0;
842  // Quotient and remainder of i * old_nr divided by new_nr.
843  // Track them individually to avoid overflow (bug #42850).
844  octave_idx_type i_old_qu = 0;
845  octave_idx_type i_old_rm = static_cast<octave_idx_type> (-old_nr);
846  for (octave_idx_type i = 0; i < old_nc; i++)
847  {
848  i_old_rm += old_nr;
849  if (i_old_rm >= new_nr)
850  {
851  i_old_qu += i_old_rm / new_nr;
852  i_old_rm = i_old_rm % new_nr;
853  }
854  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
855  {
856  octave_idx_type ii, jj;
857  ii = (i_old_rm + ridx (j)) % new_nr;
858  jj = i_old_qu + (i_old_rm + ridx (j)) / new_nr;
859 
860  // Original calculation subject to overflow
861  // ii = (i*old_nr + ridx (j)) % new_nr
862  // jj = (i*old_nr + ridx (j)) / new_nr
863  for (octave_idx_type k = kk; k < jj; k++)
864  retval.xcidx (k+1) = j;
865  kk = jj;
866  retval.xdata (j) = data (j);
867  retval.xridx (j) = ii;
868  }
869  }
870  for (octave_idx_type k = kk; k < new_nc; k++)
871  retval.xcidx (k+1) = new_nnz;
872  }
873  else
874  {
875  std::string dimensions_str = dimensions.str ();
876  std::string new_dims_str = new_dims.str ();
877 
878  (*current_liboctave_error_handler)
879  ("reshape: can't reshape %s array to %s array",
880  dimensions_str.c_str (), new_dims_str.c_str ());
881  }
882  }
883  else
884  retval = *this;
885 
886  return retval;
887 }
888 
889 template <typename T>
890 Sparse<T>
891 Sparse<T>::permute (const Array<octave_idx_type>& perm_vec, bool) const
892 {
893  // The only valid permutations of a sparse array are [1, 2] and [2, 1].
894 
895  bool fail = false;
896  bool trans = false;
897 
898  if (perm_vec.numel () == 2)
899  {
900  if (perm_vec(0) == 0 && perm_vec(1) == 1)
901  /* do nothing */;
902  else if (perm_vec(0) == 1 && perm_vec(1) == 0)
903  trans = true;
904  else
905  fail = true;
906  }
907  else
908  fail = true;
909 
910  if (fail)
911  (*current_liboctave_error_handler)
912  ("permutation vector contains an invalid element");
913 
914  return trans ? this->transpose () : *this;
915 }
916 
917 template <typename T>
918 void
920 {
921  octave_idx_type nr = rows ();
922  octave_idx_type nc = cols ();
923 
924  if (nr == 0)
925  resize (1, std::max (nc, n));
926  else if (nc == 0)
927  resize (nr, (n + nr - 1) / nr); // Ain't it wicked?
928  else if (nr == 1)
929  resize (1, n);
930  else if (nc == 1)
931  resize (n, 1);
932  else
934 }
935 
936 template <typename T>
937 void
939 {
940  octave_idx_type n = dv.ndims ();
941 
942  if (n != 2)
943  (*current_liboctave_error_handler) ("sparse array must be 2-D");
944 
945  resize (dv(0), dv(1));
946 }
947 
948 template <typename T>
949 void
951 {
952  if (r < 0 || c < 0)
953  (*current_liboctave_error_handler) ("can't resize to negative dimension");
954 
955  if (r == dim1 () && c == dim2 ())
956  return;
957 
958  // This wouldn't be necessary for r >= rows () if nrows wasn't part of the
959  // Sparse rep. It is not good for anything in there.
960  make_unique ();
961 
962  if (r < rows ())
963  {
964  octave_idx_type i = 0;
965  octave_idx_type k = 0;
966  for (octave_idx_type j = 1; j <= rep->ncols; j++)
967  {
968  octave_idx_type u = xcidx (j);
969  for (; i < u; i++)
970  if (xridx (i) < r)
971  {
972  xdata (k) = xdata (i);
973  xridx (k++) = xridx (i);
974  }
975  xcidx (j) = k;
976  }
977  }
978 
979  rep->nrows = dimensions(0) = r;
980 
981  if (c != rep->ncols)
982  {
983  octave_idx_type *new_cidx = new octave_idx_type [c+1];
984  std::copy_n (rep->c, std::min (c, rep->ncols) + 1, new_cidx);
985  delete [] rep->c;
986  rep->c = new_cidx;
987 
988  if (c > rep->ncols)
989  std::fill_n (rep->c + rep->ncols + 1, c - rep->ncols,
990  rep->c[rep->ncols]);
991  }
992 
993  rep->ncols = dimensions(1) = c;
994 
995  rep->change_length (rep->nnz ());
996 }
997 
998 template <typename T>
999 Sparse<T>&
1001 {
1002  octave_idx_type a_rows = a.rows ();
1003  octave_idx_type a_cols = a.cols ();
1004  octave_idx_type nr = rows ();
1005  octave_idx_type nc = cols ();
1006 
1007  if (r < 0 || r + a_rows > rows () || c < 0 || c + a_cols > cols ())
1008  (*current_liboctave_error_handler) ("range error for insert");
1009 
1010  // First count the number of elements in the final array
1011  octave_idx_type nel = cidx (c) + a.nnz ();
1012 
1013  if (c + a_cols < nc)
1014  nel += cidx (nc) - cidx (c + a_cols);
1015 
1016  for (octave_idx_type i = c; i < c + a_cols; i++)
1017  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1018  if (ridx (j) < r || ridx (j) >= r + a_rows)
1019  nel++;
1020 
1021  Sparse<T> tmp (*this);
1022  --rep->count;
1023  rep = new typename Sparse<T>::SparseRep (nr, nc, nel);
1024 
1025  for (octave_idx_type i = 0; i < tmp.cidx (c); i++)
1026  {
1027  data (i) = tmp.data (i);
1028  ridx (i) = tmp.ridx (i);
1029  }
1030  for (octave_idx_type i = 0; i < c + 1; i++)
1031  cidx (i) = tmp.cidx (i);
1032 
1033  octave_idx_type ii = cidx (c);
1034 
1035  for (octave_idx_type i = c; i < c + a_cols; i++)
1036  {
1037  octave_quit ();
1038 
1039  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1040  if (tmp.ridx (j) < r)
1041  {
1042  data (ii) = tmp.data (j);
1043  ridx (ii++) = tmp.ridx (j);
1044  }
1045 
1046  octave_quit ();
1047 
1048  for (octave_idx_type j = a.cidx (i-c); j < a.cidx (i-c+1); j++)
1049  {
1050  data (ii) = a.data (j);
1051  ridx (ii++) = r + a.ridx (j);
1052  }
1053 
1054  octave_quit ();
1055 
1056  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1057  if (tmp.ridx (j) >= r + a_rows)
1058  {
1059  data (ii) = tmp.data (j);
1060  ridx (ii++) = tmp.ridx (j);
1061  }
1062 
1063  cidx (i+1) = ii;
1064  }
1065 
1066  for (octave_idx_type i = c + a_cols; i < nc; i++)
1067  {
1068  for (octave_idx_type j = tmp.cidx (i); j < tmp.cidx (i+1); j++)
1069  {
1070  data (ii) = tmp.data (j);
1071  ridx (ii++) = tmp.ridx (j);
1072  }
1073  cidx (i+1) = ii;
1074  }
1075 
1076  return *this;
1077 }
1078 
1079 template <typename T>
1080 Sparse<T>&
1082 {
1083 
1084  if (ra_idx.numel () != 2)
1085  (*current_liboctave_error_handler) ("range error for insert");
1086 
1087  return insert (a, ra_idx(0), ra_idx(1));
1088 }
1089 
1090 template <typename T>
1091 Sparse<T>
1093 {
1094  assert (ndims () == 2);
1095 
1096  octave_idx_type nr = rows ();
1097  octave_idx_type nc = cols ();
1098  octave_idx_type nz = nnz ();
1099  Sparse<T> retval (nc, nr, nz);
1100 
1101  for (octave_idx_type i = 0; i < nz; i++)
1102  retval.xcidx (ridx (i) + 1)++;
1103  // retval.xcidx[1:nr] holds the row degrees for rows 0:(nr-1)
1104  nz = 0;
1105  for (octave_idx_type i = 1; i <= nr; i++)
1106  {
1107  const octave_idx_type tmp = retval.xcidx (i);
1108  retval.xcidx (i) = nz;
1109  nz += tmp;
1110  }
1111  // retval.xcidx[1:nr] holds row entry *start* offsets for rows 0:(nr-1)
1112 
1113  for (octave_idx_type j = 0; j < nc; j++)
1114  for (octave_idx_type k = cidx (j); k < cidx (j+1); k++)
1115  {
1116  octave_idx_type q = retval.xcidx (ridx (k) + 1)++;
1117  retval.xridx (q) = j;
1118  retval.xdata (q) = data (k);
1119  }
1120  assert (nnz () == retval.xcidx (nr));
1121  // retval.xcidx[1:nr] holds row entry *end* offsets for rows 0:(nr-1)
1122  // and retval.xcidx[0:(nr-1)] holds their row entry *start* offsets
1123 
1124  return retval;
1125 }
1126 
1127 // Lower bound lookup. Could also use octave_sort, but that has upper bound
1128 // semantics, so requires some manipulation to set right. Uses a plain loop
1129 // for small columns.
1130 static octave_idx_type
1132  octave_idx_type ri)
1133 {
1134  if (nr <= 8)
1135  {
1136  octave_idx_type l;
1137  for (l = 0; l < nr; l++)
1138  if (ridx[l] >= ri)
1139  break;
1140  return l;
1141  }
1142  else
1143  return std::lower_bound (ridx, ridx + nr, ri) - ridx;
1144 }
1145 
1146 template <typename T>
1147 void
1149 {
1150  Sparse<T> retval;
1151 
1152  assert (ndims () == 2);
1153 
1154  octave_idx_type nr = dim1 ();
1155  octave_idx_type nc = dim2 ();
1156  octave_idx_type nz = nnz ();
1157 
1158  octave_idx_type nel = numel (); // Can throw.
1159 
1160  const dim_vector idx_dims = idx.orig_dimensions ();
1161 
1162  if (idx.extent (nel) > nel)
1163  octave::err_del_index_out_of_range (true, idx.extent (nel), nel);
1164 
1165  if (nc == 1)
1166  {
1167  // Sparse column vector.
1168  const Sparse<T> tmp = *this; // constant copy to prevent COW.
1169 
1170  octave_idx_type lb, ub;
1171 
1172  if (idx.is_cont_range (nel, lb, ub))
1173  {
1174  // Special-case a contiguous range.
1175  // Look-up indices first.
1176  octave_idx_type li = lblookup (tmp.ridx (), nz, lb);
1177  octave_idx_type ui = lblookup (tmp.ridx (), nz, ub);
1178  // Copy data and adjust indices.
1179  octave_idx_type nz_new = nz - (ui - li);
1180  *this = Sparse<T> (nr - (ub - lb), 1, nz_new);
1181  std::copy_n (tmp.data (), li, data ());
1182  std::copy_n (tmp.ridx (), li, xridx ());
1183  std::copy (tmp.data () + ui, tmp.data () + nz, xdata () + li);
1184  mx_inline_sub (nz - ui, xridx () + li, tmp.ridx () + ui, ub - lb);
1185  xcidx (1) = nz_new;
1186  }
1187  else
1188  {
1189  OCTAVE_LOCAL_BUFFER (octave_idx_type, ridx_new, nz);
1190  OCTAVE_LOCAL_BUFFER (T, data_new, nz);
1191  idx_vector sidx = idx.sorted (true);
1192  const octave_idx_type *sj = sidx.raw ();
1193  octave_idx_type sl = sidx.length (nel);
1194  octave_idx_type nz_new = 0;
1195  octave_idx_type j = 0;
1196  for (octave_idx_type i = 0; i < nz; i++)
1197  {
1198  octave_idx_type r = tmp.ridx (i);
1199  for (; j < sl && sj[j] < r; j++) ;
1200  if (j == sl || sj[j] > r)
1201  {
1202  data_new[nz_new] = tmp.data (i);
1203  ridx_new[nz_new++] = r - j;
1204  }
1205  }
1206 
1207  *this = Sparse<T> (nr - sl, 1, nz_new);
1208  std::copy_n (ridx_new, nz_new, ridx ());
1209  std::copy_n (data_new, nz_new, xdata ());
1210  xcidx (1) = nz_new;
1211  }
1212  }
1213  else if (nr == 1)
1214  {
1215  // Sparse row vector.
1216  octave_idx_type lb, ub;
1217  if (idx.is_cont_range (nc, lb, ub))
1218  {
1219  const Sparse<T> tmp = *this;
1220  octave_idx_type lbi = tmp.cidx (lb);
1221  octave_idx_type ubi = tmp.cidx (ub);
1222  octave_idx_type new_nz = nz - (ubi - lbi);
1223  *this = Sparse<T> (1, nc - (ub - lb), new_nz);
1224  std::copy_n (tmp.data (), lbi, data ());
1225  std::copy (tmp.data () + ubi, tmp.data () + nz , xdata () + lbi);
1226  std::fill_n (ridx (), new_nz, static_cast<octave_idx_type> (0));
1227  std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
1228  mx_inline_sub (nc - ub, xcidx () + 1, tmp.cidx () + ub + 1,
1229  ubi - lbi);
1230  }
1231  else
1232  *this = index (idx.complement (nc));
1233  }
1234  else if (idx.length (nel) != 0)
1235  {
1236  if (idx.is_colon_equiv (nel))
1237  *this = Sparse<T> ();
1238  else
1239  {
1240  *this = index (idx_vector::colon);
1241  delete_elements (idx);
1242  *this = transpose (); // We want a row vector.
1243  }
1244  }
1245 }
1246 
1247 template <typename T>
1248 void
1250 {
1251  assert (ndims () == 2);
1252 
1253  octave_idx_type nr = dim1 ();
1254  octave_idx_type nc = dim2 ();
1255  octave_idx_type nz = nnz ();
1256 
1257  if (idx_i.is_colon ())
1258  {
1259  // Deleting columns.
1260  octave_idx_type lb, ub;
1261  if (idx_j.extent (nc) > nc)
1262  octave::err_del_index_out_of_range (false, idx_j.extent (nc), nc);
1263  else if (idx_j.is_cont_range (nc, lb, ub))
1264  {
1265  if (lb == 0 && ub == nc)
1266  {
1267  // Delete all rows and columns.
1268  *this = Sparse<T> (nr, 0);
1269  }
1270  else if (nz == 0)
1271  {
1272  // No elements to preserve; adjust dimensions.
1273  *this = Sparse<T> (nr, nc - (ub - lb));
1274  }
1275  else
1276  {
1277  const Sparse<T> tmp = *this;
1278  octave_idx_type lbi = tmp.cidx (lb);
1279  octave_idx_type ubi = tmp.cidx (ub);
1280  octave_idx_type new_nz = nz - (ubi - lbi);
1281 
1282  *this = Sparse<T> (nr, nc - (ub - lb), new_nz);
1283  std::copy_n (tmp.data (), lbi, data ());
1284  std::copy_n (tmp.ridx (), lbi, ridx ());
1285  std::copy (tmp.data () + ubi, tmp.data () + nz, xdata () + lbi);
1286  std::copy (tmp.ridx () + ubi, tmp.ridx () + nz, xridx () + lbi);
1287  std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
1288  mx_inline_sub (nc - ub, xcidx () + lb + 1,
1289  tmp.cidx () + ub + 1, ubi - lbi);
1290  }
1291  }
1292  else
1293  *this = index (idx_i, idx_j.complement (nc));
1294  }
1295  else if (idx_j.is_colon ())
1296  {
1297  // Deleting rows.
1298  octave_idx_type lb, ub;
1299  if (idx_i.extent (nr) > nr)
1300  octave::err_del_index_out_of_range (false, idx_i.extent (nr), nr);
1301  else if (idx_i.is_cont_range (nr, lb, ub))
1302  {
1303  if (lb == 0 && ub == nr)
1304  {
1305  // Delete all rows and columns.
1306  *this = Sparse<T> (0, nc);
1307  }
1308  else if (nz == 0)
1309  {
1310  // No elements to preserve; adjust dimensions.
1311  *this = Sparse<T> (nr - (ub - lb), nc);
1312  }
1313  else
1314  {
1315  // This is more memory-efficient than the approach below.
1316  const Sparse<T> tmpl = index (idx_vector (0, lb), idx_j);
1317  const Sparse<T> tmpu = index (idx_vector (ub, nr), idx_j);
1318  *this = Sparse<T> (nr - (ub - lb), nc,
1319  tmpl.nnz () + tmpu.nnz ());
1320  for (octave_idx_type j = 0, k = 0; j < nc; j++)
1321  {
1322  for (octave_idx_type i = tmpl.cidx (j); i < tmpl.cidx (j+1);
1323  i++)
1324  {
1325  xdata (k) = tmpl.data (i);
1326  xridx (k++) = tmpl.ridx (i);
1327  }
1328  for (octave_idx_type i = tmpu.cidx (j); i < tmpu.cidx (j+1);
1329  i++)
1330  {
1331  xdata (k) = tmpu.data (i);
1332  xridx (k++) = tmpu.ridx (i) + lb;
1333  }
1334 
1335  xcidx (j+1) = k;
1336  }
1337  }
1338  }
1339  else
1340  {
1341  // This is done by transposing, deleting columns, then transposing
1342  // again.
1343  Sparse<T> tmp = transpose ();
1344  tmp.delete_elements (idx_j, idx_i);
1345  *this = tmp.transpose ();
1346  }
1347  }
1348  else
1349  {
1350  // Empty assignment (no elements to delete) is OK if there is at
1351  // least one zero-length index and at most one other index that is
1352  // non-colon (or equivalent) index. Since we only have two
1353  // indices, we just need to check that we have at least one zero
1354  // length index. Matlab considers "[]" to be an empty index but
1355  // not "false". We accept both.
1356 
1357  bool empty_assignment
1358  = (idx_i.length (nr) == 0 || idx_j.length (nc) == 0);
1359 
1360  if (! empty_assignment)
1361  (*current_liboctave_error_handler)
1362  ("a null assignment can only have one non-colon index");
1363  }
1364 }
1365 
// Delete the elements indexed by IDX along dimension DIM of *this.
// DIM == 0 deletes rows, DIM == 1 deletes columns; anything else is an
// error.  Dispatches to the two-index delete_elements overload with a
// colon for the untouched dimension.
// NOTE(review): the signature line was missing from this listing; it is
// restored here from the Octave 4.4.1 sources.
1366 template <typename T>
1367 void
1368 Sparse<T>::delete_elements (int dim, const idx_vector& idx)
1369 {
1370  if (dim == 0)
1371  delete_elements (idx, idx_vector::colon);
1372  else if (dim == 1)
1373  delete_elements (idx_vector::colon, idx);
1374  else
1375  (*current_liboctave_error_handler) ("invalid dimension in delete_elements");
1376 }
1377 
// Linear (one-dimensional) indexing: return this(idx) as a new sparse
// object, treating the matrix as a column-major vector of numel ()
// elements.  If resize_ok is true, out-of-bound indices enlarge a copy
// of the matrix first instead of raising an out-of-range error.
// Several shapes are special-cased so the compressed-column storage can
// be reused without densifying.
1378 template <typename T>
1379 Sparse<T>
1380 Sparse<T>::index (const idx_vector& idx, bool resize_ok) const
1381 {
1382  Sparse<T> retval;
1383 
1384  assert (ndims () == 2);
1385 
1386  octave_idx_type nr = dim1 ();
1387  octave_idx_type nc = dim2 ();
1388  octave_idx_type nz = nnz ();
1389 
1390  octave_idx_type nel = numel (); // Can throw.
1391 
1392  const dim_vector idx_dims = idx.orig_dimensions ().redim (2);
1393 
1394  if (idx.is_colon ())
1395  {
1396  if (nc == 1)
1397  retval = *this;
1398  else
1399  {
1400  // Fast magic colon processing.
 // A(:) on a multi-column matrix: keep data/cidx order, shift each
 // row index by its column offset i*nr to linearize.
1401  retval = Sparse<T> (nel, 1, nz);
1402 
1403  for (octave_idx_type i = 0; i < nc; i++)
1404  {
1405  for (octave_idx_type j = cidx (i); j < cidx (i+1); j++)
1406  {
1407  retval.xdata (j) = data (j);
1408  retval.xridx (j) = ridx (j) + i * nr;
1409  }
1410  }
1411 
1412  retval.xcidx (0) = 0;
1413  retval.xcidx (1) = nz;
1414  }
1415  }
1416  else if (idx.extent (nel) > nel)
1417  {
1418  if (! resize_ok)
1419  octave::err_index_out_of_range (1, 1, idx.extent (nel), nel, dims ());
1420 
1421  // resize_ok is completely handled here.
 // Grow a copy to the index extent, then index recursively; the
 // recursive call can no longer hit this branch.
1422  octave_idx_type ext = idx.extent (nel);
1423  Sparse<T> tmp = *this;
1424  tmp.resize1 (ext);
1425  retval = tmp.index (idx);
1426  }
1427  else if (nr == 1 && nc == 1)
1428  {
1429  // You have to be pretty sick to get to this bit of code,
1430  // since you have a scalar stored as a sparse matrix, and
1431  // then want to make a dense matrix with sparse representation.
1432  // Ok, we'll do it, but you deserve what you get!!
1433  retval = (Sparse<T> (idx_dims(0), idx_dims(1), nz ? data (0) : T ()));
1434  }
1435  else if (nc == 1)
1436  {
1437  // Sparse column vector.
1438  octave_idx_type lb, ub;
1439 
1440  if (idx.is_scalar ())
1441  {
1442  // Scalar index - just a binary lookup.
 // lblookup (defined earlier in this file) presumably returns the
 // lower-bound position of idx(0) within ridx — verify there.
1443  octave_idx_type i = lblookup (ridx (), nz, idx(0));
1444  if (i < nz && ridx (i) == idx(0))
1445  retval = Sparse (1, 1, data (i));
1446  else
1447  retval = Sparse (1, 1);
1448  }
1449  else if (idx.is_cont_range (nel, lb, ub))
1450  {
1451  // Special-case a contiguous range.
1452  // Look-up indices first.
1453  octave_idx_type li = lblookup (ridx (), nz, lb);
1454  octave_idx_type ui = lblookup (ridx (), nz, ub);
1455  // Copy data and adjust indices.
1456  octave_idx_type nz_new = ui - li;
1457  retval = Sparse<T> (ub - lb, 1, nz_new);
1458  std::copy_n (data () + li, nz_new, retval.data ());
1459  mx_inline_sub (nz_new, retval.xridx (), ridx () + li, lb);
1460  retval.xcidx (1) = nz_new;
1461  }
1462  else if (idx.is_permutation (nel) && idx.isvector ())
1463  {
1464  if (idx.is_range () && idx.increment () == -1)
1465  {
 // Reversal permutation A(end:-1:1): mirror row indices and
 // reverse the data, nnz is unchanged.
1466  retval = Sparse<T> (nr, 1, nz);
1467 
1468  for (octave_idx_type j = 0; j < nz; j++)
1469  retval.ridx (j) = nr - ridx (nz - j - 1) - 1;
1470 
1471  std::copy_n (cidx (), 2, retval.cidx ());
1472  std::reverse_copy (data (), data () + nz, retval.data ());
1473  }
1474  else
1475  {
 // General permutation: go through a dense intermediate.
1476  Array<T> tmp = array_value ();
1477  tmp = tmp.index (idx);
1478  retval = Sparse<T> (tmp);
1479  }
1480  }
1481  else
1482  {
1483  // If indexing a sparse column vector by a vector, the result is a
1484  // sparse column vector, otherwise it inherits the shape of index.
1485  // Vector transpose is cheap, so do it right here.
1486 
1487  Array<octave_idx_type> tmp_idx = idx.as_array ().as_matrix ();
1488 
1489  const Array<octave_idx_type> idxa = (idx_dims(0) == 1
1490  ? tmp_idx.transpose ()
1491  : tmp_idx);
1492 
1493  octave_idx_type new_nr = idxa.rows ();
1494  octave_idx_type new_nc = idxa.cols ();
1495 
1496  // Lookup.
1497  // FIXME: Could specialize for sorted idx?
1498  NoAlias< Array<octave_idx_type>> lidx (dim_vector (new_nr, new_nc));
1499  for (octave_idx_type i = 0; i < new_nr*new_nc; i++)
1500  lidx(i) = lblookup (ridx (), nz, idxa(i));
1501 
1502  // Count matches.
 // Misses are overwritten with the sentinel nz so the copy loop
 // below can recognize and skip them.
1503  retval = Sparse<T> (idxa.rows (), idxa.cols ());
1504  for (octave_idx_type j = 0; j < new_nc; j++)
1505  {
1506  octave_idx_type nzj = 0;
1507  for (octave_idx_type i = 0; i < new_nr; i++)
1508  {
1509  octave_idx_type l = lidx(i, j);
1510  if (l < nz && ridx (l) == idxa(i, j))
1511  nzj++;
1512  else
1513  lidx(i, j) = nz;
1514  }
1515  retval.xcidx (j+1) = retval.xcidx (j) + nzj;
1516  }
1517 
1518  retval.change_capacity (retval.xcidx (new_nc));
1519 
1520  // Copy data and set row indices.
1521  octave_idx_type k = 0;
1522  for (octave_idx_type j = 0; j < new_nc; j++)
1523  for (octave_idx_type i = 0; i < new_nr; i++)
1524  {
1525  octave_idx_type l = lidx(i, j);
1526  if (l < nz)
1527  {
1528  retval.data (k) = data (l);
1529  retval.xridx (k++) = i;
1530  }
1531  }
1532  }
1533  }
1534  else if (nr == 1)
1535  {
 // Sparse row vector: linear indices coincide with column numbers,
 // so a contiguous range is just a cidx window.
1536  octave_idx_type lb, ub;
1537  if (idx.is_scalar ())
1538  retval = Sparse<T> (1, 1, elem (0, idx(0)));
1539  else if (idx.is_cont_range (nel, lb, ub))
1540  {
1541  // Special-case a contiguous range.
1542  octave_idx_type lbi = cidx (lb);
1543  octave_idx_type ubi = cidx (ub);
1544  octave_idx_type new_nz = ubi - lbi;
1545  retval = Sparse<T> (1, ub - lb, new_nz);
1546  std::copy_n (data () + lbi, new_nz, retval.data ());
1547  std::fill_n (retval.ridx (), new_nz, static_cast<octave_idx_type> (0));
1548  mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1549  }
1550  else
1551  {
1552  // Sparse row vectors occupy O(nr) storage anyway, so let's just
1553  // convert the matrix to full, index, and sparsify the result.
1554  retval = Sparse<T> (array_value ().index (idx));
1555  }
1556  }
1557  else
1558  {
1559  if (nr != 0 && idx.is_scalar ())
1560  retval = Sparse<T> (1, 1, elem (idx(0) % nr, idx(0) / nr));
1561  else
1562  {
1563  // Indexing a non-vector sparse matrix by linear indexing.
1564  // I suppose this is rare (and it may easily overflow), so let's take
1565  // the easy way, and reshape first to column vector, which is already
1566  // handled above.
1567  retval = index (idx_vector::colon).index (idx);
1568  // In this case we're supposed to always inherit the shape, but
1569  // column(row) doesn't do it, so we'll do it instead.
1570  if (idx_dims(0) == 1 && idx_dims(1) != 1)
1571  retval = retval.transpose ();
1572  }
1573  }
1574 
1575  return retval;
1576 }
1577 
1578 template <typename T>
1579 Sparse<T>
1580 Sparse<T>::index (const idx_vector& idx_i, const idx_vector& idx_j,
1581  bool resize_ok) const
1582 {
1583  Sparse<T> retval;
1584 
1585  assert (ndims () == 2);
1586 
1587  octave_idx_type nr = dim1 ();
1588  octave_idx_type nc = dim2 ();
1589 
1590  octave_idx_type n = idx_i.length (nr);
1591  octave_idx_type m = idx_j.length (nc);
1592 
1593  octave_idx_type lb, ub;
1594 
1595  if (idx_i.extent (nr) > nr || idx_j.extent (nc) > nc)
1596  {
1597  // resize_ok is completely handled here.
1598  if (resize_ok)
1599  {
1600  octave_idx_type ext_i = idx_i.extent (nr);
1601  octave_idx_type ext_j = idx_j.extent (nc);
1602  Sparse<T> tmp = *this;
1603  tmp.resize (ext_i, ext_j);
1604  retval = tmp.index (idx_i, idx_j);
1605  }
1606  else if (idx_i.extent (nr) > nr)
1607  octave::err_index_out_of_range (2, 1, idx_i.extent (nr), nr, dims ());
1608  else
1609  octave::err_index_out_of_range (2, 2, idx_j.extent (nc), nc, dims ());
1610  }
1611  else if (nr == 1 && nc == 1)
1612  {
1613  // Scalars stored as sparse matrices occupy more memory than
1614  // a scalar, so let's just convert the matrix to full, index,
1615  // and sparsify the result.
1616 
1617  retval = Sparse<T> (array_value ().index (idx_i, idx_j));
1618  }
1619  else if (idx_i.is_colon ())
1620  {
1621  // Great, we're just manipulating columns. This is going to be quite
1622  // efficient, because the columns can stay compressed as they are.
1623  if (idx_j.is_colon ())
1624  retval = *this; // Shallow copy.
1625  else if (idx_j.is_cont_range (nc, lb, ub))
1626  {
1627  // Special-case a contiguous range.
1628  octave_idx_type lbi = cidx (lb);
1629  octave_idx_type ubi = cidx (ub);
1630  octave_idx_type new_nz = ubi - lbi;
1631  retval = Sparse<T> (nr, ub - lb, new_nz);
1632  std::copy_n (data () + lbi, new_nz, retval.data ());
1633  std::copy_n (ridx () + lbi, new_nz, retval.ridx ());
1634  mx_inline_sub (ub - lb + 1, retval.cidx (), cidx () + lb, lbi);
1635  }
1636  else
1637  {
1638  // Count new nonzero elements.
1639  retval = Sparse<T> (nr, m);
1640  for (octave_idx_type j = 0; j < m; j++)
1641  {
1642  octave_idx_type jj = idx_j(j);
1643  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1644  }
1645 
1646  retval.change_capacity (retval.xcidx (m));
1647 
1648  // Copy data & indices.
1649  for (octave_idx_type j = 0; j < m; j++)
1650  {
1651  octave_idx_type ljj = cidx (idx_j(j));
1652  octave_idx_type lj = retval.xcidx (j);
1653  octave_idx_type nzj = retval.xcidx (j+1) - lj;
1654 
1655  std::copy_n (data () + ljj, nzj, retval.data () + lj);
1656  std::copy_n (ridx () + ljj, nzj, retval.ridx () + lj);
1657  }
1658  }
1659  }
1660  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
1661  {
1662  // It's actually vector indexing. The 1D index is specialized for that.
1663  retval = index (idx_i);
1664 
1665  // If nr == 1 then the vector indexing will return a column vector!!
1666  if (nr == 1)
1667  retval.transpose ();
1668  }
1669  else if (idx_i.is_scalar ())
1670  {
1671  octave_idx_type ii = idx_i(0);
1672  retval = Sparse<T> (1, m);
1674  for (octave_idx_type j = 0; j < m; j++)
1675  {
1676  octave_quit ();
1677  octave_idx_type jj = idx_j(j);
1678  octave_idx_type lj = cidx (jj);
1679  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1680 
1681  // Scalar index - just a binary lookup.
1682  octave_idx_type i = lblookup (ridx () + lj, nzj, ii);
1683  if (i < nzj && ridx (i+lj) == ii)
1684  {
1685  ij[j] = i + lj;
1686  retval.xcidx (j+1) = retval.xcidx (j) + 1;
1687  }
1688  else
1689  retval.xcidx (j+1) = retval.xcidx (j);
1690  }
1691 
1692  retval.change_capacity (retval.xcidx (m));
1693 
1694  // Copy data, adjust row indices.
1695  for (octave_idx_type j = 0; j < m; j++)
1696  {
1697  octave_idx_type i = retval.xcidx (j);
1698  if (retval.xcidx (j+1) > i)
1699  {
1700  retval.xridx (i) = 0;
1701  retval.xdata (i) = data (ij[j]);
1702  }
1703  }
1704  }
1705  else if (idx_i.is_cont_range (nr, lb, ub))
1706  {
1707  retval = Sparse<T> (n, m);
1710  for (octave_idx_type j = 0; j < m; j++)
1711  {
1712  octave_quit ();
1713  octave_idx_type jj = idx_j(j);
1714  octave_idx_type lj = cidx (jj);
1715  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1716  octave_idx_type lij, uij;
1717 
1718  // Lookup indices.
1719  li[j] = lij = lblookup (ridx () + lj, nzj, lb) + lj;
1720  ui[j] = uij = lblookup (ridx () + lj, nzj, ub) + lj;
1721  retval.xcidx (j+1) = retval.xcidx (j) + ui[j] - li[j];
1722  }
1723 
1724  retval.change_capacity (retval.xcidx (m));
1725 
1726  // Copy data, adjust row indices.
1727  for (octave_idx_type j = 0, k = 0; j < m; j++)
1728  {
1729  octave_quit ();
1730  for (octave_idx_type i = li[j]; i < ui[j]; i++)
1731  {
1732  retval.xdata (k) = data (i);
1733  retval.xridx (k++) = ridx (i) - lb;
1734  }
1735  }
1736  }
1737  else if (idx_i.is_permutation (nr))
1738  {
1739  // The columns preserve their length, just need to renumber and sort them.
1740  // Count new nonzero elements.
1741  retval = Sparse<T> (nr, m);
1742  for (octave_idx_type j = 0; j < m; j++)
1743  {
1744  octave_idx_type jj = idx_j(j);
1745  retval.xcidx (j+1) = retval.xcidx (j) + (cidx (jj+1) - cidx (jj));
1746  }
1747 
1748  retval.change_capacity (retval.xcidx (m));
1749 
1750  octave_quit ();
1751 
1752  if (idx_i.is_range () && idx_i.increment () == -1)
1753  {
1754  // It's nr:-1:1. Just flip all columns.
1755  for (octave_idx_type j = 0; j < m; j++)
1756  {
1757  octave_quit ();
1758  octave_idx_type jj = idx_j(j);
1759  octave_idx_type lj = cidx (jj);
1760  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1761  octave_idx_type li = retval.xcidx (j);
1762  octave_idx_type uj = lj + nzj - 1;
1763  for (octave_idx_type i = 0; i < nzj; i++)
1764  {
1765  retval.xdata (li + i) = data (uj - i); // Copy in reverse order.
1766  retval.xridx (li + i) = nr - 1 - ridx (uj - i); // Ditto with transform.
1767  }
1768  }
1769  }
1770  else
1771  {
1772  // Get inverse permutation.
1773  idx_vector idx_iinv = idx_i.inverse_permutation (nr);
1774  const octave_idx_type *iinv = idx_iinv.raw ();
1775 
1776  // Scatter buffer.
1777  OCTAVE_LOCAL_BUFFER (T, scb, nr);
1778  octave_idx_type *rri = retval.ridx ();
1779 
1780  for (octave_idx_type j = 0; j < m; j++)
1781  {
1782  octave_quit ();
1783  octave_idx_type jj = idx_j(j);
1784  octave_idx_type lj = cidx (jj);
1785  octave_idx_type nzj = cidx (jj+1) - cidx (jj);
1786  octave_idx_type li = retval.xcidx (j);
1787  // Scatter the column, transform indices.
1788  for (octave_idx_type i = 0; i < nzj; i++)
1789  scb[rri[li + i] = iinv[ridx (lj + i)]] = data (lj + i);
1790 
1791  octave_quit ();
1792 
1793  // Sort them.
1794  std::sort (rri + li, rri + li + nzj);
1795 
1796  // Gather.
1797  for (octave_idx_type i = 0; i < nzj; i++)
1798  retval.xdata (li + i) = scb[rri[li + i]];
1799  }
1800  }
1801 
1802  }
1803  else if (idx_j.is_colon ())
1804  {
1805  // This requires uncompressing columns, which is generally costly,
1806  // so we rely on the efficient transpose to handle this.
1807  // It may still make sense to optimize some cases here.
1808  retval = transpose ();
1809  retval = retval.index (idx_vector::colon, idx_i);
1810  retval = retval.transpose ();
1811  }
1812  else
1813  {
1814  // A(I, J) is decomposed into A(:, J)(I, :).
1815  retval = index (idx_vector::colon, idx_j);
1816  retval = retval.index (idx_i, idx_vector::colon);
1817  }
1818 
1819  return retval;
1820 }
1821 
// Linear-indexed assignment: this(idx) = rhs, with the matrix treated
// as a column-major vector.  A scalar rhs is broadcast over idx; a
// conforming rhs is spliced into the compressed storage, reusing
// existing capacity when the nonzero count does not shrink.
1822 template <typename T>
1823 void
1824 Sparse<T>::assign (const idx_vector& idx, const Sparse<T>& rhs)
1825 {
1826  Sparse<T> retval;
1827 
1828  assert (ndims () == 2);
1829 
1830  octave_idx_type nr = dim1 ();
1831  octave_idx_type nc = dim2 ();
1832  octave_idx_type nz = nnz ();
1833 
1834  octave_idx_type n = numel (); // Can throw.
1835 
1836  octave_idx_type rhl = rhs.numel ();
1837 
1838  if (idx.length (n) == rhl)
1839  {
1840  if (rhl == 0)
1841  return;
1842 
1843  octave_idx_type nx = idx.extent (n);
1844  // Try to resize first if necessary.
1845  if (nx != n)
1846  {
1847  resize1 (nx);
1848  n = numel ();
1849  nr = rows ();
1850  nc = cols ();
1851  // nz is preserved.
1852  }
1853 
1854  if (idx.is_colon ())
1855  {
1856  *this = rhs.reshape (dimensions);
1857  }
1858  else if (nc == 1 && rhs.cols () == 1)
1859  {
1860  // Sparse column vector to sparse column vector assignment.
1861 
1862  octave_idx_type lb, ub;
1863  if (idx.is_cont_range (nr, lb, ub))
1864  {
1865  // Special-case a contiguous range.
1866  // Look-up indices first.
 // [li, ui) is the span of existing nonzeros whose row index
 // falls in [lb, ub); they get replaced by rhs's rnz nonzeros.
1867  octave_idx_type li = lblookup (ridx (), nz, lb);
1868  octave_idx_type ui = lblookup (ridx (), nz, ub);
1869  octave_idx_type rnz = rhs.nnz ();
1870  octave_idx_type new_nz = nz - (ui - li) + rnz;
1871 
1872  if (new_nz >= nz && new_nz <= nzmax ())
1873  {
1874  // Adding/overwriting elements, enough capacity allocated.
1875 
1876  if (new_nz > nz)
1877  {
1878  // Make room first.
1879  std::copy_backward (data () + ui, data () + nz,
1880  data () + nz + rnz);
1881  std::copy_backward (ridx () + ui, ridx () + nz,
1882  ridx () + nz + rnz);
1883  }
1884 
1885  // Copy data and adjust indices from rhs.
1886  std::copy_n (rhs.data (), rnz, data () + li);
1887  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1888  }
1889  else
1890  {
1891  // Clearing elements or exceeding capacity, allocate afresh
1892  // and paste pieces.
1893  const Sparse<T> tmp = *this;
1894  *this = Sparse<T> (nr, 1, new_nz);
1895 
1896  // Head ...
1897  std::copy_n (tmp.data (), li, data ());
1898  std::copy_n (tmp.ridx (), li, ridx ());
1899 
1900  // new stuff ...
1901  std::copy_n (rhs.data (), rnz, data () + li);
1902  mx_inline_add (rnz, ridx () + li, rhs.ridx (), lb);
1903 
1904  // ...tail
1905  std::copy (tmp.data () + ui, tmp.data () + nz,
1906  data () + li + rnz);
1907  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
1908  ridx () + li + rnz);
1909  }
1910 
1911  cidx (1) = new_nz;
1912  }
1913  else if (idx.is_range () && idx.increment () == -1)
1914  {
1915  // It's s(u:-1:l) = r. Reverse the assignment.
1916  assign (idx.sorted (), rhs.index (idx_vector (rhl - 1, 0, -1)));
1917  }
1918  else if (idx.is_permutation (n))
1919  {
1920  *this = rhs.index (idx.inverse_permutation (n));
1921  }
1922  else if (rhs.nnz () == 0)
1923  {
1924  // Elements are being zeroed.
 // Overwrite any matching stored entry with T (); maybe_compress
 // below removes the explicit zeros that this creates.
1925  octave_idx_type *ri = ridx ();
1926  for (octave_idx_type i = 0; i < rhl; i++)
1927  {
1928  octave_idx_type iidx = idx(i);
1929  octave_idx_type li = lblookup (ri, nz, iidx);
1930  if (li != nz && ri[li] == iidx)
1931  xdata (li) = T ();
1932  }
1933 
1934  maybe_compress (true);
1935  }
1936  else
1937  {
 // General scattered assignment: rebuild from (possibly
 // duplicated) triplets; the Sparse constructor with
 // sum_terms == false keeps the last occurrence per index.
1938  const Sparse<T> tmp = *this;
1939  octave_idx_type new_nz = nz + rhl;
1940  // Disassembly our matrix...
1941  Array<octave_idx_type> new_ri (dim_vector (new_nz, 1));
1942  Array<T> new_data (dim_vector (new_nz, 1));
1943  std::copy_n (tmp.ridx (), nz, new_ri.fortran_vec ());
1944  std::copy_n (tmp.data (), nz, new_data.fortran_vec ());
1945  // ... insert new data (densified) ...
1946  idx.copy_data (new_ri.fortran_vec () + nz);
1947  new_data.assign (idx_vector (nz, new_nz), rhs.array_value ());
1948  // ... reassembly.
1949  *this = Sparse<T> (new_data, new_ri,
1950  static_cast<octave_idx_type> (0),
1951  nr, nc, false);
1952  }
1953  }
1954  else
1955  {
 // Not a column vector: assign through the linearized view and
 // restore the original shape afterwards.
1956  dim_vector save_dims = dimensions;
1957  *this = index (idx_vector::colon);
1958  assign (idx, rhs.index (idx_vector::colon));
1959  *this = reshape (save_dims);
1960  }
1961  }
1962  else if (rhl == 1)
1963  {
 // Scalar rhs broadcast over all indexed positions.
1964  rhl = idx.length (n);
1965  if (rhs.nnz () != 0)
1966  assign (idx, Sparse<T> (rhl, 1, rhs.data (0)));
1967  else
1968  assign (idx, Sparse<T> (rhl, 1));
1969  }
1970  else
1971  octave::err_nonconformant ("=", dim_vector(idx.length (n),1), rhs.dims());
1972 }
1973 
1974 template <typename T>
1975 void
1977  const idx_vector& idx_j, const Sparse<T>& rhs)
1978 {
1979  Sparse<T> retval;
1980 
1981  assert (ndims () == 2);
1982 
1983  octave_idx_type nr = dim1 ();
1984  octave_idx_type nc = dim2 ();
1985  octave_idx_type nz = nnz ();
1986 
1987  octave_idx_type n = rhs.rows ();
1988  octave_idx_type m = rhs.columns ();
1989 
1990  // FIXME: this should probably be written more like the
1991  // Array<T>::assign function...
1992 
1993  bool orig_zero_by_zero = (nr == 0 && nc == 0);
1994 
1995  if (orig_zero_by_zero || (idx_i.length (nr) == n && idx_j.length (nc) == m))
1996  {
1997  octave_idx_type nrx;
1998  octave_idx_type ncx;
1999 
2000  if (orig_zero_by_zero)
2001  {
2002  if (idx_i.is_colon ())
2003  {
2004  nrx = n;
2005 
2006  if (idx_j.is_colon ())
2007  ncx = m;
2008  else
2009  ncx = idx_j.extent (nc);
2010  }
2011  else if (idx_j.is_colon ())
2012  {
2013  nrx = idx_i.extent (nr);
2014  ncx = m;
2015  }
2016  else
2017  {
2018  nrx = idx_i.extent (nr);
2019  ncx = idx_j.extent (nc);
2020  }
2021  }
2022  else
2023  {
2024  nrx = idx_i.extent (nr);
2025  ncx = idx_j.extent (nc);
2026  }
2027 
2028  // Try to resize first if necessary.
2029  if (nrx != nr || ncx != nc)
2030  {
2031  resize (nrx, ncx);
2032  nr = rows ();
2033  nc = cols ();
2034  // nz is preserved.
2035  }
2036 
2037  if (n == 0 || m == 0)
2038  return;
2039 
2040  if (idx_i.is_colon ())
2041  {
2042  octave_idx_type lb, ub;
2043  // Great, we're just manipulating columns. This is going to be quite
2044  // efficient, because the columns can stay compressed as they are.
2045  if (idx_j.is_colon ())
2046  *this = rhs; // Shallow copy.
2047  else if (idx_j.is_cont_range (nc, lb, ub))
2048  {
2049  // Special-case a contiguous range.
2050  octave_idx_type li = cidx (lb);
2051  octave_idx_type ui = cidx (ub);
2052  octave_idx_type rnz = rhs.nnz ();
2053  octave_idx_type new_nz = nz - (ui - li) + rnz;
2054 
2055  if (new_nz >= nz && new_nz <= nzmax ())
2056  {
2057  // Adding/overwriting elements, enough capacity allocated.
2058 
2059  if (new_nz > nz)
2060  {
2061  // Make room first.
2062  std::copy_backward (data () + ui, data () + nz,
2063  data () + new_nz);
2064  std::copy_backward (ridx () + ui, ridx () + nz,
2065  ridx () + new_nz);
2066  mx_inline_add2 (nc - ub, cidx () + ub + 1, new_nz - nz);
2067  }
2068 
2069  // Copy data and indices from rhs.
2070  std::copy_n (rhs.data (), rnz, data () + li);
2071  std::copy_n (rhs.ridx (), rnz, ridx () + li);
2072  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2073  li);
2074 
2075  assert (nnz () == new_nz);
2076  }
2077  else
2078  {
2079  // Clearing elements or exceeding capacity, allocate afresh
2080  // and paste pieces.
2081  const Sparse<T> tmp = *this;
2082  *this = Sparse<T> (nr, nc, new_nz);
2083 
2084  // Head...
2085  std::copy_n (tmp.data (), li, data ());
2086  std::copy_n (tmp.ridx (), li, ridx ());
2087  std::copy_n (tmp.cidx () + 1, lb, cidx () + 1);
2088 
2089  // new stuff...
2090  std::copy_n (rhs.data (), rnz, data () + li);
2091  std::copy_n (rhs.ridx (), rnz, ridx () + li);
2092  mx_inline_add (ub - lb, cidx () + lb + 1, rhs.cidx () + 1,
2093  li);
2094 
2095  // ...tail.
2096  std::copy (tmp.data () + ui, tmp.data () + nz,
2097  data () + li + rnz);
2098  std::copy (tmp.ridx () + ui, tmp.ridx () + nz,
2099  ridx () + li + rnz);
2100  mx_inline_add (nc - ub, cidx () + ub + 1,
2101  tmp.cidx () + ub + 1, new_nz - nz);
2102 
2103  assert (nnz () == new_nz);
2104  }
2105  }
2106  else if (idx_j.is_range () && idx_j.increment () == -1)
2107  {
2108  // It's s(:,u:-1:l) = r. Reverse the assignment.
2109  assign (idx_i, idx_j.sorted (),
2110  rhs.index (idx_i, idx_vector (m - 1, 0, -1)));
2111  }
2112  else if (idx_j.is_permutation (nc))
2113  {
2114  *this = rhs.index (idx_i, idx_j.inverse_permutation (nc));
2115  }
2116  else
2117  {
2118  const Sparse<T> tmp = *this;
2119  *this = Sparse<T> (nr, nc);
2120  OCTAVE_LOCAL_BUFFER_INIT (octave_idx_type, jsav, nc, -1);
2121 
2122  // Assemble column lengths.
2123  for (octave_idx_type i = 0; i < nc; i++)
2124  xcidx (i+1) = tmp.cidx (i+1) - tmp.cidx (i);
2125 
2126  for (octave_idx_type i = 0; i < m; i++)
2127  {
2128  octave_idx_type j =idx_j(i);
2129  jsav[j] = i;
2130  xcidx (j+1) = rhs.cidx (i+1) - rhs.cidx (i);
2131  }
2132 
2133  // Make cumulative.
2134  for (octave_idx_type i = 0; i < nc; i++)
2135  xcidx (i+1) += xcidx (i);
2136 
2137  change_capacity (nnz ());
2138 
2139  // Merge columns.
2140  for (octave_idx_type i = 0; i < nc; i++)
2141  {
2142  octave_idx_type l = xcidx (i);
2143  octave_idx_type u = xcidx (i+1);
2144  octave_idx_type j = jsav[i];
2145  if (j >= 0)
2146  {
2147  // from rhs
2148  octave_idx_type k = rhs.cidx (j);
2149  std::copy_n (rhs.data () + k, u - l, xdata () + l);
2150  std::copy_n (rhs.ridx () + k, u - l, xridx () + l);
2151  }
2152  else
2153  {
2154  // original
2155  octave_idx_type k = tmp.cidx (i);
2156  std::copy_n (tmp.data () + k, u - l, xdata () + l);
2157  std::copy_n (tmp.ridx () + k, u - l, xridx () + l);
2158  }
2159  }
2160 
2161  }
2162  }
2163  else if (nc == 1 && idx_j.is_colon_equiv (nc) && idx_i.isvector ())
2164  {
2165  // It's just vector indexing. The 1D assign is specialized for that.
2166  assign (idx_i, rhs);
2167  }
2168  else if (idx_j.is_colon ())
2169  {
2170  if (idx_i.is_permutation (nr))
2171  {
2172  *this = rhs.index (idx_i.inverse_permutation (nr), idx_j);
2173  }
2174  else
2175  {
2176  // FIXME: optimize more special cases?
2177  // In general this requires unpacking the columns, which is slow,
2178  // especially for many small columns. OTOH, transpose is an
2179  // efficient O(nr+nc+nnz) operation.
2180  *this = transpose ();
2181  assign (idx_vector::colon, idx_i, rhs.transpose ());
2182  *this = transpose ();
2183  }
2184  }
2185  else
2186  {
2187  // Split it into 2 assignments and one indexing.
2188  Sparse<T> tmp = index (idx_vector::colon, idx_j);
2189  tmp.assign (idx_i, idx_vector::colon, rhs);
2190  assign (idx_vector::colon, idx_j, tmp);
2191  }
2192  }
2193  else if (m == 1 && n == 1)
2194  {
2195  n = idx_i.length (nr);
2196  m = idx_j.length (nc);
2197  if (rhs.nnz () != 0)
2198  assign (idx_i, idx_j, Sparse<T> (n, m, rhs.data (0)));
2199  else
2200  assign (idx_i, idx_j, Sparse<T> (n, m));
2201  }
2202  else if (idx_i.length (nr) == m && idx_j.length (nc) == n
2203  && (n == 1 || m == 1))
2204  {
2205  assign (idx_i, idx_j, rhs.transpose ());
2206  }
2207  else
2208  octave::err_nonconformant ("=", idx_i.length (nr), idx_j.length (nc), n, m);
2209 }
2210 
2211 // Can't use versions of these in Array.cc due to duplication of the
2212 // instantiations for Array<double and Sparse<double>, etc
// Comparator used by Sparse<T>::sort for ascending order (operator <).
// NOTE(review): the first line of the signature was missing from this
// listing; it is restored here from the Octave 4.4.1 sources.
2213 template <typename T>
2214 bool
2215 sparse_ascending_compare (typename ref_param<T>::type a,
2216  typename ref_param<T>::type b)
2217 {
2218  return (a < b);
2219 }
2220 
// Comparator used by Sparse<T>::sort for descending order (operator >).
// NOTE(review): the first line of the signature was missing from this
// listing; it is restored here from the Octave 4.4.1 sources.
2221 template <typename T>
2222 bool
2223 sparse_descending_compare (typename ref_param<T>::type a,
2224  typename ref_param<T>::type b)
2225 {
2226  return (a > b);
2227 }
2228 
// Sort each column (dim == 0) or row (dim == 1, via transposition) of
// the matrix in the given MODE.  Only the stored nonzeros of a column
// are sorted; the implicit zeros are then placed between the negative
// and positive runs by rewriting the row indices (i counts the stored
// values that sort before zero).
// NOTE(review): the signature line, the error-handler line and the
// declaration of the loop-shared index i were missing from this
// listing; they are restored from the Octave 4.4.1 sources.
2229 template <typename T>
2230 Sparse<T>
2231 Sparse<T>::sort (octave_idx_type dim, sortmode mode) const
2232 {
2233  Sparse<T> m = *this;
2234 
2235  octave_idx_type nr = m.rows ();
2236  octave_idx_type nc = m.columns ();
2237 
2238  if (m.numel () < 1 || dim > 1)
2239  return m;
2240 
2241  if (dim > 0)
2242  {
2243  m = m.transpose ();
2244  nr = m.rows ();
2245  nc = m.columns ();
2246  }
2247 
2248  octave_sort<T> lsort;
2249 
2250  if (mode == ASCENDING)
2251  lsort.set_compare (sparse_ascending_compare<T>);
2252  else if (mode == DESCENDING)
2253  lsort.set_compare (sparse_descending_compare<T>);
2254  else
2255  (*current_liboctave_error_handler)
2256  ("Sparse<T>::sort: invalid MODE");
2257 
2258  T *v = m.data ();
2259  octave_idx_type *mcidx = m.cidx ();
2260  octave_idx_type *mridx = m.ridx ();
2261 
2262  for (octave_idx_type j = 0; j < nc; j++)
2263  {
2264  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2265  lsort.sort (v, ns);
2266 
2267  octave_idx_type i;
2268  if (mode == ASCENDING)
2269  {
2270  for (i = 0; i < ns; i++)
2271  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2272  break;
2273  }
2274  else
2275  {
2276  for (i = 0; i < ns; i++)
2277  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2278  break;
2279  }
 // The first i sorted values precede the implicit zeros, the rest
 // follow them at the bottom of the column.
2280  for (octave_idx_type k = 0; k < i; k++)
2281  mridx[k] = k;
2282  for (octave_idx_type k = i; k < ns; k++)
2283  mridx[k] = k - ns + nr;
2284 
2285  v += ns;
2286  mridx += ns;
2287  }
2288 
2289  if (dim > 0)
2290  m = m.transpose ();
2291 
2292  return m;
2293 }
2294 
2295 template <typename T>
2296 Sparse<T>
2298  sortmode mode) const
2299 {
2300  Sparse<T> m = *this;
2301 
2302  octave_idx_type nr = m.rows ();
2303  octave_idx_type nc = m.columns ();
2304 
2305  if (m.numel () < 1 || dim > 1)
2306  {
2307  sidx = Array<octave_idx_type> (dim_vector (nr, nc), 1);
2308  return m;
2309  }
2310 
2311  if (dim > 0)
2312  {
2313  m = m.transpose ();
2314  nr = m.rows ();
2315  nc = m.columns ();
2316  }
2317 
2318  octave_sort<T> indexed_sort;
2319 
2320  if (mode == ASCENDING)
2321  indexed_sort.set_compare (sparse_ascending_compare<T>);
2322  else if (mode == DESCENDING)
2323  indexed_sort.set_compare (sparse_descending_compare<T>);
2324  else
2326  ("Sparse<T>::sort: invalid MODE");
2327 
2328  T *v = m.data ();
2329  octave_idx_type *mcidx = m.cidx ();
2330  octave_idx_type *mridx = m.ridx ();
2331 
2332  sidx = Array<octave_idx_type> (dim_vector (nr, nc));
2334 
2335  for (octave_idx_type j = 0; j < nc; j++)
2336  {
2337  octave_idx_type ns = mcidx[j + 1] - mcidx[j];
2338  octave_idx_type offset = j * nr;
2339 
2340  if (ns == 0)
2341  {
2342  for (octave_idx_type k = 0; k < nr; k++)
2343  sidx (offset + k) = k;
2344  }
2345  else
2346  {
2347  for (octave_idx_type i = 0; i < ns; i++)
2348  vi[i] = mridx[i];
2349 
2350  indexed_sort.sort (v, vi, ns);
2351 
2353  if (mode == ASCENDING)
2354  {
2355  for (i = 0; i < ns; i++)
2356  if (sparse_ascending_compare<T> (static_cast<T> (0), v[i]))
2357  break;
2358  }
2359  else
2360  {
2361  for (i = 0; i < ns; i++)
2362  if (sparse_descending_compare<T> (static_cast<T> (0), v[i]))
2363  break;
2364  }
2365 
2366  octave_idx_type ii = 0;
2367  octave_idx_type jj = i;
2368  for (octave_idx_type k = 0; k < nr; k++)
2369  {
2370  if (ii < ns && mridx[ii] == k)
2371  ii++;
2372  else
2373  sidx (offset + jj++) = k;
2374  }
2375 
2376  for (octave_idx_type k = 0; k < i; k++)
2377  {
2378  sidx (k + offset) = vi[k];
2379  mridx[k] = k;
2380  }
2381 
2382  for (octave_idx_type k = i; k < ns; k++)
2383  {
2384  sidx (k - ns + nr + offset) = vi[k];
2385  mridx[k] = k - ns + nr;
2386  }
2387 
2388  v += ns;
2389  mridx += ns;
2390  }
2391  }
2392 
2393  if (dim > 0)
2394  {
2395  m = m.transpose ();
2396  sidx = sidx.transpose ();
2397  }
2398 
2399  return m;
2400 }
2401 
template <typename T>
Sparse<T>
Sparse<T>::diag (octave_idx_type k) const
{
  // Matlab-compatible diag: if *this is a matrix, extract the k-th
  // diagonal as a sparse column vector; if *this is a row or column
  // vector, build a sparse square matrix with the vector on the k-th
  // diagonal.  k > 0 selects superdiagonals, k < 0 subdiagonals.
  octave_idx_type nnr = rows ();
  octave_idx_type nnc = cols ();
  Sparse<T> d;

  if (nnr == 0 || nnc == 0)
    ; // do nothing
  else if (nnr != 1 && nnc != 1)
    {
      // Matrix case: extract the k-th diagonal.

      // Shrink the effective dimensions so that ndiag below is the
      // length of the requested diagonal.
      if (k > 0)
        nnc -= k;
      else if (k < 0)
        nnr += k;

      if (nnr > 0 && nnc > 0)
        {
          octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;

          // First pass: count the number of nonzero elements on the
          // diagonal so the result can be allocated exactly.
          octave_idx_type nel = 0;
          if (k > 0)
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                if (elem (i, i+k) != 0.)
                  nel++;
            }
          else if (k < 0)
            {
              // Note k < 0, so i-k == i+|k| walks down the subdiagonal.
              for (octave_idx_type i = 0; i < ndiag; i++)
                if (elem (i-k, i) != 0.)
                  nel++;
            }
          else
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                if (elem (i, i) != 0.)
                  nel++;
            }

          // The result is a single sparse column, so its cidx array has
          // just the two bracketing entries.
          d = Sparse<T> (ndiag, 1, nel);
          d.xcidx (0) = 0;
          d.xcidx (1) = nel;

          // Second pass: copy the nonzero diagonal entries in order.
          octave_idx_type ii = 0;
          if (k > 0)
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                {
                  T tmp = elem (i, i+k);
                  if (tmp != 0.)
                    {
                      d.xdata (ii) = tmp;
                      d.xridx (ii++) = i;
                    }
                }
            }
          else if (k < 0)
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                {
                  T tmp = elem (i-k, i);
                  if (tmp != 0.)
                    {
                      d.xdata (ii) = tmp;
                      d.xridx (ii++) = i;
                    }
                }
            }
          else
            {
              for (octave_idx_type i = 0; i < ndiag; i++)
                {
                  T tmp = elem (i, i);
                  if (tmp != 0.)
                    {
                      d.xdata (ii) = tmp;
                      d.xridx (ii++) = i;
                    }
                }
            }
        }
      else
        {
          // Matlab returns [] 0x1 for out-of-range diagonal

          octave_idx_type nr = 0;
          octave_idx_type nc = 1;
          octave_idx_type nz = 0;

          d = Sparse<T> (nr, nc, nz);
        }
    }
  else if (nnr != 0 && nnc != 0)
    {
      // Vector case: build an n-by-n sparse matrix with the vector
      // placed on the k-th diagonal.  roff/coff are the row/column
      // offsets of the diagonal's starting position.
      octave_idx_type roff = 0;
      octave_idx_type coff = 0;
      if (k > 0)
        {
          roff = 0;
          coff = k;
        }
      else if (k < 0)
        {
          roff = -k;
          coff = 0;
        }

      if (nnr == 1)
        {
          // Row vector: element j of the vector (the single possible
          // nonzero of source column j) lands at (j+roff, j+coff).
          octave_idx_type n = nnc + std::abs (k);
          octave_idx_type nz = nnz ();

          d = Sparse<T> (n, n, nz);

          if (nnz () > 0)
            {
              // Leading columns (before the diagonal starts) are empty.
              for (octave_idx_type i = 0; i < coff+1; i++)
                d.xcidx (i) = 0;

              for (octave_idx_type j = 0; j < nnc; j++)
                {
                  for (octave_idx_type i = cidx (j); i < cidx (j+1); i++)
                    {
                      d.xdata (i) = data (i);
                      d.xridx (i) = j + roff;
                    }
                  d.xcidx (j + coff + 1) = cidx (j+1);
                }

              // Trailing columns (after the diagonal ends) are empty.
              for (octave_idx_type i = nnc + coff + 1; i < n + 1; i++)
                d.xcidx (i) = nz;
            }
        }
      else
        {
          // Column vector: walk the stored row indices (ir) in step with
          // the row counter i, emitting one result column per source row.
          octave_idx_type n = nnr + std::abs (k);
          octave_idx_type nz = nnz ();

          d = Sparse<T> (n, n, nz);

          if (nnz () > 0)
            {
              octave_idx_type ii = 0;
              octave_idx_type ir = ridx (0);

              // Leading columns (before the diagonal starts) are empty.
              for (octave_idx_type i = 0; i < coff+1; i++)
                d.xcidx (i) = 0;

              for (octave_idx_type i = 0; i < nnr; i++)
                {
                  if (ir == i)
                    {
                      d.xdata (ii) = data (ii);
                      d.xridx (ii++) = ir + roff;

                      if (ii != nz)
                        ir = ridx (ii);
                    }
                  d.xcidx (i + coff + 1) = ii;
                }

              // Trailing columns (after the diagonal ends) are empty.
              for (octave_idx_type i = nnr + coff + 1; i < n+1; i++)
                d.xcidx (i) = nz;
            }
        }
    }

  return d;
}
2574 
template <typename T>
Sparse<T>
Sparse<T>::cat (int dim, octave_idx_type n, const Sparse<T> *sparse_list)
{
  // Concatenate N sparse matrices along DIM (0 = vertical, 1 =
  // horizontal).  dim == -1 / -2 request the bracket operators' [;] /
  // [,] dimension rules instead of plain cat().

  // Default concatenation.
  bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;

  if (dim == -1 || dim == -2)
    {
      concat_rule = &dim_vector::hvcat;
      dim = -dim - 1;
    }
  else if (dim < 0)
    (*current_liboctave_error_handler) ("cat: invalid dimension");

  dim_vector dv;
  octave_idx_type total_nz = 0;
  // Sparse matrices are strictly 2-D; only dims 0 and 1 make sense.
  if (dim != 0 && dim != 1)
    (*current_liboctave_error_handler)
      ("cat: invalid dimension for sparse concatenation");

  if (n == 1)
    return sparse_list[0];

  // First pass: validate dimension compatibility and accumulate the
  // total nonzero count so the result can be allocated once.
  for (octave_idx_type i = 0; i < n; i++)
    {
      if (! (dv.*concat_rule) (sparse_list[i].dims (), dim))
        (*current_liboctave_error_handler) ("cat: dimension mismatch");

      total_nz += sparse_list[i].nnz ();
    }

  Sparse<T> retval (dv, total_nz);

  if (retval.isempty ())
    return retval;

  switch (dim)
    {
    case 0:
      {
        // sparse vertcat.  This is not efficiently handled by assignment,
        // so we'll do it directly.
        octave_idx_type l = 0;  // running write position into retval
        for (octave_idx_type j = 0; j < dv(1); j++)
          {
            octave_quit ();

            // rcum is the row offset of the current source matrix
            // within the stacked result.
            octave_idx_type rcum = 0;
            for (octave_idx_type i = 0; i < n; i++)
              {
                const Sparse<T>& spi = sparse_list[i];
                // Skipping empty matrices.  See the comment in Array.cc.
                if (spi.isempty ())
                  continue;

                // Copy column j of source i, shifting row indices.
                octave_idx_type kl = spi.cidx (j);
                octave_idx_type ku = spi.cidx (j+1);
                for (octave_idx_type k = kl; k < ku; k++, l++)
                  {
                    retval.xridx (l) = spi.ridx (k) + rcum;
                    retval.xdata (l) = spi.data (k);
                  }

                rcum += spi.rows ();
              }

            retval.xcidx (j+1) = l;
          }

        break;
      }
    case 1:
      {
        // sparse horzcat: columns are independent, so indexed
        // assignment of whole column ranges is efficient here.
        octave_idx_type l = 0;  // first destination column of source i
        for (octave_idx_type i = 0; i < n; i++)
          {
            octave_quit ();

            // Skipping empty matrices.  See the comment in Array.cc.
            if (sparse_list[i].isempty ())
              continue;

            octave_idx_type u = l + sparse_list[i].columns ();
            retval.assign (idx_vector::colon, idx_vector (l, u),
                           sparse_list[i]);
            l = u;
          }

        break;
      }
    default:
      assert (false);
    }

  return retval;
}
2672 
2673 template <typename T>
2674 Array<T>
2676 {
2677  NoAlias< Array<T>> retval (dims (), T ());
2678  if (rows () == 1)
2679  {
2680  octave_idx_type i = 0;
2681  for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2682  {
2683  if (cidx (j+1) > i)
2684  retval(j) = data (i++);
2685  }
2686  }
2687  else
2688  {
2689  for (octave_idx_type j = 0, nc = cols (); j < nc; j++)
2690  for (octave_idx_type i = cidx (j), iu = cidx (j+1); i < iu; i++)
2691  retval(ridx (i), j) = data (i);
2692  }
2693 
2694  return retval;
2695 }
2696 
template <typename T>
std::istream&
read_sparse_matrix (std::istream& is, Sparse<T>& a,
                    T (*read_fcn) (std::istream&))
{
  // Read triplets "row col value" from IS into the pre-sized sparse
  // matrix A, using READ_FCN to parse each value.  Input indices are
  // 1-based and must arrive sorted by column, and by row within each
  // column, so the CSC arrays can be filled in a single pass.  On a
  // malformed element, IS is left with failbit set and the liboctave
  // error handler is invoked.
  octave_idx_type nr = a.rows ();
  octave_idx_type nc = a.cols ();
  octave_idx_type nz = a.nzmax ();  // number of triplets expected

  if (nr > 0 && nc > 0)
    {
      octave_idx_type itmp;
      octave_idx_type jtmp;
      octave_idx_type iold = 0;  // previous row index (for order check)
      octave_idx_type jold = 0;  // previous column index
      octave_idx_type ii = 0;    // next write position in data/ridx
      T tmp;

      a.cidx (0) = 0;
      for (octave_idx_type i = 0; i < nz; i++)
        {
          // Read the 1-based (row, col) pair and convert to 0-based.
          itmp = 0; jtmp = 0;
          is >> itmp;
          itmp--;

          is >> jtmp;
          jtmp--;

          if (is.fail ())
            {
              // Re-read the offending token as a string so the error
              // message can show it.
              is.clear();
              std::string err_field;
              is >> err_field;
              (*current_liboctave_error_handler)
                ("invalid sparse matrix: element %d: "
                 "Symbols '%s' is not an integer format",
                 i+1, err_field.c_str ());
            }

          if (itmp < 0 || itmp >= nr)
            {
              is.setstate (std::ios::failbit);

              (*current_liboctave_error_handler)
                ("invalid sparse matrix: element %d: "
                 "row index = %d out of range",
                 i+1, itmp + 1);
            }

          if (jtmp < 0 || jtmp >= nc)
            {
              is.setstate (std::ios::failbit);

              (*current_liboctave_error_handler)
                ("invalid sparse matrix: element %d: "
                 "column index = %d out of range",
                 i+1, jtmp + 1);
            }

          if (jtmp < jold)
            {
              is.setstate (std::ios::failbit);

              (*current_liboctave_error_handler)
                ("invalid sparse matrix: element %d:"
                 "column indices must appear in ascending order (%d < %d)",
                 i+1, jtmp, jold);
            }
          else if (jtmp > jold)
            {
              // Column advanced: close out cidx for every skipped
              // column at the current element count.
              for (octave_idx_type j = jold; j < jtmp; j++)
                a.cidx (j+1) = ii;
            }
          else if (itmp < iold)
            {
              is.setstate (std::ios::failbit);

              (*current_liboctave_error_handler)
                ("invalid sparse matrix: element %d: "
                 "row indices must appear in ascending order in each column "
                 "(%d < %d)",
                 i+1, iold, itmp);
            }

          iold = itmp;
          jold = jtmp;

          // Parse the element value itself.
          tmp = read_fcn (is);

          if (! is)
            return is; // Problem, return is in error state

          a.data (ii) = tmp;
          a.ridx (ii++) = itmp;
        }

      // Close out cidx for all remaining (trailing empty) columns.
      for (octave_idx_type j = jold; j < nc; j++)
        a.cidx (j+1) = ii;
    }

  return is;
}
2799 
2800 /*
2801  * Tests
2802  *
2803 
2804 %!function x = set_slice (x, dim, slice, arg)
2805 %! switch (dim)
2806 %! case 11
2807 %! x(slice) = 2;
2808 %! case 21
2809 %! x(slice, :) = 2;
2810 %! case 22
2811 %! x(:, slice) = 2;
2812 %! otherwise
2813 %! error ("invalid dim, '%d'", dim);
2814 %! endswitch
2815 %!endfunction
2816 
2817 %!function x = set_slice2 (x, dim, slice)
2818 %! switch (dim)
2819 %! case 11
2820 %! x(slice) = 2 * ones (size (slice));
2821 %! case 21
2822 %! x(slice, :) = 2 * ones (length (slice), columns (x));
2823 %! case 22
2824 %! x(:, slice) = 2 * ones (rows (x), length (slice));
2825 %! otherwise
2826 %! error ("invalid dim, '%d'", dim);
2827 %! endswitch
2828 %!endfunction
2829 
2830 %!function test_sparse_slice (size, dim, slice)
2831 %! x = ones (size);
2832 %! s = set_slice (sparse (x), dim, slice);
2833 %! f = set_slice (x, dim, slice);
2834 %! assert (nnz (s), nnz (f));
2835 %! assert (full (s), f);
2836 %! s = set_slice2 (sparse (x), dim, slice);
2837 %! f = set_slice2 (x, dim, slice);
2838 %! assert (nnz (s), nnz (f));
2839 %! assert (full (s), f);
2840 %!endfunction
2841 
2842 #### 1d indexing
2843 
2844 ## size = [2 0]
2845 %!test test_sparse_slice ([2 0], 11, []);
2846 %!assert (set_slice (sparse (ones ([2 0])), 11, 1), sparse ([2 0]')) # sparse different from full
2847 %!assert (set_slice (sparse (ones ([2 0])), 11, 2), sparse ([0 2]')) # sparse different from full
2848 %!assert (set_slice (sparse (ones ([2 0])), 11, 3), sparse ([0 0; 2 0]')) # sparse different from full
2849 %!assert (set_slice (sparse (ones ([2 0])), 11, 4), sparse ([0 0; 0 2]')) # sparse different from full
2850 
2851 ## size = [0 2]
2852 %!test test_sparse_slice ([0 2], 11, []);
2853 %!assert (set_slice (sparse (ones ([0 2])), 11, 1), sparse ([2 0])) # sparse different from full
2854 %!test test_sparse_slice ([0 2], 11, 2);
2855 %!test test_sparse_slice ([0 2], 11, 3);
2856 %!test test_sparse_slice ([0 2], 11, 4);
2857 %!test test_sparse_slice ([0 2], 11, [4, 4]);
2858 
2859 ## size = [2 1]
2860 %!test test_sparse_slice ([2 1], 11, []);
2861 %!test test_sparse_slice ([2 1], 11, 1);
2862 %!test test_sparse_slice ([2 1], 11, 2);
2863 %!test test_sparse_slice ([2 1], 11, 3);
2864 %!test test_sparse_slice ([2 1], 11, 4);
2865 %!test test_sparse_slice ([2 1], 11, [4, 4]);
2866 
2867 ## size = [1 2]
2868 %!test test_sparse_slice ([1 2], 11, []);
2869 %!test test_sparse_slice ([1 2], 11, 1);
2870 %!test test_sparse_slice ([1 2], 11, 2);
2871 %!test test_sparse_slice ([1 2], 11, 3);
2872 %!test test_sparse_slice ([1 2], 11, 4);
2873 %!test test_sparse_slice ([1 2], 11, [4, 4]);
2874 
2875 ## size = [2 2]
2876 %!test test_sparse_slice ([2 2], 11, []);
2877 %!test test_sparse_slice ([2 2], 11, 1);
2878 %!test test_sparse_slice ([2 2], 11, 2);
2879 %!test test_sparse_slice ([2 2], 11, 3);
2880 %!test test_sparse_slice ([2 2], 11, 4);
2881 %!test test_sparse_slice ([2 2], 11, [4, 4]);
2882 # These 2 errors are the same as in the full case
2883 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 5)
2884 %!error id=Octave:invalid-resize set_slice (sparse (ones ([2 2])), 11, 6)
2885 
2886 #### 2d indexing
2887 
2888 ## size = [2 0]
2889 %!test test_sparse_slice ([2 0], 21, []);
2890 %!test test_sparse_slice ([2 0], 21, 1);
2891 %!test test_sparse_slice ([2 0], 21, 2);
2892 %!test test_sparse_slice ([2 0], 21, [2,2]);
2893 %!assert (set_slice (sparse (ones ([2 0])), 21, 3), sparse (3,0))
2894 %!assert (set_slice (sparse (ones ([2 0])), 21, 4), sparse (4,0))
2895 %!test test_sparse_slice ([2 0], 22, []);
2896 %!test test_sparse_slice ([2 0], 22, 1);
2897 %!test test_sparse_slice ([2 0], 22, 2);
2898 %!test test_sparse_slice ([2 0], 22, [2,2]);
2899 %!assert (set_slice (sparse (ones ([2 0])), 22, 3), sparse ([0 0 2;0 0 2])) # sparse different from full
2900 %!assert (set_slice (sparse (ones ([2 0])), 22, 4), sparse ([0 0 0 2;0 0 0 2])) # sparse different from full
2901 
2902 ## size = [0 2]
2903 %!test test_sparse_slice ([0 2], 21, []);
2904 %!test test_sparse_slice ([0 2], 21, 1);
2905 %!test test_sparse_slice ([0 2], 21, 2);
2906 %!test test_sparse_slice ([0 2], 21, [2,2]);
2907 %!assert (set_slice (sparse (ones ([0 2])), 21, 3), sparse ([0 0;0 0;2 2])) # sparse different from full
2908 %!assert (set_slice (sparse (ones ([0 2])), 21, 4), sparse ([0 0;0 0;0 0;2 2])) # sparse different from full
2909 %!test test_sparse_slice ([0 2], 22, []);
2910 %!test test_sparse_slice ([0 2], 22, 1);
2911 %!test test_sparse_slice ([0 2], 22, 2);
2912 %!test test_sparse_slice ([0 2], 22, [2,2]);
2913 %!assert (set_slice (sparse (ones ([0 2])), 22, 3), sparse (0,3))
2914 %!assert (set_slice (sparse (ones ([0 2])), 22, 4), sparse (0,4))
2915 
2916 ## size = [2 1]
2917 %!test test_sparse_slice ([2 1], 21, []);
2918 %!test test_sparse_slice ([2 1], 21, 1);
2919 %!test test_sparse_slice ([2 1], 21, 2);
2920 %!test test_sparse_slice ([2 1], 21, [2,2]);
2921 %!test test_sparse_slice ([2 1], 21, 3);
2922 %!test test_sparse_slice ([2 1], 21, 4);
2923 %!test test_sparse_slice ([2 1], 22, []);
2924 %!test test_sparse_slice ([2 1], 22, 1);
2925 %!test test_sparse_slice ([2 1], 22, 2);
2926 %!test test_sparse_slice ([2 1], 22, [2,2]);
2927 %!test test_sparse_slice ([2 1], 22, 3);
2928 %!test test_sparse_slice ([2 1], 22, 4);
2929 
2930 ## size = [1 2]
2931 %!test test_sparse_slice ([1 2], 21, []);
2932 %!test test_sparse_slice ([1 2], 21, 1);
2933 %!test test_sparse_slice ([1 2], 21, 2);
2934 %!test test_sparse_slice ([1 2], 21, [2,2]);
2935 %!test test_sparse_slice ([1 2], 21, 3);
2936 %!test test_sparse_slice ([1 2], 21, 4);
2937 %!test test_sparse_slice ([1 2], 22, []);
2938 %!test test_sparse_slice ([1 2], 22, 1);
2939 %!test test_sparse_slice ([1 2], 22, 2);
2940 %!test test_sparse_slice ([1 2], 22, [2,2]);
2941 %!test test_sparse_slice ([1 2], 22, 3);
2942 %!test test_sparse_slice ([1 2], 22, 4);
2943 
2944 ## size = [2 2]
2945 %!test test_sparse_slice ([2 2], 21, []);
2946 %!test test_sparse_slice ([2 2], 21, 1);
2947 %!test test_sparse_slice ([2 2], 21, 2);
2948 %!test test_sparse_slice ([2 2], 21, [2,2]);
2949 %!test test_sparse_slice ([2 2], 21, 3);
2950 %!test test_sparse_slice ([2 2], 21, 4);
2951 %!test test_sparse_slice ([2 2], 22, []);
2952 %!test test_sparse_slice ([2 2], 22, 1);
2953 %!test test_sparse_slice ([2 2], 22, 2);
2954 %!test test_sparse_slice ([2 2], 22, [2,2]);
2955 %!test test_sparse_slice ([2 2], 22, 3);
2956 %!test test_sparse_slice ([2 2], 22, 4);
2957 
2958 %!assert <35570> (speye (3,1)(3:-1:1), sparse ([0; 0; 1]))
2959 
2960 ## Test removing columns
2961 %!test <36656>
2962 %! s = sparse (magic (5));
2963 %! s(:,2:4) = [];
2964 %! assert (s, sparse (magic (5)(:, [1,5])));
2965 
2966 %!test
2967 %! s = sparse ([], [], [], 1, 1);
2968 %! s(1,:) = [];
2969 %! assert (s, sparse ([], [], [], 0, 1));
2970 
2971 ## Test (bug #37321)
2972 %!test <37321> a=sparse (0,0); assert (all (a) == sparse ([1]));
2973 %!test <37321> a=sparse (0,1); assert (all (a) == sparse ([1]));
2974 %!test <37321> a=sparse (1,0); assert (all (a) == sparse ([1]));
2975 %!test <37321> a=sparse (1,0); assert (all (a,2) == sparse ([1]));
2976 %!test <37321> a=sparse (1,0); assert (size (all (a,1)), [1 0]);
2977 %!test <37321> a=sparse (1,1);
2978 %! assert (all (a) == sparse ([0]));
2979 %! assert (size (all (a)), [1 1]);
2980 %!test <37321> a=sparse (2,1);
2981 %! assert (all (a) == sparse ([0]));
2982 %! assert (size (all (a)), [1 1]);
2983 %!test <37321> a=sparse (1,2);
2984 %! assert (all (a) == sparse ([0]));
2985 %! assert (size (all (a)), [1 1]);
2986 %!test <37321> a=sparse (2,2); assert (isequal (all (a), sparse ([0 0])));
2987 
2988 ## Test assigning row to a column slice
2989 %!test <45589>
2990 %! a = sparse (magic (3));
2991 %! b = a;
2992 %! a(1,:) = 1:3;
2993 %! b(1,:) = (1:3)';
2994 %! assert (a, b);
2995 
2996 */
2997 
2998 template <typename T>
2999 void
3000 Sparse<T>::print_info (std::ostream& os, const std::string& prefix) const
3001 {
3002  os << prefix << "rep address: " << rep << "\n"
3003  << prefix << "rep->nzmx: " << rep->nzmx << "\n"
3004  << prefix << "rep->nrows: " << rep->nrows << "\n"
3005  << prefix << "rep->ncols: " << rep->ncols << "\n"
3006  << prefix << "rep->data: " << static_cast<void *> (rep->d) << "\n"
3007  << prefix << "rep->ridx: " << static_cast<void *> (rep->r) << "\n"
3008  << prefix << "rep->cidx: " << static_cast<void *> (rep->c) << "\n"
3009  << prefix << "rep->count: " << rep->count << "\n";
3010 }
3011 
// Instantiate the Sparse<T> class template and the matching
// read_sparse_matrix free function for a concrete element type T,
// decorated with the given export/import API macro.
#define INSTANTIATE_SPARSE(T, API) \
  template class API Sparse<T>; \
  template std::istream& \
  read_sparse_matrix<T> (std::istream& is, Sparse<T>& a, \
                         T (*read_fcn) (std::istream&));
T & elem(octave_idx_type _r, octave_idx_type _c)
Definition: Sparse.cc:64
octave_idx_type rows(void) const
Definition: Array.h:404
std::string str(char sep='x') const
Definition: dim-vector.cc:73
void mx_inline_add2(size_t n, R *r, const X *x)
Definition: mx-inlines.cc:125
virtual ~Sparse(void)
Definition: Sparse.cc:687
OCTAVE_NORETURN T range_error(const char *fcn, octave_idx_type n) const
Definition: Sparse.cc:736
Sparse< T > permute(const Array< octave_idx_type > &vec, bool inv=false) const
Definition: Sparse.cc:891
void resize(octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:950
T * data(void)
Definition: Sparse.h:486
dim_vector orig_dimensions(void) const
Definition: idx-vector.h:593
void change_length(octave_idx_type nz)
Definition: Sparse.cc:140
const octave_base_value const Array< octave_idx_type > & ra_idx
dim_vector dimensions
Definition: Sparse.h:157
bool isempty(void) const
Definition: ov.h:529
#define OCTAVE_LOCAL_BUFFER_INIT(T, buf, size, value)
Definition: oct-locbuf.h:47
const T * data(void) const
Definition: Array.h:582
octave_idx_type extent(octave_idx_type n) const
Definition: idx-vector.h:560
static const idx_vector colon
Definition: idx-vector.h:498
sortmode
Definition: oct-sort.h:105
Sparse< T > index(const idx_vector &i, bool resize_ok=false) const
Definition: Sparse.cc:1380
identity matrix If supplied two scalar respectively For allows like xample val
Definition: data.cc:4986
void delete_elements(const idx_vector &i)
Definition: Sparse.cc:1148
octave_idx_type safe_numel(void) const
The following function will throw a std::bad_alloc () exception if the requested size is larger than ...
Definition: dim-vector.cc:103
Return the CPU time used by your Octave session The first output is the total time spent executing your process and is equal to the sum of second and third which are the number of CPU seconds spent executing in user mode and the number of CPU seconds spent executing in system mode
Definition: data.cc:6348
OCTAVE_NORETURN liboctave_error_handler current_liboctave_error_handler
Definition: lo-error.c:38
octave_idx_type columns(void) const
Definition: Sparse.h:260
void set_compare(compare_fcn_type comp)
Definition: oct-sort.h:128
octave_idx_type nzmx
Definition: Sparse.h:65
octave_idx_type * cidx(void)
Definition: Sparse.h:508
for large enough k
Definition: lu.cc:617
void resize(int n, int fill_value=0)
Definition: dim-vector.h:310
const T * fortran_vec(void) const
Definition: Array.h:584
static void transpose(octave_idx_type N, const octave_idx_type *ridx, const octave_idx_type *cidx, octave_idx_type *ridx2, octave_idx_type *cidx2)
Definition: symrcm.cc:386
Sparse< T > sort(octave_idx_type dim=0, sortmode mode=ASCENDING) const
Definition: Sparse.cc:2231
bool isnan(bool)
Definition: lo-mappers.h:187
void err_index_out_of_range(int nd, int dim, octave_idx_type idx, octave_idx_type ext)
Sparse(void)
Definition: Sparse.h:165
Sparse< T > diag(octave_idx_type k=0) const
Definition: Sparse.cc:2404
idx_vector inverse_permutation(octave_idx_type n) const
Definition: idx-vector.cc:1168
Sparse< T > transpose(void) const
Definition: Sparse.cc:1092
Sparse< T >::SparseRep * rep
Definition: Sparse.h:155
static T abs(T x)
Definition: pr-output.cc:1696
bool sparse_indices_ok(octave_idx_type *r, octave_idx_type *c, octave_idx_type nrows, octave_idx_type ncols, octave_idx_type nnz)
Definition: sparse-util.cc:84
octave_idx_type * r
Definition: Sparse.h:63
u
Definition: lu.cc:138
bool is_cont_range(octave_idx_type n, octave_idx_type &l, octave_idx_type &u) const
Definition: idx-vector.cc:953
nd example oindent opens the file binary numeric values will be read assuming they are stored in IEEE format with the least significant bit and then converted to the native representation Opening a file that is already open simply opens it again and returns a separate file id It is not an error to open a file several though writing to the same file through several different file ids may produce unexpected results The possible values of text mode reading and writing automatically converts linefeeds to the appropriate line end character for the you may append a you must also open the file in binary mode The parameter conversions are currently only supported for and permissions will be set to and then everything is written in a single operation This is very efficient and improves performance c
Definition: file-io.cc:587
octave_idx_type * c
Definition: Sparse.h:64
virtual octave_idx_type numel(const octave_value_list &)
Definition: ov-base.cc:193
s
Definition: file-io.cc:2729
Array< T > as_matrix(void) const
Return the array as a matrix.
Definition: Array.h:390
octave_function * fcn
Definition: ov-class.cc:1754
F77_RET_T const F77_REAL const F77_REAL F77_REAL &F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE &F77_RET_T const F77_DBLE F77_DBLE &F77_RET_T const F77_REAL F77_REAL &F77_RET_T const F77_DBLE const F77_DBLE F77_DBLE * d
octave_idx_type cols(void) const
Definition: Array.h:412
calling an anonymous function involves an overhead quite comparable to the overhead of an m file function Passing a handle to a built in function is because the interpreter is not involved in the internal loop For a
Definition: cellfun.cc:400
bool sparse_ascending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2215
bool concat(const dim_vector &dvb, int dim)
This corresponds to cat().
Definition: dim-vector.cc:145
octave_idx_type compute_index(const Array< octave_idx_type > &ra_idx) const
Definition: Sparse.cc:713
void err_del_index_out_of_range(bool is1d, octave_idx_type idx, octave_idx_type ext)
bool is_permutation(octave_idx_type n) const
Definition: idx-vector.cc:1139
void maybe_compress(bool remove_zeros)
Definition: Sparse.cc:116
octave_idx_type nnz(void) const
Actual number of nonzero terms.
Definition: Sparse.h:240
Compressed Column Sparse(rows=3, cols=4, nnz=2 [17%])(1
std::istream & read_sparse_matrix(std::istream &is, Sparse< T > &a, T(*read_fcn)(std::istream &))
Definition: Sparse.cc:2699
octave_idx_type numel(void) const
Definition: Sparse.h:244
static Sparse< T >::SparseRep * nil_rep(void)
Definition: Sparse.cc:56
void change_capacity(octave_idx_type nz)
Definition: Sparse.h:462
octave_value & assign(assign_op op, const std::string &type, const std::list< octave_value_list > &idx, const octave_value &rhs)
void mx_inline_sub(size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:107
bool is_range(void) const
Definition: idx-vector.h:581
bool sparse_descending_compare(typename ref_param< T >::type a, typename ref_param< T >::type b)
Definition: Sparse.cc:2223
octave_idx_type ncols
Definition: Sparse.h:67
bool is_colon(void) const
Definition: idx-vector.h:575
void copy_data(octave_idx_type *data) const
Definition: idx-vector.cc:1050
static int elem
Definition: __contourc__.cc:47
double tmp
Definition: data.cc:6252
void err_nonconformant(const char *op, octave_idx_type op1_len, octave_idx_type op2_len)
octave_idx_type length(octave_idx_type n=0) const
Definition: idx-vector.h:557
octave_value retval
Definition: data.cc:6246
octave_idx_type increment(void) const
Definition: idx-vector.cc:1007
idx_vector sorted(bool uniq=false) const
Definition: idx-vector.h:587
octave_idx_type * ridx(void)
Definition: Sparse.h:495
octave_idx_type * xridx(void)
Definition: Sparse.h:501
bool isvector(void) const
Definition: idx-vector.cc:1277
Array< T > transpose(void) const
Definition: Array.cc:1598
Sparse< T > & operator=(const Sparse< T > &a)
Definition: Sparse.cc:695
Array< T > array_value(void) const
Definition: Sparse.cc:2675
the exceeded dimensions are set to if fewer subscripts than dimensions are the exceeding dimensions are merged into the final requested dimension For consider the following dims
Definition: sub2ind.cc:255
dim_vector redim(int n) const
Force certain dimensionality, preserving numel ().
Definition: dim-vector.cc:233
if_then_else< is_class_type< T >::no, T, T const & >::result type
Definition: lo-traits.h:118
void err_invalid_resize(void)
bool any_element_is_nan(void) const
Definition: Sparse.cc:177
octave_idx_type cols(void) const
Definition: Sparse.h:259
T celem(octave_idx_type _r, octave_idx_type _c) const
Definition: Sparse.cc:105
charNDArray max(char d, const charNDArray &m)
Definition: chNDArray.cc:227
void resize1(octave_idx_type n)
Definition: Sparse.cc:919
T::size_type numel(const T &str)
Definition: oct-string.cc:61
void sort(T *data, octave_idx_type nel)
Definition: oct-sort.cc:1506
This is a simple wrapper template that will subclass an Array<T> type or any later type derived from ...
Definition: Array.h:892
bool is_colon_equiv(octave_idx_type n) const
Definition: idx-vector.h:584
octave_idx_type numel(int n=0) const
Number of elements that a matrix with this dimensions would have.
Definition: dim-vector.h:362
Sparse< T > & insert(const Sparse< T > &a, octave_idx_type r, octave_idx_type c)
Definition: Sparse.cc:1000
p
Definition: lu.cc:138
T * xdata(void)
Definition: Sparse.h:488
Sparse< T > reshape(const dim_vector &new_dims) const
Definition: Sparse.cc:812
b
Definition: cellfun.cc:400
bool hvcat(const dim_vector &dvb, int dim)
This corresponds to [,] (horzcat, dim = 0) and [;] (vertcat, dim = 1).
Definition: dim-vector.cc:208
#define OCTAVE_LOCAL_BUFFER(T, buf, size)
Definition: oct-locbuf.h:41
Sparse< T > maybe_compress(bool remove_zeros=false)
Definition: Sparse.h:438
void assign(const idx_vector &i, const Array< T > &rhs, const T &rfv)
Indexed assignment (always with resize & fill).
Definition: Array.cc:1115
dim_vector dims(void) const
Definition: Sparse.h:278
for i
Definition: data.cc:5264
void mx_inline_add(size_t n, R *r, const X *x, const Y *y)
Definition: mx-inlines.cc:106
idx_vector complement(octave_idx_type n) const
Definition: idx-vector.cc:1108
bool isempty(void) const
Definition: Sparse.h:478
octave_idx_type ndims(void) const
Number of dimensions.
Definition: dim-vector.h:295
octave_idx_type * xcidx(void)
Definition: Sparse.h:514
Array< octave_idx_type > as_array(void) const
Definition: idx-vector.cc:1271
void assign(const idx_vector &i, const Sparse< T > &rhs)
Definition: Sparse.cc:1824
octave_idx_type numel(void) const
Number of elements in the array.
Definition: Array.h:366
write the output to stdout if nargout is
Definition: load-save.cc:1612
bool indices_ok(void) const
Definition: Sparse.cc:170
Vector representing the dimensions (size) of an Array.
Definition: dim-vector.h:87
bool is_scalar(void) const
Definition: idx-vector.h:578
const octave_idx_type * raw(void)
Definition: idx-vector.cc:1037
If this string is the system will ring the terminal sometimes it is useful to be able to print the original representation of the string
Definition: utils.cc:888
dim_vector dv
Definition: sub2ind.cc:263
static octave_idx_type lblookup(const octave_idx_type *ridx, octave_idx_type nr, octave_idx_type ri)
Definition: Sparse.cc:1131
octave::stream os
Definition: file-io.cc:627
octave_idx_type rows(void) const
Definition: Sparse.h:258
static Sparse< T > cat(int dim, octave_idx_type n, const Sparse< T > *sparse_list)
Definition: Sparse.cc:2577
charNDArray min(char d, const charNDArray &m)
Definition: chNDArray.cc:204
void print_info(std::ostream &os, const std::string &prefix) const
Definition: Sparse.cc:3000