GNU Octave  3.8.0
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Properties Friends Macros Pages
Array.cc
Go to the documentation of this file.
1 // Template array classes
2 /*
3 
4 Copyright (C) 1993-2013 John W. Eaton
5 Copyright (C) 2008-2009 Jaroslav Hajek
6 Copyright (C) 2009 VZLU Prague
7 
8 This file is part of Octave.
9 
10 Octave is free software; you can redistribute it and/or modify it
11 under the terms of the GNU General Public License as published by the
12 Free Software Foundation; either version 3 of the License, or (at your
13 option) any later version.
14 
15 Octave is distributed in the hope that it will be useful, but WITHOUT
16 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
17 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
18 for more details.
19 
20 You should have received a copy of the GNU General Public License
21 along with Octave; see the file COPYING. If not, see
22 <http://www.gnu.org/licenses/>.
23 
24 */
25 
26 #ifdef HAVE_CONFIG_H
27 #include <config.h>
28 #endif
29 
30 #include <cassert>
31 
32 #include <iostream>
33 #include <sstream>
34 #include <vector>
35 #include <algorithm>
36 #include <new>
37 
38 #include "Array.h"
39 #include "Array-util.h"
40 #include "idx-vector.h"
41 #include "lo-error.h"
42 #include "oct-locbuf.h"
43 
44 // One dimensional array class. Handles the reference counting for
45 // all the derived classes.
46 
47 template <class T>
48 Array<T>::Array (const Array<T>& a, const dim_vector& dv)
49  : dimensions (dv), rep (a.rep),
50  slice_data (a.slice_data), slice_len (a.slice_len)
51 {
52  if (dimensions.safe_numel () != a.numel ())
53  {
54  std::string dimensions_str = a.dimensions.str ();
55  std::string new_dims_str = dimensions.str ();
56 
57  (*current_liboctave_error_handler)
58  ("reshape: can't reshape %s array to %s array",
59  dimensions_str.c_str (), new_dims_str.c_str ());
60  }
61 
62  // This goes here because if an exception is thrown by the above,
63  // destructor will be never called.
64  rep->count++;
66 }
67 
68 template <class T>
69 void
70 Array<T>::fill (const T& val)
71 {
72  if (rep->count > 1)
73  {
74  --rep->count;
75  rep = new ArrayRep (length (), val);
76  slice_data = rep->data;
77  }
78  else
79  fill_or_memset (slice_len, val, slice_data);
80 }
81 
82 template <class T>
83 void
85 {
86  if (--rep->count == 0)
87  delete rep;
88 
89  rep = nil_rep ();
90  rep->count++;
91  slice_data = rep->data;
92  slice_len = rep->len;
93 
94  dimensions = dim_vector ();
95 }
96 
97 template <class T>
98 void
100 {
101  if (--rep->count == 0)
102  delete rep;
103 
104  rep = new ArrayRep (dv.safe_numel ());
105  slice_data = rep->data;
106  slice_len = rep->len;
107 
108  dimensions = dv;
109  dimensions.chop_trailing_singletons ();
110 }
111 
112 template <class T>
113 Array<T>
114 Array<T>::squeeze (void) const
115 {
116  Array<T> retval = *this;
117 
118  if (ndims () > 2)
119  {
120  bool dims_changed = false;
121 
122  dim_vector new_dimensions = dimensions;
123 
124  int k = 0;
125 
126  for (int i = 0; i < ndims (); i++)
127  {
128  if (dimensions(i) == 1)
129  dims_changed = true;
130  else
131  new_dimensions(k++) = dimensions(i);
132  }
133 
134  if (dims_changed)
135  {
136  switch (k)
137  {
138  case 0:
139  new_dimensions = dim_vector (1, 1);
140  break;
141 
142  case 1:
143  {
144  octave_idx_type tmp = new_dimensions(0);
145 
146  new_dimensions.resize (2);
147 
148  new_dimensions(0) = tmp;
149  new_dimensions(1) = 1;
150  }
151  break;
152 
153  default:
154  new_dimensions.resize (k);
155  break;
156  }
157  }
158 
159  retval = Array<T> (*this, new_dimensions);
160  }
161 
162  return retval;
163 }
164 
165 template <class T>
168 {
169  return ::compute_index (i, j, dimensions);
170 }
171 
172 template <class T>
175  octave_idx_type k) const
176 {
177  return ::compute_index (i, j, k, dimensions);
178 }
179 
180 template <class T>
183 {
184  return ::compute_index (ra_idx, dimensions);
185 }
186 
187 template <class T>
188 T&
190 {
191  // Do checks directly to avoid recomputing slice_len.
192  if (n < 0)
194  if (n >= slice_len)
195  gripe_index_out_of_range (1, 1, n+1, slice_len);
196 
197  return elem (n);
198 }
199 
200 template <class T>
201 T&
203 {
204  return elem (compute_index (i, j));
205 }
206 
207 template <class T>
208 T&
210 {
211  return elem (compute_index (i, j, k));
212 }
213 
214 template <class T>
215 T&
217 {
218  return elem (compute_index (ra_idx));
219 }
220 
221 template <class T>
222 typename Array<T>::crefT
224 {
225  // Do checks directly to avoid recomputing slice_len.
226  if (n < 0)
228  if (n >= slice_len)
229  gripe_index_out_of_range (1, 1, n+1, slice_len);
230 
231  return elem (n);
232 }
233 
234 template <class T>
235 typename Array<T>::crefT
237 {
238  return elem (compute_index (i, j));
239 }
240 
241 template <class T>
242 typename Array<T>::crefT
244  octave_idx_type k) const
245 {
246  return elem (compute_index (i, j, k));
247 }
248 
249 template <class T>
250 typename Array<T>::crefT
252 {
253  return elem (compute_index (ra_idx));
254 }
255 
256 template <class T>
257 Array<T>
259 {
260  octave_idx_type r = dimensions(0);
261 #ifdef BOUNDS_CHECKING
262  if (k < 0 || k > dimensions.numel (1))
263  gripe_index_out_of_range (2, 2, k+1, dimensions.numel (1));
264 #endif
265 
266  return Array<T> (*this, dim_vector (r, 1), k*r, k*r + r);
267 }
268 
269 template <class T>
270 Array<T>
272 {
273  octave_idx_type r = dimensions(0), c = dimensions (1), p = r*c;
274 #ifdef BOUNDS_CHECKING
275  if (k < 0 || k > dimensions.numel (2))
276  gripe_index_out_of_range (3, 3, k+1, dimensions.numel (2));
277 #endif
278 
279  return Array<T> (*this, dim_vector (r, c), k*p, k*p + p);
280 }
281 
282 template <class T>
283 Array<T>
285 {
286 #ifdef BOUNDS_CHECKING
287  if (lo < 0)
288  gripe_index_out_of_range (1, 1, lo+1, numel ());
289  if (up > numel ())
290  gripe_index_out_of_range (1, 1, up, numel ());
291 #endif
292  if (up < lo) up = lo;
293  return Array<T> (*this, dim_vector (up - lo, 1), lo, up);
294 }
295 
296 // Helper class for multi-d dimension permuting (generalized transpose).
298 {
299  // STRIDE occupies the last half of the space allocated for dim to
300  // avoid a double allocation.
301 
302  int n;
303  int top;
306  bool use_blk;
307 
308 public:
310 
311  : n (dv.length ()), top (0), dim (new octave_idx_type [2*n]),
312  stride (dim + n), use_blk (false)
313  {
314  assert (n == perm.length ());
315 
316  // Get cumulative dimensions.
318  cdim[0] = 1;
319  for (int i = 1; i < n+1; i++) cdim[i] = cdim[i-1] * dv(i-1);
320 
321  // Setup the permuted strides.
322  for (int k = 0; k < n; k++)
323  {
324  int kk = perm(k);
325  dim[k] = dv(kk);
326  stride[k] = cdim[kk];
327  }
328 
329  // Reduce contiguous runs.
330  for (int k = 1; k < n; k++)
331  {
332  if (stride[k] == stride[top]*dim[top])
333  dim[top] *= dim[k];
334  else
335  {
336  top++;
337  dim[top] = dim[k];
338  stride[top] = stride[k];
339  }
340  }
341 
342  // Determine whether we can use block transposes.
343  use_blk = top >= 1 && stride[1] == 1 && stride[0] == dim[1];
344 
345  }
346 
347  ~rec_permute_helper (void) { delete [] dim; }
348 
349  // Helper method for fast blocked transpose.
350  template <class T>
351  static T *
352  blk_trans (const T *src, T *dest, octave_idx_type nr, octave_idx_type nc)
353  {
354  static const octave_idx_type m = 8;
355  OCTAVE_LOCAL_BUFFER (T, blk, m*m);
356  for (octave_idx_type kr = 0; kr < nr; kr += m)
357  for (octave_idx_type kc = 0; kc < nc; kc += m)
358  {
359  octave_idx_type lr = std::min (m, nr - kr);
360  octave_idx_type lc = std::min (m, nc - kc);
361  if (lr == m && lc == m)
362  {
363  const T *ss = src + kc * nr + kr;
364  for (octave_idx_type j = 0; j < m; j++)
365  for (octave_idx_type i = 0; i < m; i++)
366  blk[j*m+i] = ss[j*nr + i];
367  T *dd = dest + kr * nc + kc;
368  for (octave_idx_type j = 0; j < m; j++)
369  for (octave_idx_type i = 0; i < m; i++)
370  dd[j*nc+i] = blk[i*m+j];
371  }
372  else
373  {
374  const T *ss = src + kc * nr + kr;
375  for (octave_idx_type j = 0; j < lc; j++)
376  for (octave_idx_type i = 0; i < lr; i++)
377  blk[j*m+i] = ss[j*nr + i];
378  T *dd = dest + kr * nc + kc;
379  for (octave_idx_type j = 0; j < lr; j++)
380  for (octave_idx_type i = 0; i < lc; i++)
381  dd[j*nc+i] = blk[i*m+j];
382  }
383  }
384 
385  return dest + nr*nc;
386  }
387 
388 private:
389 
390  // Recursive N-d generalized transpose
391  template <class T>
392  T *do_permute (const T *src, T *dest, int lev) const
393  {
394  if (lev == 0)
395  {
396  octave_idx_type step = stride[0], len = dim[0];
397  if (step == 1)
398  {
399  copy_or_memcpy (len, src, dest);
400  dest += len;
401  }
402  else
403  {
404  for (octave_idx_type i = 0, j = 0; i < len; i++, j += step)
405  dest[i] = src[j];
406 
407  dest += len;
408  }
409  }
410  else if (use_blk && lev == 1)
411  dest = blk_trans (src, dest, dim[1], dim[0]);
412  else
413  {
414  octave_idx_type step = stride[lev], len = dim[lev];
415  for (octave_idx_type i = 0, j = 0; i < len; i++, j+= step)
416  dest = do_permute (src + i * step, dest, lev-1);
417  }
418 
419  return dest;
420  }
421 
422  // No copying!
423 
425 
427 
428 public:
429 
430  template <class T>
431  void permute (const T *src, T *dest) const { do_permute (src, dest, top); }
432 };
433 
434 
435 template <class T>
436 Array<T>
437 Array<T>::permute (const Array<octave_idx_type>& perm_vec_arg, bool inv) const
438 {
439  Array<T> retval;
440 
441  Array<octave_idx_type> perm_vec = perm_vec_arg;
442 
443  dim_vector dv = dims ();
444 
445  int perm_vec_len = perm_vec_arg.length ();
446 
447  if (perm_vec_len < dv.length ())
449  ("%s: invalid permutation vector", inv ? "ipermute" : "permute");
450 
451  dim_vector dv_new = dim_vector::alloc (perm_vec_len);
452 
453  // Append singleton dimensions as needed.
454  dv.resize (perm_vec_len, 1);
455 
456  // Need this array to check for identical elements in permutation array.
457  OCTAVE_LOCAL_BUFFER_INIT (bool, checked, perm_vec_len, false);
458 
459  bool identity = true;
460 
461  // Find dimension vector of permuted array.
462  for (int i = 0; i < perm_vec_len; i++)
463  {
464  octave_idx_type perm_elt = perm_vec.elem (i);
465  if (perm_elt >= perm_vec_len || perm_elt < 0)
466  {
467  (*current_liboctave_error_handler)
468  ("%s: permutation vector contains an invalid element",
469  inv ? "ipermute" : "permute");
470 
471  return retval;
472  }
473 
474  if (checked[perm_elt])
475  {
476  (*current_liboctave_error_handler)
477  ("%s: permutation vector cannot contain identical elements",
478  inv ? "ipermute" : "permute");
479 
480  return retval;
481  }
482  else
483  {
484  checked[perm_elt] = true;
485  identity = identity && perm_elt == i;
486  }
487  }
488 
489  if (identity)
490  return *this;
491 
492  if (inv)
493  {
494  for (int i = 0; i < perm_vec_len; i++)
495  perm_vec(perm_vec_arg(i)) = i;
496  }
497 
498  for (int i = 0; i < perm_vec_len; i++)
499  dv_new(i) = dv(perm_vec(i));
500 
501  retval = Array<T> (dv_new);
502 
503  if (numel () > 0)
504  {
505  rec_permute_helper rh (dv, perm_vec);
506  rh.permute (data (), retval.fortran_vec ());
507  }
508 
509  return retval;
510 }
511 
512 // Helper class for multi-d index reduction and recursive
513 // indexing/indexed assignment. Rationale: we could avoid recursion
514 // using a state machine instead. However, using recursion is much
515 // more amenable to possible parallelization in the future.
516 // Also, the recursion solution is cleaner and more understandable.
517 
519 {
520  // CDIM occupies the last half of the space allocated for dim to
521  // avoid a double allocation.
522 
523  int n;
524  int top;
528 
529 public:
531  : n (ia.length ()), top (0), dim (new octave_idx_type [2*n]),
532  cdim (dim + n), idx (new idx_vector [n])
533  {
534  assert (n > 0 && (dv.length () == std::max (n, 2)));
535 
536  dim[0] = dv(0);
537  cdim[0] = 1;
538  idx[0] = ia(0);
539 
540  for (int i = 1; i < n; i++)
541  {
542  // Try reduction...
543  if (idx[top].maybe_reduce (dim[top], ia(i), dv(i)))
544  {
545  // Reduction successful, fold dimensions.
546  dim[top] *= dv(i);
547  }
548  else
549  {
550  // Unsuccessful, store index & cumulative dim.
551  top++;
552  idx[top] = ia(i);
553  dim[top] = dv(i);
554  cdim[top] = cdim[top-1] * dim[top-1];
555  }
556  }
557  }
558 
559  ~rec_index_helper (void) { delete [] idx; delete [] dim; }
560 
561 private:
562 
563  // Recursive N-d indexing
564  template <class T>
565  T *do_index (const T *src, T *dest, int lev) const
566  {
567  if (lev == 0)
568  dest += idx[0].index (src, dim[0], dest);
569  else
570  {
571  octave_idx_type nn = idx[lev].length (dim[lev]), d = cdim[lev];
572  for (octave_idx_type i = 0; i < nn; i++)
573  dest = do_index (src + d*idx[lev].xelem (i), dest, lev-1);
574  }
575 
576  return dest;
577  }
578 
579  // Recursive N-d indexed assignment
580  template <class T>
581  const T *do_assign (const T *src, T *dest, int lev) const
582  {
583  if (lev == 0)
584  src += idx[0].assign (src, dim[0], dest);
585  else
586  {
587  octave_idx_type nn = idx[lev].length (dim[lev]), d = cdim[lev];
588  for (octave_idx_type i = 0; i < nn; i++)
589  src = do_assign (src, dest + d*idx[lev].xelem (i), lev-1);
590  }
591 
592  return src;
593  }
594 
595  // Recursive N-d indexed assignment
596  template <class T>
597  void do_fill (const T& val, T *dest, int lev) const
598  {
599  if (lev == 0)
600  idx[0].fill (val, dim[0], dest);
601  else
602  {
603  octave_idx_type nn = idx[lev].length (dim[lev]), d = cdim[lev];
604  for (octave_idx_type i = 0; i < nn; i++)
605  do_fill (val, dest + d*idx[lev].xelem (i), lev-1);
606  }
607  }
608 
609  // No copying!
610 
612 
614 
615 public:
616 
617  template <class T>
618  void index (const T *src, T *dest) const { do_index (src, dest, top); }
619 
620  template <class T>
621  void assign (const T *src, T *dest) const { do_assign (src, dest, top); }
622 
623  template <class T>
624  void fill (const T& val, T *dest) const { do_fill (val, dest, top); }
625 
627  octave_idx_type& u) const
628  {
629  return top == 0 && idx[0].is_cont_range (dim[0], l, u);
630  }
631 };
632 
633 // Helper class for multi-d recursive resizing
634 // This handles resize () in an efficient manner, touching memory only
635 // once (apart from reinitialization)
637 {
641  int n;
642 
643 public:
644  rec_resize_helper (const dim_vector& ndv, const dim_vector& odv)
645  : cext (0), sext (0), dext (0), n (0)
646  {
647  int l = ndv.length ();
648  assert (odv.length () == l);
649  octave_idx_type ld = 1;
650  int i = 0;
651  for (; i < l-1 && ndv(i) == odv(i); i++) ld *= ndv(i);
652  n = l - i;
653  cext = new octave_idx_type [3*n];
654  // Trick to avoid three allocations
655  sext = cext + n;
656  dext = sext + n;
657 
658  octave_idx_type sld = ld, dld = ld;
659  for (int j = 0; j < n; j++)
660  {
661  cext[j] = std::min (ndv(i+j), odv(i+j));
662  sext[j] = sld *= odv(i+j);
663  dext[j] = dld *= ndv(i+j);
664  }
665  cext[0] *= ld;
666  }
667 
668  ~rec_resize_helper (void) { delete [] cext; }
669 
670 private:
671 
672  // recursive resizing
673  template <class T>
674  void do_resize_fill (const T* src, T *dest, const T& rfv, int lev) const
675  {
676  if (lev == 0)
677  {
678  copy_or_memcpy (cext[0], src, dest);
679  fill_or_memset (dext[0] - cext[0], rfv, dest + cext[0]);
680  }
681  else
682  {
683  octave_idx_type sd = sext[lev-1], dd = dext[lev-1], k;
684  for (k = 0; k < cext[lev]; k++)
685  do_resize_fill (src + k * sd, dest + k * dd, rfv, lev - 1);
686 
687  fill_or_memset (dext[lev] - k * dd, rfv, dest + k * dd);
688  }
689  }
690 
691  // No copying!
692 
694 
696 
697 public:
698 
699  template <class T>
700  void resize_fill (const T* src, T *dest, const T& rfv) const
701  { do_resize_fill (src, dest, rfv, n-1); }
702 };
703 
704 template <class T>
705 Array<T>
706 Array<T>::index (const idx_vector& i) const
707 {
708  octave_idx_type n = numel ();
709  Array<T> retval;
710 
711  if (i.is_colon ())
712  {
713  // A(:) produces a shallow copy as a column vector.
714  retval = Array<T> (*this, dim_vector (n, 1));
715  }
716  else
717  {
718  if (i.extent (n) != n)
719  gripe_index_out_of_range (1, 1, i.extent (n), n); // throws
720 
721  // FIXME: this is the only place where orig_dimensions are used.
722  dim_vector rd = i.orig_dimensions ();
723  octave_idx_type il = i.length (n);
724 
725  // FIXME: this is for Matlab compatibility. Matlab 2007 given
726  //
727  // b = ones (3,1)
728  //
729  // yields the following:
730  //
731  // b(zeros (0,0)) gives []
732  // b(zeros (1,0)) gives zeros (0,1)
733  // b(zeros (0,1)) gives zeros (0,1)
734  // b(zeros (0,m)) gives zeros (0,m)
735  // b(zeros (m,0)) gives zeros (m,0)
736  // b(1:2) gives ones (2,1)
737  // b(ones (2)) gives ones (2) etc.
738  //
739  // As you can see, the behaviour is weird, but the tests end up pretty
740  // simple. Nah, I don't want to suggest that this is ad hoc :)
741 
742  if (ndims () == 2 && n != 1 && rd.is_vector ())
743  {
744  if (columns () == 1)
745  rd = dim_vector (il, 1);
746  else if (rows () == 1)
747  rd = dim_vector (1, il);
748  }
749 
750  octave_idx_type l, u;
751  if (il != 0 && i.is_cont_range (n, l, u))
752  // If suitable, produce a shallow slice.
753  retval = Array<T> (*this, rd, l, u);
754  else
755  {
756  // Don't use resize here to avoid useless initialization for POD
757  // types.
758  retval = Array<T> (rd);
759 
760  if (il != 0)
761  i.index (data (), n, retval.fortran_vec ());
762  }
763  }
764 
765  return retval;
766 }
767 
768 template <class T>
769 Array<T>
770 Array<T>::index (const idx_vector& i, const idx_vector& j) const
771 {
772  // Get dimensions, allowing Fortran indexing in the 2nd dim.
773  dim_vector dv = dimensions.redim (2);
774  octave_idx_type r = dv(0), c = dv(1);
775  Array<T> retval;
776 
777  if (i.is_colon () && j.is_colon ())
778  {
779  // A(:,:) produces a shallow copy.
780  retval = Array<T> (*this, dv);
781  }
782  else
783  {
784  if (i.extent (r) != r)
785  gripe_index_out_of_range (2, 1, i.extent (r), r); // throws
786  if (j.extent (c) != c)
787  gripe_index_out_of_range (2, 2, j.extent (c), c); // throws
788 
789  octave_idx_type n = numel (), il = i.length (r), jl = j.length (c);
790 
791  idx_vector ii (i);
792 
793  if (ii.maybe_reduce (r, j, c))
794  {
795  octave_idx_type l, u;
796  if (ii.length () > 0 && ii.is_cont_range (n, l, u))
797  // If suitable, produce a shallow slice.
798  retval = Array<T> (*this, dim_vector (il, jl), l, u);
799  else
800  {
801  // Don't use resize to avoid useless initialization for POD types.
802  retval = Array<T> (dim_vector (il, jl));
803 
804  ii.index (data (), n, retval.fortran_vec ());
805  }
806  }
807  else
808  {
809  // Don't use resize to avoid useless initialization for POD types.
810  retval = Array<T> (dim_vector (il, jl));
811 
812  const T* src = data ();
813  T *dest = retval.fortran_vec ();
814 
815  for (octave_idx_type k = 0; k < jl; k++)
816  dest += i.index (src + r * j.xelem (k), r, dest);
817  }
818  }
819 
820  return retval;
821 }
822 
823 template <class T>
824 Array<T>
826 {
827  int ial = ia.length ();
828  Array<T> retval;
829 
830  // FIXME: is this dispatching necessary?
831  if (ial == 1)
832  retval = index (ia(0));
833  else if (ial == 2)
834  retval = index (ia(0), ia(1));
835  else if (ial > 0)
836  {
837  // Get dimensions, allowing Fortran indexing in the last dim.
838  dim_vector dv = dimensions.redim (ial);
839 
840  // Check for out of bounds conditions.
841  bool all_colons = true;
842  for (int i = 0; i < ial; i++)
843  {
844  if (ia(i).extent (dv(i)) != dv(i))
845  gripe_index_out_of_range (ial, i+1, ia(i).extent (dv(i)), dv(i)); // throws
846 
847  all_colons = all_colons && ia(i).is_colon ();
848  }
849 
850 
851  if (all_colons)
852  {
853  // A(:,:,...,:) produces a shallow copy.
855  retval = Array<T> (*this, dv);
856  }
857  else
858  {
859  // Form result dimensions.
860  dim_vector rdv = dim_vector::alloc (ial);
861  for (int i = 0; i < ial; i++) rdv(i) = ia(i).length (dv(i));
863 
864  // Prepare for recursive indexing
865  rec_index_helper rh (dv, ia);
866 
867  octave_idx_type l, u;
868  if (rh.is_cont_range (l, u))
869  // If suitable, produce a shallow slice.
870  retval = Array<T> (*this, rdv, l, u);
871  else
872  {
873  // Don't use resize to avoid useless initialization for POD types.
874  retval = Array<T> (rdv);
875 
876  // Do it.
877  rh.index (data (), retval.fortran_vec ());
878  }
879  }
880  }
881 
882  return retval;
883 }
884 
885 // The default fill value. Override if you want a different one.
886 
887 template <class T>
888 T
890 {
891  static T zero = T ();
892  return zero;
893 }
894 
895 // Yes, we could do resize using index & assign. However, that would
896 // possibly involve a lot more memory traffic than we actually need.
897 
898 template <class T>
899 void
901 {
902  if (n >= 0 && ndims () == 2)
903  {
904  dim_vector dv;
905  // This is driven by Matlab's behaviour of giving a *row* vector
906  // on some out-of-bounds assignments. Specifically, Matlab
907  // allows a(i) with out-of-bouds i when a is either of 0x0, 1x0,
908  // 1x1, 0xN, and gives a row vector in all cases (yes, even the
909  // last one, search me why). Giving a column vector would make
910  // much more sense (given the way trailing singleton dims are
911  // treated).
912  bool invalid = false;
913  if (rows () == 0 || rows () == 1)
914  dv = dim_vector (1, n);
915  else if (columns () == 1)
916  dv = dim_vector (n, 1);
917  else
918  invalid = true;
919 
920  if (invalid)
922  else
923  {
924  octave_idx_type nx = numel ();
925  if (n == nx - 1 && n > 0)
926  {
927  // Stack "pop" operation.
928  if (rep->count == 1)
929  slice_data[slice_len-1] = T ();
930  slice_len--;
931  dimensions = dv;
932  }
933  else if (n == nx + 1 && nx > 0)
934  {
935  // Stack "push" operation.
936  if (rep->count == 1
937  && slice_data + slice_len < rep->data + rep->len)
938  {
939  slice_data[slice_len++] = rfv;
940  dimensions = dv;
941  }
942  else
943  {
944  static const octave_idx_type max_stack_chunk = 1024;
945  octave_idx_type nn = n + std::min (nx, max_stack_chunk);
946  Array<T> tmp (Array<T> (dim_vector (nn, 1)), dv, 0, n);
947  T *dest = tmp.fortran_vec ();
948 
949  copy_or_memcpy (nx, data (), dest);
950  dest[nx] = rfv;
951 
952  *this = tmp;
953  }
954  }
955  else if (n != nx)
956  {
957  Array<T> tmp = Array<T> (dv);
958  T *dest = tmp.fortran_vec ();
959 
960  octave_idx_type n0 = std::min (n, nx), n1 = n - n0;
961  copy_or_memcpy (n0, data (), dest);
962  fill_or_memset (n1, rfv, dest + n0);
963 
964  *this = tmp;
965  }
966  }
967  }
968  else
970 }
971 
972 template <class T>
973 void
975 {
976  if (r >= 0 && c >= 0 && ndims () == 2)
977  {
978  octave_idx_type rx = rows (), cx = columns ();
979  if (r != rx || c != cx)
980  {
981  Array<T> tmp = Array<T> (dim_vector (r, c));
982  T *dest = tmp.fortran_vec ();
983 
984  octave_idx_type r0 = std::min (r, rx), r1 = r - r0;
985  octave_idx_type c0 = std::min (c, cx), c1 = c - c0;
986  const T *src = data ();
987  if (r == rx)
988  {
989  copy_or_memcpy (r * c0, src, dest);
990  dest += r * c0;
991  }
992  else
993  {
994  for (octave_idx_type k = 0; k < c0; k++)
995  {
996  copy_or_memcpy (r0, src, dest);
997  src += rx;
998  dest += r0;
999  fill_or_memset (r1, rfv, dest);
1000  dest += r1;
1001  }
1002  }
1003 
1004  fill_or_memset (r * c1, rfv, dest);
1005 
1006  *this = tmp;
1007  }
1008  }
1009  else
1011 
1012 }
1013 
1014 template<class T>
1015 void
1016 Array<T>::resize (const dim_vector& dv, const T& rfv)
1017 {
1018  int dvl = dv.length ();
1019  if (dvl == 2)
1020  resize2 (dv(0), dv(1), rfv);
1021  else if (dimensions != dv)
1022  {
1023  if (dimensions.length () <= dvl && ! dv.any_neg ())
1024  {
1025  Array<T> tmp (dv);
1026  // Prepare for recursive resizing.
1027  rec_resize_helper rh (dv, dimensions.redim (dvl));
1028 
1029  // Do it.
1030  rh.resize_fill (data (), tmp.fortran_vec (), rfv);
1031  *this = tmp;
1032  }
1033  else
1035  }
1036 }
1037 
1038 template <class T>
1039 Array<T>
1040 Array<T>::index (const idx_vector& i, bool resize_ok, const T& rfv) const
1041 {
1042  Array<T> tmp = *this;
1043  if (resize_ok)
1044  {
1045  octave_idx_type n = numel (), nx = i.extent (n);
1046  if (n != nx)
1047  {
1048  if (i.is_scalar ())
1049  return Array<T> (dim_vector (1, 1), rfv);
1050  else
1051  tmp.resize1 (nx, rfv);
1052  }
1053 
1054  if (tmp.numel () != nx)
1055  return Array<T> ();
1056  }
1057 
1058  return tmp.index (i);
1059 }
1060 
1061 template <class T>
1062 Array<T>
1064  bool resize_ok, const T& rfv) const
1065 {
1066  Array<T> tmp = *this;
1067  if (resize_ok)
1068  {
1069  dim_vector dv = dimensions.redim (2);
1070  octave_idx_type r = dv(0), c = dv(1);
1071  octave_idx_type rx = i.extent (r), cx = j.extent (c);
1072  if (r != rx || c != cx)
1073  {
1074  if (i.is_scalar () && j.is_scalar ())
1075  return Array<T> (dim_vector (1, 1), rfv);
1076  else
1077  tmp.resize2 (rx, cx, rfv);
1078  }
1079 
1080  if (tmp.rows () != rx || tmp.columns () != cx)
1081  return Array<T> ();
1082  }
1083 
1084  return tmp.index (i, j);
1085 }
1086 
1087 template <class T>
1088 Array<T>
1090  bool resize_ok, const T& rfv) const
1091 {
1092  Array<T> tmp = *this;
1093  if (resize_ok)
1094  {
1095  int ial = ia.length ();
1096  dim_vector dv = dimensions.redim (ial);
1097  dim_vector dvx = dim_vector::alloc (ial);
1098  for (int i = 0; i < ial; i++) dvx(i) = ia(i).extent (dv (i));
1099  if (! (dvx == dv))
1100  {
1101  bool all_scalars = true;
1102  for (int i = 0; i < ial; i++)
1103  all_scalars = all_scalars && ia(i).is_scalar ();
1104  if (all_scalars)
1105  return Array<T> (dim_vector (1, 1), rfv);
1106  else
1107  tmp.resize (dvx, rfv);
1108  }
1109 
1110  if (tmp.dimensions != dvx)
1111  return Array<T> ();
1112  }
1113 
1114  return tmp.index (ia);
1115 }
1116 
1117 
1118 template <class T>
1119 void
1120 Array<T>::assign (const idx_vector& i, const Array<T>& rhs, const T& rfv)
1121 {
1122  octave_idx_type n = numel (), rhl = rhs.numel ();
1123 
1124  if (rhl == 1 || i.length (n) == rhl)
1125  {
1126  octave_idx_type nx = i.extent (n);
1127  bool colon = i.is_colon_equiv (nx);
1128  // Try to resize first if necessary.
1129  if (nx != n)
1130  {
1131  // Optimize case A = []; A(1:n) = X with A empty.
1132  if (dimensions.zero_by_zero () && colon)
1133  {
1134  if (rhl == 1)
1135  *this = Array<T> (dim_vector (1, nx), rhs(0));
1136  else
1137  *this = Array<T> (rhs, dim_vector (1, nx));
1138  return;
1139  }
1140 
1141  resize1 (nx, rfv);
1142  n = numel ();
1143  }
1144 
1145  if (colon)
1146  {
1147  // A(:) = X makes a full fill or a shallow copy.
1148  if (rhl == 1)
1149  fill (rhs(0));
1150  else
1151  *this = rhs.reshape (dimensions);
1152  }
1153  else
1154  {
1155  if (rhl == 1)
1156  i.fill (rhs(0), n, fortran_vec ());
1157  else
1158  i.assign (rhs.data (), n, fortran_vec ());
1159  }
1160  }
1161  else
1163 }
1164 
1165 template <class T>
1166 void
1168  const Array<T>& rhs, const T& rfv)
1169 {
1170  bool initial_dims_all_zero = dimensions.all_zero ();
1171 
1172  // Get RHS extents, discarding singletons.
1173  dim_vector rhdv = rhs.dims ();
1174 
1175  // Get LHS extents, allowing Fortran indexing in the second dim.
1176  dim_vector dv = dimensions.redim (2);
1177 
1178  // Check for out-of-bounds and form resizing dimensions.
1179  dim_vector rdv;
1180 
1181  // In the special when all dimensions are zero, colons are allowed
1182  // to inquire the shape of RHS. The rules are more obscure, so we
1183  // solve that elsewhere.
1184  if (initial_dims_all_zero)
1185  rdv = zero_dims_inquire (i, j, rhdv);
1186  else
1187  {
1188  rdv(0) = i.extent (dv(0));
1189  rdv(1) = j.extent (dv(1));
1190  }
1191 
1192  bool isfill = rhs.numel () == 1;
1193  octave_idx_type il = i.length (rdv(0)), jl = j.length (rdv(1));
1194  rhdv.chop_all_singletons ();
1195  bool match = (isfill
1196  || (rhdv.length () == 2 && il == rhdv(0) && jl == rhdv(1)));
1197  match = match || (il == 1 && jl == rhdv(0) && rhdv(1) == 1);
1198 
1199  if (match)
1200  {
1201  bool all_colons = (i.is_colon_equiv (rdv(0))
1202  && j.is_colon_equiv (rdv(1)));
1203  // Resize if requested.
1204  if (rdv != dv)
1205  {
1206  // Optimize case A = []; A(1:m, 1:n) = X
1207  if (dv.zero_by_zero () && all_colons)
1208  {
1209  if (isfill)
1210  *this = Array<T> (rdv, rhs(0));
1211  else
1212  *this = Array<T> (rhs, rdv);
1213  return;
1214  }
1215 
1216  resize (rdv, rfv);
1217  dv = dimensions;
1218  }
1219 
1220  if (all_colons)
1221  {
1222  // A(:,:) = X makes a full fill or a shallow copy
1223  if (isfill)
1224  fill (rhs(0));
1225  else
1226  *this = rhs.reshape (dimensions);
1227  }
1228  else
1229  {
1230  // The actual work.
1231  octave_idx_type n = numel (), r = dv (0), c = dv (1);
1232  idx_vector ii (i);
1233 
1234  const T* src = rhs.data ();
1235  T *dest = fortran_vec ();
1236 
1237  // Try reduction first.
1238  if (ii.maybe_reduce (r, j, c))
1239  {
1240  if (isfill)
1241  ii.fill (*src, n, dest);
1242  else
1243  ii.assign (src, n, dest);
1244  }
1245  else
1246  {
1247  if (isfill)
1248  {
1249  for (octave_idx_type k = 0; k < jl; k++)
1250  i.fill (*src, r, dest + r * j.xelem (k));
1251  }
1252  else
1253  {
1254  for (octave_idx_type k = 0; k < jl; k++)
1255  src += i.assign (src, r, dest + r * j.xelem (k));
1256  }
1257  }
1258  }
1259  }
1260  else
1262 }
1263 
1264 template <class T>
1265 void
1267  const Array<T>& rhs, const T& rfv)
1268 {
1269  int ial = ia.length ();
1270 
1271  // FIXME: is this dispatching necessary / desirable?
1272  if (ial == 1)
1273  assign (ia(0), rhs, rfv);
1274  else if (ial == 2)
1275  assign (ia(0), ia(1), rhs, rfv);
1276  else if (ial > 0)
1277  {
1278  bool initial_dims_all_zero = dimensions.all_zero ();
1279 
1280  // Get RHS extents, discarding singletons.
1281  dim_vector rhdv = rhs.dims ();
1282 
1283  // Get LHS extents, allowing Fortran indexing in the second dim.
1284  dim_vector dv = dimensions.redim (ial);
1285 
1286  // Get the extents forced by indexing.
1287  dim_vector rdv;
1288 
1289  // In the special when all dimensions are zero, colons are
1290  // allowed to inquire the shape of RHS. The rules are more
1291  // obscure, so we solve that elsewhere.
1292  if (initial_dims_all_zero)
1293  rdv = zero_dims_inquire (ia, rhdv);
1294  else
1295  {
1296  rdv = dim_vector::alloc (ial);
1297  for (int i = 0; i < ial; i++)
1298  rdv(i) = ia(i).extent (dv(i));
1299  }
1300 
1301  // Check whether LHS and RHS match, up to singleton dims.
1302  bool match = true, all_colons = true, isfill = rhs.numel () == 1;
1303 
1304  rhdv.chop_all_singletons ();
1305  int j = 0, rhdvl = rhdv.length ();
1306  for (int i = 0; i < ial; i++)
1307  {
1308  all_colons = all_colons && ia(i).is_colon_equiv (rdv(i));
1309  octave_idx_type l = ia(i).length (rdv(i));
1310  if (l == 1) continue;
1311  match = match && j < rhdvl && l == rhdv(j++);
1312  }
1313 
1314  match = match && (j == rhdvl || rhdv(j) == 1);
1315  match = match || isfill;
1316 
1317  if (match)
1318  {
1319  // Resize first if necessary.
1320  if (rdv != dv)
1321  {
1322  // Optimize case A = []; A(1:m, 1:n) = X
1323  if (dv.zero_by_zero () && all_colons)
1324  {
1325  rdv.chop_trailing_singletons ();
1326  if (isfill)
1327  *this = Array<T> (rdv, rhs(0));
1328  else
1329  *this = Array<T> (rhs, rdv);
1330  return;
1331  }
1332 
1333  resize (rdv, rfv);
1334  dv = rdv;
1335  }
1336 
1337  if (all_colons)
1338  {
1339  // A(:,:,...,:) = X makes a full fill or a shallow copy.
1340  if (isfill)
1341  fill (rhs(0));
1342  else
1343  *this = rhs.reshape (dimensions);
1344  }
1345  else
1346  {
1347  // Do the actual work.
1348 
1349  // Prepare for recursive indexing
1350  rec_index_helper rh (dv, ia);
1351 
1352  // Do it.
1353  if (isfill)
1354  rh.fill (rhs(0), fortran_vec ());
1355  else
1356  rh.assign (rhs.data (), fortran_vec ());
1357  }
1358  }
1359  else
1361  }
1362 }
1363 
// Delete the elements addressed by index I from *this viewed as a
// linear (1-D) array.  A colon index clears the whole array; a
// trailing scalar index on a vector is handled as a cheap "pop";
// a contiguous range is deleted with two block copies; anything else
// falls back to indexing by the complement of I.
// NOTE(review): the Doxygen extraction dropped original line 1366
// here, which presumably carried the signature
// `Array<T>::delete_elements (const idx_vector& i)` -- confirm
// against the real Array.cc.
1364 template <class T>
1365 void
1367 {
1368  octave_idx_type n = numel ();
1369  if (i.is_colon ())
1370  {
1371  *this = Array<T> ();
1372  }
1373  else if (i.length (n) != 0)
1374  {
       // Deleting past the end is an error; `true` flags 1-D gripe text.
1375  if (i.extent (n) != n)
1376  gripe_del_index_out_of_range (true, i.extent (n), n);
1377 
1378  octave_idx_type l, u;
       // Preserve column orientation of the result when *this is a
       // column vector (2-D, one column, more than one row).
1379  bool col_vec = ndims () == 2 && columns () == 1 && rows () != 1;
1380  if (i.is_scalar () && i(0) == n-1 && dimensions.is_vector ())
1381  {
1382  // Stack "pop" operation.
1383  resize1 (n-1);
1384  }
1385  else if (i.is_cont_range (n, l, u))
1386  {
1387  // Special case deleting a contiguous range.
1388  octave_idx_type m = n + l - u;
1389  Array<T> tmp (dim_vector (col_vec ? m : 1, !col_vec ? m : 1));
1390  const T *src = data ();
1391  T *dest = tmp.fortran_vec ();
       // Copy the head [0,l) and the tail [u,n) around the deleted span.
1392  copy_or_memcpy (l, src, dest);
1393  copy_or_memcpy (n - u, src + u, dest + l);
1394  *this = tmp;
1395  }
1396  else
1397  {
1398  // Use index.
1399  *this = index (i.complement (n));
1400  }
1401  }
1402 }
1403 
// Delete the slices addressed by index I along dimension DIM.
// A colon index clears the array; a contiguous range is removed with
// a blocked copy (dl elements below DIM, du pages above it); other
// index shapes fall back to indexing by the complement.
// NOTE(review): the extraction dropped original line 1406 here,
// presumably the signature
// `Array<T>::delete_elements (int dim, const idx_vector& i)` --
// confirm against the real Array.cc.
1404 template <class T>
1405 void
1407 {
1408  if (dim < 0 || dim >= ndims ())
1409  {
1410  (*current_liboctave_error_handler)
1411  ("invalid dimension in delete_elements")&#59;
1412  return;
1413  }
1414 
1415  octave_idx_type n = dimensions (dim);
1416  if (i.is_colon ())
1417  {
1418  *this = Array<T> ();
1419  }
1420  else if (i.length (n) != 0)
1421  {
       // Deleting past the end is an error; `false` flags N-D gripe text.
1422  if (i.extent (n) != n)
1423  gripe_del_index_out_of_range (false, i.extent (n), n);
1424 
1425  octave_idx_type l, u;
1426 
1427  if (i.is_cont_range (n, l, u))
1428  {
1429  // Special case deleting a contiguous range.
1430  octave_idx_type nd = n + l - u, dl = 1, du = 1;
1431  dim_vector rdv = dimensions;
1432  rdv(dim) = nd;
       // dl = product of extents below DIM, du = product above DIM.
1433  for (int k = 0; k < dim; k++) dl *= dimensions(k);
1434  for (int k = dim + 1; k < ndims (); k++) du *= dimensions(k);
1435 
1436  // Special case deleting a contiguous range.
1437  Array<T> tmp = Array<T> (rdv);
1438  const T *src = data ();
1439  T *dest = tmp.fortran_vec ();
       // Rescale the range bounds to element counts within one page.
1440  l *= dl; u *= dl; n *= dl;
1441  for (octave_idx_type k = 0; k < du; k++)
1442  {
1443  copy_or_memcpy (l, src, dest);
1444  dest += l;
1445  copy_or_memcpy (n - u, src + u, dest);
1446  dest += n - u;
1447  src += n;
1448  }
1449 
1450  *this = tmp;
1451  }
1452  else
1453  {
1454  // Use index.
1455  Array<idx_vector> ia (dim_vector (ndims (), 1), idx_vector::colon);
1456  ia (dim) = i.complement (n);
1457  *this = index (ia);
1458  }
1459  }
1460 }
1461 
// N-dimensional null assignment, A(ia...) = [].  At most one index in
// IA may be a non-colon; that index selects the dimension to delete
// along.  All-colon indices clear the array.  Otherwise the deletion
// is allowed only if it is a no-op (some index is empty).
// NOTE(review): the extraction dropped original line 1464, presumably
// the signature `Array<T>::delete_elements (const Array<idx_vector>& ia)`.
1462 template <class T>
1463 void
1465 {
1466  int ial = ia.length ();
1467 
1468  if (ial == 1)
1469  delete_elements (ia(0));
1470  else
1471  {
       // Find the first non-colon index; bail out at the second one.
1472  int k, dim = -1;
1473  for (k = 0; k < ial; k++)
1474  {
1475  if (! ia(k).is_colon ())
1476  {
1477  if (dim < 0)
1478  dim = k;
1479  else
1480  break;
1481  }
1482  }
1483  if (dim < 0)
1484  {
       // All indices are colons: clear along the first dimension.
1485  dim_vector dv = dimensions;
1486  dv(0) = 0;
1487  *this = Array<T> (dv);
1488  }
1489  else if (k == ial)
1490  {
       // Exactly one non-colon index: delete along that dimension.
1491  delete_elements (dim, ia(dim));
1492  }
1493  else
1494  {
1495  // Allow the null assignment to succeed if it won't change
1496  // anything because the indices reference an empty slice,
1497  // provided that there is at most one non-colon (or
1498  // equivalent) index. So, we still have the requirement of
1499  // deleting a slice, but it is OK if the slice is empty.
1500 
1501  // For compatibility with Matlab, stop checking once we see
1502  // more than one non-colon index or an empty index. Matlab
1503  // considers "[]" to be an empty index but not "false". We
1504  // accept both.
1505 
1506  bool empty_assignment = false;
1507 
1508  int num_non_colon_indices = 0;
1509 
1510  int nd = ndims ();
1511 
1512  for (int i = 0; i < ial; i++)
1513  {
       // Trailing indices beyond ndims address singleton dimensions.
1514  octave_idx_type dim_len = i >= nd ? 1 : dimensions(i);
1515 
1516  if (ia(i).length (dim_len) == 0)
1517  {
1518  empty_assignment = true;
1519  break;
1520  }
1521 
1522  if (! ia(i).is_colon_equiv (dim_len))
1523  {
1524  num_non_colon_indices++;
1525 
1526  if (num_non_colon_indices == 2)
1527  break;
1528  }
1529  }
1530 
1531  if (! empty_assignment)
1532  (*current_liboctave_error_handler)
1533  ("a null assignment can only have one non-colon index");
1534  }
1535  }
1536 
1537 }
1538 
// Insert array A into *this with its upper-left corner at (R, C).
// For 2-D operands this is a plain 2-index assignment; otherwise the
// remaining dimensions of A are addressed from offset 0.
// NOTE(review): the extraction dropped original line 1541, presumably
// the signature `Array<T>::insert (const Array<T>& a,
// octave_idx_type r, octave_idx_type c)`.
1539 template <class T>
1540 Array<T>&
1542 {
1543  idx_vector i (r, r + a.rows ());
1544  idx_vector j (c, c + a.columns ());
1545  if (ndims () == 2 && a.ndims () == 2)
1546  assign (i, j, a);
1547  else
1548  {
1549  Array<idx_vector> idx (dim_vector (a.ndims (), 1));
1550  idx(0) = i;
1551  idx(1) = j;
       // Higher dimensions of A are inserted starting at offset 0.
1552  for (int k = 2; k < a.ndims (); k++)
1553  idx(k) = idx_vector (0, a.dimensions(k));
1554  assign (idx, a);
1555  }
1556 
1557  return *this;
1558 }
1559 
// Insert array A into *this with its origin at the N-D offset RA_IDX.
// Each index k spans [ra_idx(k), ra_idx(k) + size of A along k).
// NOTE(review): the extraction dropped original line 1562, presumably
// the signature `Array<T>::insert (const Array<T>& a,
// const Array<octave_idx_type>& ra_idx)`.
1560 template <class T>
1561 Array<T>&
1563 {
1564  octave_idx_type n = ra_idx.length ();
1565  Array<idx_vector> idx (dim_vector (n, 1));
     // Pad/truncate A's dims to the number of offsets supplied.
1566  const dim_vector dva = a.dims ().redim (n);
1567  for (octave_idx_type k = 0; k < n; k++)
1568  idx(k) = idx_vector (ra_idx (k), ra_idx (k) + dva(k));
1569 
1570  assign (idx, a);
1571 
1572  return *this;
1573 }
1574 
1575 
// Return the transpose of a 2-D array.  Large matrices (both dims
// >= 8) use the cache-blocked kernel shared with permute; other
// genuine matrices use a simple double loop; vectors and empties are
// transposed by just swapping the dimensions (shallow copy).
// NOTE(review): the extraction dropped original line 1578, presumably
// the signature `Array<T>::transpose (void) const`.
1576 template <class T>
1577 Array<T>
1579 {
1580  assert (ndims () == 2);
1581 
1582  octave_idx_type nr = dim1 ();
1583  octave_idx_type nc = dim2 ();
1584 
1585  if (nr >= 8 && nc >= 8)
1586  {
1587  Array<T> result (dim_vector (nc, nr));
1588 
1589  // Reuse the implementation used for permuting.
1590 
1591  rec_permute_helper::blk_trans (data (), result.fortran_vec (), nr, nc);
1592 
1593  return result;
1594  }
1595  else if (nr > 1 && nc > 1)
1596  {
1597  Array<T> result (dim_vector (nc, nr));
1598 
1599  for (octave_idx_type j = 0; j < nc; j++)
1600  for (octave_idx_type i = 0; i < nr; i++)
1601  result.xelem (j, i) = xelem (i, j);
1602 
1603  return result;
1604  }
1605  else
1606  {
1607  // Fast transpose for vectors and empty matrices.
1608  return Array<T> (*this, dim_vector (nc, nr));
1609  }
1610 }
1611 
// Identity element transform.  Used by Array<T>::hermitian as the
// default when no conjugation function is supplied, so plain
// transposition and conjugate transposition share one code path.
template <class T>
static T
no_op_fcn (const T& x)
{
  return x;
}
1618 
// Return the conjugate transpose of a 2-D array, applying FCN to each
// element (FCN defaults to the identity, making this a plain
// transpose).  Matrices with both dimensions >= 8 go through an
// 8x8 cache-blocked kernel using a small stack buffer; the leftover
// rows/columns and small matrices use a direct double loop.
1619 template <class T>
1620 Array<T>
1621 Array<T>::hermitian (T (*fcn) (const T&)) const
1622 {
1623  assert (ndims () == 2);
1624 
1625  if (! fcn)
1626  fcn = no_op_fcn<T>;
1627 
1628  octave_idx_type nr = dim1 ();
1629  octave_idx_type nc = dim2 ();
1630 
1631  if (nr >= 8 && nc >= 8)
1632  {
1633  Array<T> result (dim_vector (nc, nr));
1634 
1635  // Blocked transpose to attempt to avoid cache misses.
1636 
1637  // Don't use OCTAVE_LOCAL_BUFFER here as it doesn't work with bool
1638  // on some compilers.
1639  T buf[64];
1640 
      // ii/jj walk 8x8 tiles; leftovers are handled after each loop.
1641  octave_idx_type ii = 0, jj;
1642  for (jj = 0; jj < (nc - 8 + 1); jj += 8)
1643  {
1644  for (ii = 0; ii < (nr - 8 + 1); ii += 8)
1645  {
1646  // Copy to buffer
1647  for (octave_idx_type j = jj, k = 0, idxj = jj * nr;
1648  j < jj + 8; j++, idxj += nr)
1649  for (octave_idx_type i = ii; i < ii + 8; i++)
1650  buf[k++] = xelem (i + idxj);
1651 
1652  // Copy from buffer
1653  for (octave_idx_type i = ii, idxi = ii * nc; i < ii + 8;
1654  i++, idxi += nc)
1655  for (octave_idx_type j = jj, k = i - ii; j < jj + 8;
1656  j++, k+=8)
1657  result.xelem (j + idxi) = fcn (buf[k]);
1658  }
1659 
      // Leftover rows below the last full tile in this tile-column.
1660  if (ii < nr)
1661  for (octave_idx_type j = jj; j < jj + 8; j++)
1662  for (octave_idx_type i = ii; i < nr; i++)
1663  result.xelem (j, i) = fcn (xelem (i, j));
1664  }
1665 
      // Leftover columns to the right of the last full tile-column.
1666  for (octave_idx_type j = jj; j < nc; j++)
1667  for (octave_idx_type i = 0; i < nr; i++)
1668  result.xelem (j, i) = fcn (xelem (i, j));
1669 
1670  return result;
1671  }
1672  else
1673  {
1674  Array<T> result (dim_vector (nc, nr));
1675 
1676  for (octave_idx_type j = 0; j < nc; j++)
1677  for (octave_idx_type i = 0; i < nr; i++)
1678  result.xelem (j, i) = fcn (xelem (i, j));
1679 
1680  return result;
1681  }
1682 }
1683 
1684 /*
1685 
1686 %% Transpose tests for matrices of the tile size and plus or minus a row
1687 %% and with four tiles.
1688 
1689 %!shared m7, mt7, m8, mt8, m9, mt9
1690 %! m7 = reshape (1 : 7*8, 8, 7);
1691 %! mt7 = [1:8; 9:16; 17:24; 25:32; 33:40; 41:48; 49:56];
1692 %! m8 = reshape (1 : 8*8, 8, 8);
1693 %! mt8 = [mt7; 57:64];
1694 %! m9 = reshape (1 : 9*8, 8, 9);
1695 %! mt9 = [mt8; 65:72];
1696 
1697 %!assert (m7', mt7)
1698 %!assert ((1i*m7).', 1i * mt7)
1699 %!assert ((1i*m7)', conj (1i * mt7))
1700 %!assert (m8', mt8)
1701 %!assert ((1i*m8).', 1i * mt8)
1702 %!assert ((1i*m8)', conj (1i * mt8))
1703 %!assert (m9', mt9)
1704 %!assert ((1i*m9).', 1i * mt9)
1705 %!assert ((1i*m9)', conj (1i * mt9))
1706 %!assert ([m7, m8; m7, m8]', [mt7, mt7; mt8, mt8])
1707 %!assert ((1i*[m7, m8; m7, m8]).', 1i * [mt7, mt7; mt8, mt8])
1708 %!assert ((1i*[m7, m8; m7, m8])', conj (1i * [mt7, mt7; mt8, mt8]))
1709 %!assert ([m8, m8; m8, m8]', [mt8, mt8; mt8, mt8])
1710 %!assert ((1i*[m8, m8; m8, m8]).', 1i * [mt8, mt8; mt8, mt8])
1711 %!assert ((1i*[m8, m8; m8, m8])', conj (1i * [mt8, mt8; mt8, mt8]))
1712 %!assert ([m9, m8; m9, m8]', [mt9, mt9; mt8, mt8])
1713 %!assert ((1i*[m9, m8; m9, m8]).', 1i * [mt9, mt9; mt8, mt8])
1714 %!assert ((1i*[m9, m8; m9, m8])', conj (1i * [mt9, mt9; mt8, mt8]))
1715 
1716 */
1717 
// Return a writable pointer to the (column-major) data, forcing a
// deep copy first if the underlying rep is shared (copy-on-write).
// NOTE(review): the extraction dropped original line 1720, presumably
// the signature `Array<T>::fortran_vec (void)`.
1718 template <class T>
1719 T *
1721 {
1722  make_unique ();
1723 
1724  return slice_data;
1725 }
1726 
1727 // Non-real types don't have NaNs.
// Generic NaN test used by the sorting routines; this default always
// returns false and is specialized elsewhere for floating-point types.
// NOTE(review): the extraction dropped original line 1730, presumably
// the signature `sort_isnan (typename ref_param<T>::type)`.
1728 template <class T>
1729 inline bool
1731 {
1732  return false;
1733 }
1734 
// Return a copy of *this sorted along dimension DIM in the given
// MODE.  NaNs are partitioned to the end of each sorted run (and to
// the front for DESCENDING).  The stride == 1 path sorts contiguous
// runs in place in the result; the strided path gathers each run into
// a local buffer, sorts it, and scatters it back.
1735 template <class T>
1736 Array<T>
1737 Array<T>::sort (int dim, sortmode mode) const
1738 {
1739  if (dim < 0)
1740  {
1741  (*current_liboctave_error_handler)
1742  ("sort: invalid dimension");
1743  return Array<T> ();
1744  }
1745 
1746  Array<T> m (dims ());
1747 
1748  dim_vector dv = m.dims ();
1749 
1750  if (m.length () < 1)
1751  return m;
1752 
     // Sorting along a trailing singleton dimension is a no-op shape-wise.
1753  if (dim >= dv.length ())
1754  dv.resize (dim+1, 1);
1755 
1756  octave_idx_type ns = dv(dim);
1757  octave_idx_type iter = dv.numel () / ns;
1758  octave_idx_type stride = 1;
1759 
1760  for (int i = 0; i < dim; i++)
1761  stride *= dv(i);
1762 
1763  T *v = m.fortran_vec ();
1764  const T *ov = data ();
1765 
1766  octave_sort<T> lsort;
1767 
1768  if (mode != UNSORTED)
1769  lsort.set_compare (mode);
1770  else
1771  return m;
1772 
1773  if (stride == 1)
1774  {
1775  for (octave_idx_type j = 0; j < iter; j++)
1776  {
1777  // copy and partition out NaNs.
1778  // FIXME: impact on integer types noticeable?
1779  octave_idx_type kl = 0, ku = ns;
1780  for (octave_idx_type i = 0; i < ns; i++)
1781  {
1782  T tmp = ov[i];
1783  if (sort_isnan<T> (tmp))
1784  v[--ku] = tmp;
1785  else
1786  v[kl++] = tmp;
1787  }
1788 
1789  // sort.
1790  lsort.sort (v, kl);
1791 
1792  if (ku < ns)
1793  {
1794  // NaNs are in reverse order
1795  std::reverse (v + ku, v + ns);
1796  if (mode == DESCENDING)
1797  std::rotate (v, v + ku, v + ns);
1798  }
1799 
1800  v += ns;
1801  ov += ns;
1802  }
1803  }
1804  else
1805  {
1806  OCTAVE_LOCAL_BUFFER (T, buf, ns);
1807 
1808  for (octave_idx_type j = 0; j < iter; j++)
1809  {
      // Decompose j into (offset within page, page number).
1810  octave_idx_type offset = j;
1811  octave_idx_type offset2 = 0;
1812 
1813  while (offset >= stride)
1814  {
1815  offset -= stride;
1816  offset2++;
1817  }
1818 
1819  offset += offset2 * stride * ns;
1820 
1821  // gather and partition out NaNs.
1822  // FIXME: impact on integer types noticeable?
1823  octave_idx_type kl = 0, ku = ns;
1824  for (octave_idx_type i = 0; i < ns; i++)
1825  {
1826  T tmp = ov[i*stride + offset];
1827  if (sort_isnan<T> (tmp))
1828  buf[--ku] = tmp;
1829  else
1830  buf[kl++] = tmp;
1831  }
1832 
1833  // sort.
1834  lsort.sort (buf, kl);
1835 
1836  if (ku < ns)
1837  {
1838  // NaNs are in reverse order
1839  std::reverse (buf + ku, buf + ns);
1840  if (mode == DESCENDING)
1841  std::rotate (buf, buf + ku, buf + ns);
1842  }
1843 
1844  // scatter.
1845  for (octave_idx_type i = 0; i < ns; i++)
1846  v[i*stride + offset] = buf[i];
1847  }
1848  }
1849 
1850  return m;
1851 }
1852 
// Sort along dimension DIM, additionally returning in SIDX the
// permutation of original positions.  Mirrors sort(dim, mode) above,
// carrying an index array (vi / bufi) through every partition,
// reverse and rotate step so the two stay in lockstep.
// NOTE(review): the extraction dropped original line 1855 (rest of the
// signature, presumably `Array<T>::sort (Array<octave_idx_type>
// &sidx, int dim, ...`) and line 1942 (presumably
// `OCTAVE_LOCAL_BUFFER (octave_idx_type, bufi, ns);` -- without it
// `bufi` below would be undeclared).  Confirm against the real file.
1853 template <class T>
1854 Array<T>
1856  sortmode mode) const
1857 {
1858  if (dim < 0 || dim >= ndims ())
1859  {
1860  (*current_liboctave_error_handler)
1861  ("sort: invalid dimension");
1862  return Array<T> ();
1863  }
1864 
1865  Array<T> m (dims ());
1866 
1867  dim_vector dv = m.dims ();
1868 
1869  if (m.length () < 1)
1870  {
1871  sidx = Array<octave_idx_type> (dv);
1872  return m;
1873  }
1874 
1875  octave_idx_type ns = dv(dim);
1876  octave_idx_type iter = dv.numel () / ns;
1877  octave_idx_type stride = 1;
1878 
1879  for (int i = 0; i < dim; i++)
1880  stride *= dv(i);
1881 
1882  T *v = m.fortran_vec ();
1883  const T *ov = data ();
1884 
1885  octave_sort<T> lsort;
1886 
1887  sidx = Array<octave_idx_type> (dv);
1888  octave_idx_type *vi = sidx.fortran_vec ();
1889 
1890  if (mode != UNSORTED)
1891  lsort.set_compare (mode);
1892  else
1893  return m;
1894 
1895  if (stride == 1)
1896  {
1897  for (octave_idx_type j = 0; j < iter; j++)
1898  {
1899  // copy and partition out NaNs.
1900  // FIXME: impact on integer types noticeable?
1901  octave_idx_type kl = 0, ku = ns;
1902  for (octave_idx_type i = 0; i < ns; i++)
1903  {
1904  T tmp = ov[i];
1905  if (sort_isnan<T> (tmp))
1906  {
1907  --ku;
1908  v[ku] = tmp;
1909  vi[ku] = i;
1910  }
1911  else
1912  {
1913  v[kl] = tmp;
1914  vi[kl] = i;
1915  kl++;
1916  }
1917  }
1918 
1919  // sort.
1920  lsort.sort (v, vi, kl);
1921 
1922  if (ku < ns)
1923  {
1924  // NaNs are in reverse order
1925  std::reverse (v + ku, v + ns);
1926  std::reverse (vi + ku, vi + ns);
1927  if (mode == DESCENDING)
1928  {
1929  std::rotate (v, v + ku, v + ns);
1930  std::rotate (vi, vi + ku, vi + ns);
1931  }
1932  }
1933 
1934  v += ns;
1935  vi += ns;
1936  ov += ns;
1937  }
1938  }
1939  else
1940  {
1941  OCTAVE_LOCAL_BUFFER (T, buf, ns);
1943 
1944  for (octave_idx_type j = 0; j < iter; j++)
1945  {
      // Decompose j into (offset within page, page number).
1946  octave_idx_type offset = j;
1947  octave_idx_type offset2 = 0;
1948 
1949  while (offset >= stride)
1950  {
1951  offset -= stride;
1952  offset2++;
1953  }
1954 
1955  offset += offset2 * stride * ns;
1956 
1957  // gather and partition out NaNs.
1958  // FIXME: impact on integer types noticeable?
1959  octave_idx_type kl = 0, ku = ns;
1960  for (octave_idx_type i = 0; i < ns; i++)
1961  {
1962  T tmp = ov[i*stride + offset];
1963  if (sort_isnan<T> (tmp))
1964  {
1965  --ku;
1966  buf[ku] = tmp;
1967  bufi[ku] = i;
1968  }
1969  else
1970  {
1971  buf[kl] = tmp;
1972  bufi[kl] = i;
1973  kl++;
1974  }
1975  }
1976 
1977  // sort.
1978  lsort.sort (buf, bufi, kl);
1979 
1980  if (ku < ns)
1981  {
1982  // NaNs are in reverse order
1983  std::reverse (buf + ku, buf + ns);
1984  std::reverse (bufi + ku, bufi + ns);
1985  if (mode == DESCENDING)
1986  {
1987  std::rotate (buf, buf + ku, buf + ns);
1988  std::rotate (bufi, bufi + ku, bufi + ns);
1989  }
1990  }
1991 
1992  // scatter.
1993  for (octave_idx_type i = 0; i < ns; i++)
1994  v[i*stride + offset] = buf[i];
1995  for (octave_idx_type i = 0; i < ns; i++)
1996  vi[i*stride + offset] = bufi[i];
1997  }
1998  }
1999 
2000  return m;
2001 }
2002 
// Map a sortmode onto a raw comparison function pointer; returns 0
// for UNSORTED.  The unused parameters keep the signature compatible
// with overloads that validate the data first.
// NOTE(review): the extraction dropped original lines 2004 (return
// type, presumably `Array<T>::compare_fcn_type`), 2009 and 2011 (the
// two return statements, presumably returning
// octave_sort<T>::ascending_compare / descending_compare).  Confirm
// against the real Array.cc.
2003 template <class T>
2005 safe_comparator (sortmode mode, const Array<T>& /* a */,
2006  bool /* allow_chk */)
2007 {
2008  if (mode == ASCENDING)
2010  else if (mode == DESCENDING)
2012  else
2013  return 0;
2014 }
2015 
// Check whether the array (viewed linearly) is sorted.  With
// MODE == UNSORTED the direction is guessed by comparing the last
// element against the first, then verified; returns the verified mode
// or UNSORTED if the data is not sorted that way.
// NOTE(review): the extraction dropped original line 2018, presumably
// the signature `Array<T>::is_sorted (sortmode mode) const`.
2016 template <class T>
2017 sortmode
2019 {
2020  octave_sort<T> lsort;
2021 
2022  octave_idx_type n = numel ();
2023 
     // 0/1-element arrays are trivially sorted either way.
2024  if (n <= 1)
2025  return mode ? mode : ASCENDING;
2026 
2027  if (mode == UNSORTED)
2028  {
2029  // Auto-detect mode.
2030  compare_fcn_type compare
2031  = safe_comparator (ASCENDING, *this, false);
2032 
2033  if (compare (elem (n-1), elem (0)))
2034  mode = DESCENDING;
2035  else
2036  mode = ASCENDING;
2037  }
2038 
2039  if (mode != UNSORTED)
2040  {
2041  lsort.set_compare (safe_comparator (mode, *this, false));
2042 
2043  if (! lsort.is_sorted (data (), n))
2044  mode = UNSORTED;
2045  }
2046 
2047  return mode;
2048 
2049 }
2050 
// Return the row permutation that sorts the rows of this 2-D array
// lexicographically in the given mode.
// NOTE(review): the extraction dropped original lines 2052-2053
// (return type and signature, presumably `Array<octave_idx_type>
// Array<T>::sort_rows_idx (sortmode mode) const`) and 2055
// (presumably the declaration `Array<octave_idx_type> idx;`).
2051 template <class T>
2054 {
2056 
2057  octave_sort<T> lsort (safe_comparator (mode, *this, true));
2058 
2059  octave_idx_type r = rows (), c = cols ();
2060 
2061  idx = Array<octave_idx_type> (dim_vector (r, 1));
2062 
2063  lsort.sort_rows (data (), idx.fortran_vec (), r, c);
2064 
2065  return idx;
2066 }
2067 
2068 
// Check whether the rows of this 2-D array are sorted
// lexicographically.  With MODE == UNSORTED the direction is guessed
// per column from the first and last rows (bailing out to UNSORTED on
// a contradiction), then verified with is_sorted_rows.
// NOTE(review): the extraction dropped original line 2071, presumably
// the signature `Array<T>::is_sorted_rows (sortmode mode) const`.
2069 template <class T>
2070 sortmode
2072 {
2073  octave_sort<T> lsort;
2074 
2075  octave_idx_type r = rows (), c = cols ();
2076 
     // 0/1 rows, or no columns, is trivially sorted either way.
2077  if (r <= 1 || c == 0)
2078  return mode ? mode : ASCENDING;
2079 
2080  if (mode == UNSORTED)
2081  {
2082  // Auto-detect mode.
2083  compare_fcn_type compare
2084  = safe_comparator (ASCENDING, *this, false);
2085 
2086  octave_idx_type i;
2087  for (i = 0; i < cols (); i++)
2088  {
2089  T l = elem (0, i), u = elem (rows () - 1, i);
2090  if (compare (l, u))
2091  {
2092  if (mode == DESCENDING)
2093  {
2094  mode = UNSORTED;
2095  break;
2096  }
2097  else
2098  mode = ASCENDING;
2099  }
2100  else if (compare (u, l))
2101  {
2102  if (mode == ASCENDING)
2103  {
2104  mode = UNSORTED;
2105  break;
2106  }
2107  else
2108  mode = DESCENDING;
2109  }
2110  }
      // All columns tied: any direction fits; default to ASCENDING.
2111  if (mode == UNSORTED && i == cols ())
2112  mode = ASCENDING;
2113  }
2114 
2115  if (mode != UNSORTED)
2116  {
2117  lsort.set_compare (safe_comparator (mode, *this, false));
2118 
2119  if (! lsort.is_sorted_rows (data (), r, c))
2120  mode = UNSORTED;
2121  }
2122 
2123  return mode;
2124 
2125 }
2126 
2127 // Do a binary lookup in a sorted array.
// Returns the insertion position of VALUE in *this (assumed sorted);
// with MODE == UNSORTED the direction is auto-detected from the
// first and last elements.
// NOTE(review): the extraction dropped original line 2129, presumably
// the return type `octave_idx_type`.
2128 template <class T>
2130 Array<T>::lookup (const T& value, sortmode mode) const
2131 {
2132  octave_idx_type n = numel ();
2133  octave_sort<T> lsort;
2134 
2135  if (mode == UNSORTED)
2136  {
2137  // auto-detect mode
2138  if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2139  mode = DESCENDING;
2140  else
2141  mode = ASCENDING;
2142  }
2143 
2144  lsort.set_compare (mode);
2145 
2146  return lsort.lookup (data (), n, value);
2147 }
2148 
// Vector lookup: find insertion positions for all of VALUES in the
// sorted table *this.  If VALUES is itself sorted and large enough,
// an O(M+N) merge-style lookup is used; otherwise each value is
// looked up by binary search, O(M*log2(N)).
// NOTE(review): the extraction dropped original line 2150, presumably
// the return type `Array<octave_idx_type>`.
2149 template <class T>
2151 Array<T>::lookup (const Array<T>& values, sortmode mode) const
2152 {
2153  octave_idx_type n = numel (), nval = values.numel ();
2154  octave_sort<T> lsort;
2155  Array<octave_idx_type> idx (values.dims ());
2156 
2157  if (mode == UNSORTED)
2158  {
2159  // auto-detect mode
2160  if (n > 1 && lsort.descending_compare (elem (0), elem (n-1)))
2161  mode = DESCENDING;
2162  else
2163  mode = ASCENDING;
2164  }
2165 
2166  lsort.set_compare (mode);
2167 
2168  // This determines the split ratio between the O(M*log2(N)) and O(M+N)
2169  // algorithms.
2170  static const double ratio = 1.0;
2171  sortmode vmode = UNSORTED;
2172 
2173  // Attempt the O(M+N) algorithm if M is large enough.
2174  if (nval > ratio * n / xlog2 (n + 1.0))
2175  {
2176  vmode = values.is_sorted ();
2177  // The table must not contain a NaN.
2178  if ((vmode == ASCENDING && sort_isnan<T> (values(nval-1)))
2179  || (vmode == DESCENDING && sort_isnan<T> (values(0))))
2180  vmode = UNSORTED;
2181  }
2182 
2183  if (vmode != UNSORTED)
2184  lsort.lookup_sorted (data (), n, values.data (), nval,
2185  idx.fortran_vec (), vmode != mode);
2186  else
2187  lsort.lookup (data (), n, values.data (), nval, idx.fortran_vec ());
2188 
2189  return idx;
2190 }
2191 
// Count the nonzero elements, i.e. those that compare unequal to a
// value-initialized T.
// NOTE(review): the extraction dropped original line 2193, presumably
// the return type `octave_idx_type`.
2192 template <class T>
2194 Array<T>::nnz (void) const
2195 {
2196  const T *src = data ();
2197  octave_idx_type nel = nelem (), retval = 0;
2198  const T zero = T ();
2199  for (octave_idx_type i = 0; i < nel; i++)
2200  if (src[i] != zero)
2201  retval++;
2202 
2203  return retval;
2204 }
2205 
// Return the linear indices of nonzero elements.  N < 0 (or N >= nel)
// means "all": count first, then fill an exact-size result.
// Otherwise at most N indices are returned, scanning from the front,
// or from the back when BACKWARD is set (result still ascending).
// The result's shape is fixed up at the end for Matlab compatibility.
// NOTE(review): the extraction dropped original line 2207, presumably
// the return type `Array<octave_idx_type>`.
2206 template <class T>
2208 Array<T>::find (octave_idx_type n, bool backward) const
2209 {
2210  Array<octave_idx_type> retval;
2211  const T *src = data ();
2212  octave_idx_type nel = nelem ();
2213  const T zero = T ();
2214  if (n < 0 || n >= nel)
2215  {
2216  // We want all elements, which means we'll almost surely need
2217  // to resize. So count first, then allocate array of exact size.
2218  octave_idx_type cnt = 0;
2219  for (octave_idx_type i = 0; i < nel; i++)
2220  cnt += src[i] != zero;
2221 
2222  retval.clear (cnt, 1);
2223  octave_idx_type *dest = retval.fortran_vec ();
2224  for (octave_idx_type i = 0; i < nel; i++)
2225  if (src[i] != zero) *dest++ = i;
2226  }
2227  else
2228  {
2229  // We want a fixed max number of elements, usually small. So be
2230  // optimistic, alloc the array in advance, and then resize if
2231  // needed.
2232  retval.clear (n, 1);
2233  if (backward)
2234  {
2235  // Do the search as a series of successive single-element searches.
2236  octave_idx_type k = 0, l = nel - 1;
2237  for (; k < n; k++)
2238  {
2239  for (; l >= 0 && src[l] == zero; l--) ;
2240  if (l >= 0)
2241  retval(k) = l--;
2242  else
2243  break;
2244  }
2245  if (k < n)
2246  retval.resize2 (k, 1);
2247  octave_idx_type *rdata = retval.fortran_vec ();
      // Backward scan collected indices descending; restore ascending.
2248  std::reverse (rdata, rdata + k);
2249  }
2250  else
2251  {
2252  // Do the search as a series of successive single-element searches.
2253  octave_idx_type k = 0, l = 0;
2254  for (; k < n; k++)
2255  {
2256  for (; l != nel && src[l] == zero; l++) ;
2257  if (l != nel)
2258  retval(k) = l++;
2259  else
2260  break;
2261  }
2262  if (k < n)
2263  retval.resize2 (k, 1);
2264  }
2265  }
2266 
2267  // Fixup return dimensions, for Matlab compatibility.
2268  // find (zeros (0,0)) -> zeros (0,0)
2269  // find (zeros (1,0)) -> zeros (1,0)
2270  // find (zeros (0,1)) -> zeros (0,1)
2271  // find (zeros (0,X)) -> zeros (0,1)
2272  // find (zeros (1,1)) -> zeros (0,0) !!!! WHY?
2273  // find (zeros (0,1,0)) -> zeros (0,0)
2274  // find (zeros (0,1,0,1)) -> zeros (0,0) etc
2275 
2276  if ((numel () == 1 && retval.is_empty ())
2277  || (rows () == 0 && dims ().numel (1) == 0))
2278  retval.dimensions = dim_vector ();
2279  else if (rows () == 1 && ndims () == 2)
2280  retval.dimensions = dim_vector (1, retval.length ());
2281 
2282  return retval;
2283 }
2284 
// Partial sort: for each run along dimension DIM, return the elements
// whose sorted positions are selected by N (a scalar or a contiguous,
// possibly reversed, range).  NaNs are partitioned out before
// selection and compensated for in the DESCENDING case.
// NOTE(review): the extraction dropped several original lines here:
// 2305 (a statement following `dv(dim) = ...` -- content unknown),
// and 2318/2322, which are presumably the `case` labels of the switch
// on n.idx_class () (scalar and range classes).  Confirm against the
// real Array.cc before relying on this listing.
2285 template <class T>
2286 Array<T>
2287 Array<T>::nth_element (const idx_vector& n, int dim) const
2288 {
2289  if (dim < 0)
2290  {
2291  (*current_liboctave_error_handler)
2292  ("nth_element: invalid dimension");
2293  return Array<T> ();
2294  }
2295 
2296  dim_vector dv = dims ();
2297  if (dim >= dv.length ())
2298  dv.resize (dim+1, 1);
2299 
2300  octave_idx_type ns = dv(dim);
2301 
2302  octave_idx_type nn = n.length (ns);
2303 
     // Result keeps all dims except DIM, which shrinks to the selection.
2304  dv(dim) = std::min (nn, ns);
2306  dim = std::min (dv.length (), dim);
2307 
2308  Array<T> m (dv);
2309 
2310  if (m.numel () == 0)
2311  return m;
2312 
2313  sortmode mode = UNSORTED;
2314  octave_idx_type lo = 0;
2315 
     // Decode N: forward selections run ASCENDING from lo; a step of -1
     // selects DESCENDING with lo measured from the other end.
2316  switch (n.idx_class ())
2317  {
2319  mode = ASCENDING;
2320  lo = n(0);
2321  break;
2323  {
2324  octave_idx_type inc = n.increment ();
2325  if (inc == 1)
2326  {
2327  mode = ASCENDING;
2328  lo = n(0);
2329  }
2330  else if (inc == -1)
2331  {
2332  mode = DESCENDING;
2333  lo = ns - 1 - n(0);
2334  }
2335  }
2336  default:
2337  break;
2338  }
2339 
2340  if (mode == UNSORTED)
2341  {
2342  (*current_liboctave_error_handler)
2343  ("nth_element: n must be a scalar or a contiguous range");
2344  return Array<T> ();
2345  }
2346 
2347  octave_idx_type up = lo + nn;
2348 
2349  if (lo < 0 || up > ns)
2350  {
2351  (*current_liboctave_error_handler)
2352  ("nth_element: invalid element index");
2353  return Array<T> ();
2354  }
2355 
2356  octave_idx_type iter = numel () / ns;
2357  octave_idx_type stride = 1;
2358 
2359  for (int i = 0; i < dim; i++)
2360  stride *= dv(i);
2361 
2362  T *v = m.fortran_vec ();
2363  const T *ov = data ();
2364 
2365  OCTAVE_LOCAL_BUFFER (T, buf, ns);
2366 
2367  octave_sort<T> lsort;
2368  lsort.set_compare (mode);
2369 
2370  for (octave_idx_type j = 0; j < iter; j++)
2371  {
2372  octave_idx_type kl = 0, ku = ns;
2373 
2374  if (stride == 1)
2375  {
2376  // copy without NaNs.
2377  // FIXME: impact on integer types noticeable?
2378  for (octave_idx_type i = 0; i < ns; i++)
2379  {
2380  T tmp = ov[i];
2381  if (sort_isnan<T> (tmp))
2382  buf[--ku] = tmp;
2383  else
2384  buf[kl++] = tmp;
2385  }
2386 
2387  ov += ns;
2388  }
2389  else
2390  {
2391  octave_idx_type offset = j % stride;
2392  // copy without NaNs.
2393  // FIXME: impact on integer types noticeable?
2394  for (octave_idx_type i = 0; i < ns; i++)
2395  {
2396  T tmp = ov[offset + i*stride];
2397  if (sort_isnan<T> (tmp))
2398  buf[--ku] = tmp;
2399  else
2400  buf[kl++] = tmp;
2401  }
2402 
2403  if (offset == stride-1)
2404  ov += ns*stride;
2405  }
2406 
2407  if (ku == ns)
2408  lsort.nth_element (buf, ns, lo, up);
2409  else if (mode == ASCENDING)
2410  lsort.nth_element (buf, ku, lo, std::min (ku, up));
2411  else
2412  {
      // DESCENDING with NaNs present: shift the selection window down
      // by the NaN count, then rotate NaNs back to the front.
2413  octave_idx_type nnan = ns - ku;
2414  octave_idx_type zero = 0;
2415  lsort.nth_element (buf, ku, std::max (lo - nnan, zero),
2416  std::max (up - nnan, zero));
2417  std::rotate (buf, buf + ku, buf + ns);
2418  }
2419 
2420  if (stride == 1)
2421  {
2422  for (octave_idx_type i = 0; i < nn; i++)
2423  v[i] = buf[lo + i];
2424 
2425  v += nn;
2426  }
2427  else
2428  {
2429  octave_idx_type offset = j % stride;
2430  for (octave_idx_type i = 0; i < nn; i++)
2431  v[offset + stride * i] = buf[lo + i];
2432  if (offset == stride-1)
2433  v += nn*stride;
2434  }
2435  }
2436 
2437  return m;
2438 }
2439 
2440 
// Instantiate the octave_sort machinery for element type T.
2441 #define INSTANTIATE_ARRAY_SORT(T) template class OCTAVE_API octave_sort<T>;
2442 
// For element types that cannot be ordered: specialize every sorting
// entry point to a harmless stub (identity result, UNSORTED, empty
// index arrays) instead of instantiating octave_sort<T>.
2443 #define NO_INSTANTIATE_ARRAY_SORT(T) \
2444  \
2445 template <> Array<T> \
2446 Array<T>::sort (int, sortmode) const { return *this; } \
2447  \
2448 template <> Array<T> \
2449 Array<T>::sort (Array<octave_idx_type> &sidx, int, sortmode) const \
2450 { sidx = Array<octave_idx_type> (); return *this; } \
2451  \
2452 template <> sortmode \
2453 Array<T>::is_sorted (sortmode) const \
2454 { return UNSORTED; } \
2455  \
2456 Array<T>::compare_fcn_type \
2457 safe_comparator (sortmode, const Array<T>&, bool) \
2458 { return 0; } \
2459  \
2460 template <> Array<octave_idx_type> \
2461 Array<T>::sort_rows_idx (sortmode) const \
2462 { return Array<octave_idx_type> (); } \
2463  \
2464 template <> sortmode \
2465 Array<T>::is_sorted_rows (sortmode) const \
2466 { return UNSORTED; } \
2467  \
2468 template <> octave_idx_type \
2469 Array<T>::lookup (T const &, sortmode) const \
2470 { return 0; } \
2471 template <> Array<octave_idx_type> \
2472 Array<T>::lookup (const Array<T>&, sortmode) const \
2473 { return Array<octave_idx_type> (); } \
2474  \
2475 template <> octave_idx_type \
2476 Array<T>::nnz (void) const\
2477 { return 0; } \
2478 template <> Array<octave_idx_type> \
2479 Array<T>::find (octave_idx_type, bool) const\
2480 { return Array<octave_idx_type> (); } \
2481  \
2482 template <> Array<T> \
2483 Array<T>::nth_element (const idx_vector&, int) const { return Array<T> (); } \
2484 
2485 
// Matlab-style diag: for a matrix, extract the K-th diagonal as a
// column vector; for a vector, build a square matrix with the vector
// on the K-th diagonal (filled with resize_fill_value elsewhere).
// NOTE(review): the extraction dropped original line 2488, presumably
// the signature `Array<T>::diag (octave_idx_type k) const`, and line
// 2534, presumably `(*current_liboctave_error_handler)` preceding the
// out-of-range message.
2486 template <class T>
2487 Array<T>
2489 {
2490  dim_vector dv = dims ();
2491  octave_idx_type nd = dv.length ();
2492  Array<T> d;
2493 
2494  if (nd > 2)
2495  (*current_liboctave_error_handler) ("Matrix must be 2-dimensional");
2496  else
2497  {
2498  octave_idx_type nnr = dv (0);
2499  octave_idx_type nnc = dv (1);
2500 
2501  if (nnr == 0 && nnc == 0)
2502  ; // do nothing for empty matrix
2503  else if (nnr != 1 && nnc != 1)
2504  {
2505  // Extract diag from matrix
      // Shrink the usable extent by |k| on the appropriate side.
2506  if (k > 0)
2507  nnc -= k;
2508  else if (k < 0)
2509  nnr += k;
2510 
2511  if (nnr > 0 && nnc > 0)
2512  {
2513  octave_idx_type ndiag = (nnr < nnc) ? nnr : nnc;
2514 
2515  d.resize (dim_vector (ndiag, 1));
2516 
2517  if (k > 0)
2518  {
2519  for (octave_idx_type i = 0; i < ndiag; i++)
2520  d.xelem (i) = elem (i, i+k);
2521  }
2522  else if (k < 0)
2523  {
2524  for (octave_idx_type i = 0; i < ndiag; i++)
2525  d.xelem (i) = elem (i-k, i);
2526  }
2527  else
2528  {
2529  for (octave_idx_type i = 0; i < ndiag; i++)
2530  d.xelem (i) = elem (i, i);
2531  }
2532  }
2533  else
2535  ("diag: requested diagonal out of range");
2536  }
2537  else
2538  {
2539  // Create diag matrix from vector
2540  octave_idx_type roff = 0;
2541  octave_idx_type coff = 0;
2542  if (k > 0)
2543  {
2544  roff = 0;
2545  coff = k;
2546  }
2547  else if (k < 0)
2548  {
2549  roff = -k;
2550  coff = 0;
2551  }
2552 
2553  if (nnr == 1)
2554  {
2555  octave_idx_type n = nnc + std::abs (k);
2556  d = Array<T> (dim_vector (n, n), resize_fill_value ());
2557 
2558  for (octave_idx_type i = 0; i < nnc; i++)
2559  d.xelem (i+roff, i+coff) = elem (0, i);
2560  }
2561  else
2562  {
2563  octave_idx_type n = nnr + std::abs (k);
2564  d = Array<T> (dim_vector (n, n), resize_fill_value ());
2565 
2566  for (octave_idx_type i = 0; i < nnr; i++)
2567  d.xelem (i+roff, i+coff) = elem (i, 0);
2568  }
2569  }
2570  }
2571 
2572  return d;
2573 }
2574 
// Build an M-by-N matrix with this vector on the main diagonal and
// resize_fill_value elsewhere; *this must be a 2-D row or column
// vector.
// NOTE(review): the extraction dropped original line 2577, presumably
// the signature `Array<T>::diag (octave_idx_type m, octave_idx_type n)
// const`, and line 2589, presumably
// `(*current_liboctave_error_handler)` before the error message.
2575 template <class T>
2576 Array<T>
2578 {
2579  Array<T> retval;
2580 
2581  if (ndims () == 2 && (rows () == 1 || cols () == 1))
2582  {
2583  retval.resize (dim_vector (m, n), resize_fill_value ());
2584 
2585  for (octave_idx_type i = 0; i < numel (); i++)
2586  retval.xelem (i, i) = xelem (i);
2587  }
2588  else
2590  ("cat: invalid dimension");
2591 
2592  return retval;
2593 }
2594 
// Concatenate N arrays along DIM.  DIM of -1/-2 selects the
// horzcat/vertcat rule (dim_vector::hvcat) with DIM remapped to
// 0/1.  After validating that all dimensions agree under the chosen
// rule, each operand is copied into its slot of the result with an
// indexed assignment along DIM.
// NOTE(review): the extraction dropped original line 2669, presumably
// the declaration of `idxa` used below (an Array<idx_vector> of nidx
// colons).  Confirm against the real Array.cc.
2595 template <class T>
2596 Array<T>
2597 Array<T>::cat (int dim, octave_idx_type n, const Array<T> *array_list)
2598 {
2599  // Default concatenation.
2600  bool (dim_vector::*concat_rule) (const dim_vector&, int) = &dim_vector::concat;
2601 
2602  if (dim == -1 || dim == -2)
2603  {
2604  concat_rule = &dim_vector::hvcat;
2605  dim = -dim - 1;
2606  }
2607  else if (dim < 0)
2608  (*current_liboctave_error_handler)
2609  ("cat: invalid dimension");
2610 
2611  if (n == 1)
2612  return array_list[0];
2613  else if (n == 0)
2614  return Array<T> ();
2615 
2616  // Special case:
2617  //
2618  // cat (dim, [], ..., [], A, ...)
2619  //
2620  // with dim > 2, A not 0x0, and at least three arguments to
2621  // concatenate is equivalent to
2622  //
2623  // cat (dim, A, ...)
2624  //
2625  // Note that this check must be performed here because for full-on
2626  // braindead Matlab compatibility, we need to have things like
2627  //
2628  // cat (3, [], [], A)
2629  //
2630  // succeed, but to have things like
2631  //
2632  // cat (3, cat (3, [], []), A)
2633  // cat (3, zeros (0, 0, 2), A)
2634  //
2635  // fail. See also bug report #31615.
2636 
2637  octave_idx_type istart = 0;
2638 
2639  if (n > 2 && dim > 1)
2640  {
2641  for (octave_idx_type i = 0; i < n; i++)
2642  {
2643  dim_vector dv = array_list[i].dims ();
2644 
2645  if (dv.zero_by_zero ())
2646  istart++;
2647  else
2648  break;
2649  }
2650 
2651  // Don't skip any initial aguments if they are all empty.
2652  if (istart >= n)
2653  istart = 0;
2654  }
2655 
2656  dim_vector dv = array_list[istart++].dims ();
2657 
2658  for (octave_idx_type i = istart; i < n; i++)
2659  if (! (dv.*concat_rule) (array_list[i].dims (), dim))
2660  (*current_liboctave_error_handler)
2661  ("cat: dimension mismatch");
2662 
2663  Array<T> retval (dv);
2664 
2665  if (retval.is_empty ())
2666  return retval;
2667 
2668  int nidx = std::max (dv.length (), dim + 1);
2670  octave_idx_type l = 0;
2671 
2672  for (octave_idx_type i = 0; i < n; i++)
2673  {
2674  // NOTE: This takes some thinking, but no matter what the above rules
2675  // are, an empty array can always be skipped at this point, because
2676  // the result dimensions are already determined, and there is no way
2677  // an empty array may contribute a nonzero piece along the dimension
2678  // at this point, unless an empty array can be promoted to a non-empty
2679  // one (which makes no sense). I repeat, *no way*, think about it.
2680  if (array_list[i].is_empty ())
2681  continue;
2682 
2683  octave_quit ();
2684 
2685  octave_idx_type u;
2686  if (dim < array_list[i].ndims ())
2687  u = l + array_list[i].dims ()(dim);
2688  else
2689  u = l + 1;
2690 
2691  idxa(dim) = idx_vector (l, u);
2692 
2693  retval.assign (idxa, array_list[i]);
2694 
2695  l = u;
2696  }
2697 
2698  return retval;
2699 }
2700 
2701 template <class T>
2702 void
2703 Array<T>::print_info (std::ostream& os, const std::string& prefix) const
2704 {
2705  os << prefix << "rep address: " << rep << '\n'
2706  << prefix << "rep->len: " << rep->len << '\n'
2707  << prefix << "rep->data: " << static_cast<void *> (rep->data) << '\n'
2708  << prefix << "rep->count: " << rep->count << '\n'
2709  << prefix << "slice_data: " << static_cast<void *> (slice_data) << '\n'
2710  << prefix << "slice_len: " << slice_len << '\n';
2711 
2712  // 2D info:
2713  //
2714  // << pefix << "rows: " << rows () << "\n"
2715  // << prefix << "cols: " << cols () << "\n";
2716 }
2717 
2718 template <class T>
2720 {
2721  bool retval = dimensions == dv;
2722  if (retval)
2723  dimensions = dv;
2724 
2725  return retval;
2726 }
2727 
2728 template <class T>
2730 {
2731  // This guards against accidental implicit instantiations.
2732  // Array<T> instances should always be explicit and use INSTANTIATE_ARRAY.
2733  T::__xXxXx__ ();
2734 }
2735 
// Explicitly instantiate Array<T> for element type T with export/import
// decoration API.  The empty instantiation_guard specialization defuses
// the compile-time guard above for this T.
#define INSTANTIATE_ARRAY(T, API) \
  template <> void Array<T>::instantiation_guard () { } \
  template class API Array<T>
2739 
2740 // FIXME: is this used?
2741 
template <class T>
std::ostream&
operator << (std::ostream& os, const Array<T>& a)
{
  // Debugging output for an Array: print the number of dimensions, the
  // dimension string, and then every element -- as a single matrix when
  // the array is effectively 2-D, or one 2-D page at a time otherwise.

  dim_vector a_dims = a.dims ();

  int n_dims = a_dims.length ();

  os << n_dims << "-dimensional array";

  if (n_dims)
    os << " (" << a_dims.str () << ")";

  os <<"\n\n";

  if (n_dims)
    {
      os << "data:";

      // Running element index, one entry per dimension, starting at all
      // zeros.
      Array<octave_idx_type> ra_idx (dim_vector (n_dims, 1), 0);

      // Number of times the first 2d-array is to be displayed.

      // m is the product of all dimensions beyond the first two, i.e.
      // the number of 2-D pages in the array.
      octave_idx_type m = 1;
      for (int i = 2; i < n_dims; i++)
        m *= a_dims(i);

      if (m == 1)
        {
          // Single page: print as one vector or matrix.

          octave_idx_type rows = 0;
          octave_idx_type cols = 0;

          switch (n_dims)
            {
            case 2:
              // Matrix: one output line per row.
              rows = a_dims(0);
              cols = a_dims(1);

              for (octave_idx_type j = 0; j < rows; j++)
                {
                  ra_idx(0) = j;
                  for (octave_idx_type k = 0; k < cols; k++)
                    {
                      ra_idx(1) = k;
                      os << " " << a.elem (ra_idx);
                    }
                  os << "\n";
                }
              break;

            default:
              // Not 2-D (e.g. higher rank with singleton trailing
              // dimensions): print all elements of the first dimension
              // on a single line.
              rows = a_dims(0);

              for (octave_idx_type k = 0; k < rows; k++)
                {
                  ra_idx(0) = k;
                  os << " " << a.elem (ra_idx);
                }
              break;
            }

          os << "\n";
        }
      else
        {
          // Multiple pages: print each 2-D slice preceded by its
          // higher-dimension indices in Octave style, e.g. "(:,:,2) =".

          octave_idx_type rows = a_dims(0);
          octave_idx_type cols = a_dims(1);

          for (int i = 0; i < m; i++)
            {
              os << "\n(:,:,";

              // Page indices are printed 1-based.
              for (int j = 2; j < n_dims - 1; j++)
                os << ra_idx(j) + 1 << ",";

              os << ra_idx(n_dims - 1) + 1 << ") = \n";

              for (octave_idx_type j = 0; j < rows; j++)
                {
                  ra_idx(0) = j;

                  for (octave_idx_type k = 0; k < cols; k++)
                    {
                      ra_idx(1) = k;
                      os << " " << a.elem (ra_idx);
                    }

                  os << "\n";
                }

              os << "\n";

              // Advance ra_idx to the next page (dimensions 2 and up),
              // except after the last one.
              if (i != m - 1)
                increment_index (ra_idx, a_dims, 2);
            }
        }
    }

  return os;
}