GNU Octave
4.2.1
A high-level interpreted language, primarily intended for numerical computations, mostly compatible with Matlab
Main Page
Namespaces
Classes
Files
File List
File Members
All
Classes
Namespaces
Files
Functions
Variables
Typedefs
Enumerations
Enumerator
Properties
Friends
Macros
Pages
liboctave
operators
Sparse-op-defs.h
Go to the documentation of this file.
1
/*
2
3
Copyright (C) 2004-2017 David Bateman
4
Copyright (C) 1998-2004 Andy Adler
5
Copyright (C) 2008 Jaroslav Hajek
6
7
This file is part of Octave.
8
9
Octave is free software; you can redistribute it and/or modify it
10
under the terms of the GNU General Public License as published by the
11
Free Software Foundation; either version 3 of the License, or (at your
12
option) any later version.
13
14
Octave is distributed in the hope that it will be useful, but WITHOUT
15
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17
for more details.
18
19
You should have received a copy of the GNU General Public License
20
along with Octave; see the file COPYING. If not, see
21
<http://www.gnu.org/licenses/>.
22
23
*/
24
25
#if ! defined (octave_Sparse_op_defs_h)
#define octave_Sparse_op_defs_h 1

#include "octave-config.h"

#include "Array-util.h"
#include "oct-locbuf.h"
#include "mx-inlines.cc"
33
34
// sparse matrix by scalar operations.
35
36
#define SPARSE_SMS_BIN_OP_1(R, F, OP, M, S) \
37
R \
38
F (const M& m, const S& s) \
39
{ \
40
octave_idx_type nr = m.rows (); \
41
octave_idx_type nc = m.cols (); \
42
\
43
R r (nr, nc, (0.0 OP s)); \
44
\
45
for (octave_idx_type j = 0; j < nc; j++) \
46
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
47
r.xelem (m.ridx (i), j) = m.data (i) OP s; \
48
return r; \
49
}
50
51
#define SPARSE_SMS_BIN_OP_2(R, F, OP, M, S) \
52
R \
53
F (const M& m, const S& s) \
54
{ \
55
octave_idx_type nr = m.rows (); \
56
octave_idx_type nc = m.cols (); \
57
octave_idx_type nz = m.nnz (); \
58
\
59
R r (nr, nc, nz); \
60
\
61
for (octave_idx_type i = 0; i < nz; i++) \
62
{ \
63
r.xdata (i) = m.data (i) OP s; \
64
r.xridx (i) = m.ridx (i); \
65
} \
66
for (octave_idx_type i = 0; i < nc + 1; i++) \
67
r.xcidx (i) = m.cidx (i); \
68
\
69
r.maybe_compress (true); \
70
return r; \
71
}
72
73
#define SPARSE_SMS_BIN_OPS(R1, R2, M, S) \
74
SPARSE_SMS_BIN_OP_1 (R1, operator +, +, M, S) \
75
SPARSE_SMS_BIN_OP_1 (R1, operator -, -, M, S) \
76
SPARSE_SMS_BIN_OP_2 (R2, operator *, *, M, S) \
77
SPARSE_SMS_BIN_OP_2 (R2, operator /, /, M, S)
78
79
#define SPARSE_SMS_CMP_OP(F, OP, M, MZ, MC, S, SZ, SC) \
80
SparseBoolMatrix \
81
F (const M& m, const S& s) \
82
{ \
83
octave_idx_type nr = m.rows (); \
84
octave_idx_type nc = m.cols (); \
85
SparseBoolMatrix r; \
86
\
87
if (MC (MZ) OP SC (s)) \
88
{ \
89
r = SparseBoolMatrix (nr, nc, true); \
90
for (octave_idx_type j = 0; j < nc; j++) \
91
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
92
if (! (MC (m.data (i)) OP SC (s))) \
93
r.data (m.ridx (i) + j * nr) = false; \
94
r.maybe_compress (true); \
95
} \
96
else \
97
{ \
98
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
99
r.cidx (0) = static_cast<octave_idx_type> (0); \
100
octave_idx_type nel = 0; \
101
for (octave_idx_type j = 0; j < nc; j++) \
102
{ \
103
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
104
if (MC (m.data (i)) OP SC (s)) \
105
{ \
106
r.ridx (nel) = m.ridx (i); \
107
r.data (nel++) = true; \
108
} \
109
r.cidx (j + 1) = nel; \
110
} \
111
r.maybe_compress (false); \
112
} \
113
return r; \
114
}
115
116
#define SPARSE_SMS_CMP_OPS(M, MZ, CM, S, SZ, CS) \
117
SPARSE_SMS_CMP_OP (mx_el_lt, <, M, MZ, , S, SZ, ) \
118
SPARSE_SMS_CMP_OP (mx_el_le, <=, M, MZ, , S, SZ, ) \
119
SPARSE_SMS_CMP_OP (mx_el_ge, >=, M, MZ, , S, SZ, ) \
120
SPARSE_SMS_CMP_OP (mx_el_gt, >, M, MZ, , S, SZ, ) \
121
SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, MZ, , S, SZ, ) \
122
SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, MZ, , S, SZ, )
123
124
#define SPARSE_SMS_EQNE_OPS(M, MZ, CM, S, SZ, CS) \
125
SPARSE_SMS_CMP_OP (mx_el_eq, ==, M, MZ, , S, SZ, ) \
126
SPARSE_SMS_CMP_OP (mx_el_ne, !=, M, MZ, , S, SZ, )
127
128
#define SPARSE_SMS_BOOL_OP(F, OP, M, S, LHS_ZERO, RHS_ZERO) \
129
SparseBoolMatrix \
130
F (const M& m, const S& s) \
131
{ \
132
octave_idx_type nr = m.rows (); \
133
octave_idx_type nc = m.cols (); \
134
SparseBoolMatrix r; \
135
\
136
if (nr > 0 && nc > 0) \
137
{ \
138
if (LHS_ZERO OP (s != RHS_ZERO)) \
139
{ \
140
r = SparseBoolMatrix (nr, nc, true); \
141
for (octave_idx_type j = 0; j < nc; j++) \
142
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
143
if (! ((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO))) \
144
r.data (m.ridx (i) + j * nr) = false; \
145
r.maybe_compress (true); \
146
} \
147
else \
148
{ \
149
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
150
r.cidx (0) = static_cast<octave_idx_type> (0); \
151
octave_idx_type nel = 0; \
152
for (octave_idx_type j = 0; j < nc; j++) \
153
{ \
154
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
155
if ((m.data (i) != LHS_ZERO) OP (s != RHS_ZERO)) \
156
{ \
157
r.ridx (nel) = m.ridx (i); \
158
r.data (nel++) = true; \
159
} \
160
r.cidx (j + 1) = nel; \
161
} \
162
r.maybe_compress (false); \
163
} \
164
} \
165
return r; \
166
}
167
168
#define SPARSE_SMS_BOOL_OPS2(M, S, LHS_ZERO, RHS_ZERO) \
169
SPARSE_SMS_BOOL_OP (mx_el_and, &&, M, S, LHS_ZERO, RHS_ZERO) \
170
SPARSE_SMS_BOOL_OP (mx_el_or, ||, M, S, LHS_ZERO, RHS_ZERO)
171
172
#define SPARSE_SMS_BOOL_OPS(M, S, ZERO) \
173
SPARSE_SMS_BOOL_OPS2(M, S, ZERO, ZERO)
174
175
// scalar by sparse matrix operations.
176
177
#define SPARSE_SSM_BIN_OP_1(R, F, OP, S, M) \
178
R \
179
F (const S& s, const M& m) \
180
{ \
181
octave_idx_type nr = m.rows (); \
182
octave_idx_type nc = m.cols (); \
183
\
184
R r (nr, nc, (s OP 0.0)); \
185
\
186
for (octave_idx_type j = 0; j < nc; j++) \
187
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
188
r.xelem (m.ridx (i), j) = s OP m.data (i); \
189
\
190
return r; \
191
}
192
193
#define SPARSE_SSM_BIN_OP_2(R, F, OP, S, M) \
194
R \
195
F (const S& s, const M& m) \
196
{ \
197
octave_idx_type nr = m.rows (); \
198
octave_idx_type nc = m.cols (); \
199
octave_idx_type nz = m.nnz (); \
200
\
201
R r (nr, nc, nz); \
202
\
203
for (octave_idx_type i = 0; i < nz; i++) \
204
{ \
205
r.xdata (i) = s OP m.data (i); \
206
r.xridx (i) = m.ridx (i); \
207
} \
208
for (octave_idx_type i = 0; i < nc + 1; i++) \
209
r.xcidx (i) = m.cidx (i); \
210
\
211
r.maybe_compress(true); \
212
return r; \
213
}
214
215
#define SPARSE_SSM_BIN_OPS(R1, R2, S, M) \
216
SPARSE_SSM_BIN_OP_1 (R1, operator +, +, S, M) \
217
SPARSE_SSM_BIN_OP_1 (R1, operator -, -, S, M) \
218
SPARSE_SSM_BIN_OP_2 (R2, operator *, *, S, M) \
219
SPARSE_SSM_BIN_OP_2 (R2, operator /, /, S, M)
220
221
#define SPARSE_SSM_CMP_OP(F, OP, S, SZ, SC, M, MZ, MC) \
222
SparseBoolMatrix \
223
F (const S& s, const M& m) \
224
{ \
225
octave_idx_type nr = m.rows (); \
226
octave_idx_type nc = m.cols (); \
227
SparseBoolMatrix r; \
228
\
229
if (SC (s) OP SC (MZ)) \
230
{ \
231
r = SparseBoolMatrix (nr, nc, true); \
232
for (octave_idx_type j = 0; j < nc; j++) \
233
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
234
if (! (SC (s) OP MC (m.data (i)))) \
235
r.data (m.ridx (i) + j * nr) = false; \
236
r.maybe_compress (true); \
237
} \
238
else \
239
{ \
240
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
241
r.cidx (0) = static_cast<octave_idx_type> (0); \
242
octave_idx_type nel = 0; \
243
for (octave_idx_type j = 0; j < nc; j++) \
244
{ \
245
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
246
if (SC (s) OP MC (m.data (i))) \
247
{ \
248
r.ridx (nel) = m.ridx (i); \
249
r.data (nel++) = true; \
250
} \
251
r.cidx (j + 1) = nel; \
252
} \
253
r.maybe_compress (false); \
254
} \
255
return r; \
256
}
257
258
#define SPARSE_SSM_CMP_OPS(S, SZ, SC, M, MZ, MC) \
259
SPARSE_SSM_CMP_OP (mx_el_lt, <, S, SZ, , M, MZ, ) \
260
SPARSE_SSM_CMP_OP (mx_el_le, <=, S, SZ, , M, MZ, ) \
261
SPARSE_SSM_CMP_OP (mx_el_ge, >=, S, SZ, , M, MZ, ) \
262
SPARSE_SSM_CMP_OP (mx_el_gt, >, S, SZ, , M, MZ, ) \
263
SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, SZ, , M, MZ, ) \
264
SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, SZ, , M, MZ, )
265
266
#define SPARSE_SSM_EQNE_OPS(S, SZ, SC, M, MZ, MC) \
267
SPARSE_SSM_CMP_OP (mx_el_eq, ==, S, SZ, , M, MZ, ) \
268
SPARSE_SSM_CMP_OP (mx_el_ne, !=, S, SZ, , M, MZ, )
269
270
#define SPARSE_SSM_BOOL_OP(F, OP, S, M, LHS_ZERO, RHS_ZERO) \
271
SparseBoolMatrix \
272
F (const S& s, const M& m) \
273
{ \
274
octave_idx_type nr = m.rows (); \
275
octave_idx_type nc = m.cols (); \
276
SparseBoolMatrix r; \
277
\
278
if (nr > 0 && nc > 0) \
279
{ \
280
if ((s != LHS_ZERO) OP RHS_ZERO) \
281
{ \
282
r = SparseBoolMatrix (nr, nc, true); \
283
for (octave_idx_type j = 0; j < nc; j++) \
284
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
285
if (! ((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO))) \
286
r.data (m.ridx (i) + j * nr) = false; \
287
r.maybe_compress (true); \
288
} \
289
else \
290
{ \
291
r = SparseBoolMatrix (nr, nc, m.nnz ()); \
292
r.cidx (0) = static_cast<octave_idx_type> (0); \
293
octave_idx_type nel = 0; \
294
for (octave_idx_type j = 0; j < nc; j++) \
295
{ \
296
for (octave_idx_type i = m.cidx (j); i < m.cidx (j+1); i++) \
297
if ((s != LHS_ZERO) OP (m.data (i) != RHS_ZERO)) \
298
{ \
299
r.ridx (nel) = m.ridx (i); \
300
r.data (nel++) = true; \
301
} \
302
r.cidx (j + 1) = nel; \
303
} \
304
r.maybe_compress (false); \
305
} \
306
} \
307
return r; \
308
}
309
310
#define SPARSE_SSM_BOOL_OPS2(S, M, LHS_ZERO, RHS_ZERO) \
311
SPARSE_SSM_BOOL_OP (mx_el_and, &&, S, M, LHS_ZERO, RHS_ZERO) \
312
SPARSE_SSM_BOOL_OP (mx_el_or, ||, S, M, LHS_ZERO, RHS_ZERO)
313
314
#define SPARSE_SSM_BOOL_OPS(S, M, ZERO) \
315
SPARSE_SSM_BOOL_OPS2(S, M, ZERO, ZERO)
316
317
// sparse matrix by sparse matrix operations.
318
319
#define SPARSE_SMSM_BIN_OP_1(R, F, OP, M1, M2) \
320
R \
321
F (const M1& m1, const M2& m2) \
322
{ \
323
R r; \
324
\
325
octave_idx_type m1_nr = m1.rows (); \
326
octave_idx_type m1_nc = m1.cols (); \
327
\
328
octave_idx_type m2_nr = m2.rows (); \
329
octave_idx_type m2_nc = m2.cols (); \
330
\
331
if (m1_nr == 1 && m1_nc == 1) \
332
{ \
333
if (m1.elem (0,0) == 0.) \
334
r = OP R (m2); \
335
else \
336
{ \
337
r = R (m2_nr, m2_nc, m1.data (0) OP 0.); \
338
\
339
for (octave_idx_type j = 0 ; j < m2_nc ; j++) \
340
{ \
341
octave_quit (); \
342
octave_idx_type idxj = j * m2_nr; \
343
for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++) \
344
{ \
345
octave_quit (); \
346
r.data (idxj + m2.ridx (i)) = m1.data (0) OP m2.data (i); \
347
} \
348
} \
349
r.maybe_compress (); \
350
} \
351
} \
352
else if (m2_nr == 1 && m2_nc == 1) \
353
{ \
354
if (m2.elem (0,0) == 0.) \
355
r = R (m1); \
356
else \
357
{ \
358
r = R (m1_nr, m1_nc, 0. OP m2.data (0)); \
359
\
360
for (octave_idx_type j = 0 ; j < m1_nc ; j++) \
361
{ \
362
octave_quit (); \
363
octave_idx_type idxj = j * m1_nr; \
364
for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++) \
365
{ \
366
octave_quit (); \
367
r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.data (0); \
368
} \
369
} \
370
r.maybe_compress (); \
371
} \
372
} \
373
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
374
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
375
else \
376
{ \
377
r = R (m1_nr, m1_nc, (m1.nnz () + m2.nnz ())); \
378
\
379
octave_idx_type jx = 0; \
380
r.cidx (0) = 0; \
381
for (octave_idx_type i = 0 ; i < m1_nc ; i++) \
382
{ \
383
octave_idx_type ja = m1.cidx (i); \
384
octave_idx_type ja_max = m1.cidx (i+1); \
385
bool ja_lt_max = ja < ja_max; \
386
\
387
octave_idx_type jb = m2.cidx (i); \
388
octave_idx_type jb_max = m2.cidx (i+1); \
389
bool jb_lt_max = jb < jb_max; \
390
\
391
while (ja_lt_max || jb_lt_max) \
392
{ \
393
octave_quit (); \
394
if ((! jb_lt_max) || \
395
(ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \
396
{ \
397
r.ridx (jx) = m1.ridx (ja); \
398
r.data (jx) = m1.data (ja) OP 0.; \
399
jx++; \
400
ja++; \
401
ja_lt_max= ja < ja_max; \
402
} \
403
else if ((! ja_lt_max) || \
404
(jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \
405
{ \
406
r.ridx (jx) = m2.ridx (jb); \
407
r.data (jx) = 0. OP m2.data (jb); \
408
jx++; \
409
jb++; \
410
jb_lt_max= jb < jb_max; \
411
} \
412
else \
413
{ \
414
if ((m1.data (ja) OP m2.data (jb)) != 0.) \
415
{ \
416
r.data (jx) = m1.data (ja) OP m2.data (jb); \
417
r.ridx (jx) = m1.ridx (ja); \
418
jx++; \
419
} \
420
ja++; \
421
ja_lt_max= ja < ja_max; \
422
jb++; \
423
jb_lt_max= jb < jb_max; \
424
} \
425
} \
426
r.cidx (i+1) = jx; \
427
} \
428
\
429
r.maybe_compress (); \
430
} \
431
\
432
return r; \
433
}
434
435
#define SPARSE_SMSM_BIN_OP_2(R, F, OP, M1, M2) \
436
R \
437
F (const M1& m1, const M2& m2) \
438
{ \
439
R r; \
440
\
441
octave_idx_type m1_nr = m1.rows (); \
442
octave_idx_type m1_nc = m1.cols (); \
443
\
444
octave_idx_type m2_nr = m2.rows (); \
445
octave_idx_type m2_nc = m2.cols (); \
446
\
447
if (m1_nr == 1 && m1_nc == 1) \
448
{ \
449
if (m1.elem (0,0) == 0.) \
450
r = R (m2_nr, m2_nc); \
451
else \
452
{ \
453
r = R (m2); \
454
octave_idx_type m2_nnz = m2.nnz (); \
455
\
456
for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \
457
{ \
458
octave_quit (); \
459
r.data (i) = m1.data (0) OP r.data (i); \
460
} \
461
r.maybe_compress (); \
462
} \
463
} \
464
else if (m2_nr == 1 && m2_nc == 1) \
465
{ \
466
if (m2.elem (0,0) == 0.) \
467
r = R (m1_nr, m1_nc); \
468
else \
469
{ \
470
r = R (m1); \
471
octave_idx_type m1_nnz = m1.nnz (); \
472
\
473
for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \
474
{ \
475
octave_quit (); \
476
r.data (i) = r.data (i) OP m2.data (0); \
477
} \
478
r.maybe_compress (); \
479
} \
480
} \
481
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
482
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
483
else \
484
{ \
485
r = R (m1_nr, m1_nc, (m1.nnz () > m2.nnz () ? m1.nnz () : m2.nnz ())); \
486
\
487
octave_idx_type jx = 0; \
488
r.cidx (0) = 0; \
489
for (octave_idx_type i = 0 ; i < m1_nc ; i++) \
490
{ \
491
octave_idx_type ja = m1.cidx (i); \
492
octave_idx_type ja_max = m1.cidx (i+1); \
493
bool ja_lt_max = ja < ja_max; \
494
\
495
octave_idx_type jb = m2.cidx (i); \
496
octave_idx_type jb_max = m2.cidx (i+1); \
497
bool jb_lt_max = jb < jb_max; \
498
\
499
while (ja_lt_max || jb_lt_max) \
500
{ \
501
octave_quit (); \
502
if ((! jb_lt_max) || \
503
(ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \
504
{ \
505
ja++; ja_lt_max= ja < ja_max; \
506
} \
507
else if ((! ja_lt_max) || \
508
(jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \
509
{ \
510
jb++; jb_lt_max= jb < jb_max; \
511
} \
512
else \
513
{ \
514
if ((m1.data (ja) OP m2.data (jb)) != 0.) \
515
{ \
516
r.data (jx) = m1.data (ja) OP m2.data (jb); \
517
r.ridx (jx) = m1.ridx (ja); \
518
jx++; \
519
} \
520
ja++; ja_lt_max= ja < ja_max; \
521
jb++; jb_lt_max= jb < jb_max; \
522
} \
523
} \
524
r.cidx (i+1) = jx; \
525
} \
526
\
527
r.maybe_compress (); \
528
} \
529
\
530
return r; \
531
}
532
533
#define SPARSE_SMSM_BIN_OP_3(R, F, OP, M1, M2) \
534
R \
535
F (const M1& m1, const M2& m2) \
536
{ \
537
R r; \
538
\
539
octave_idx_type m1_nr = m1.rows (); \
540
octave_idx_type m1_nc = m1.cols (); \
541
\
542
octave_idx_type m2_nr = m2.rows (); \
543
octave_idx_type m2_nc = m2.cols (); \
544
\
545
if (m1_nr == 1 && m1_nc == 1) \
546
{ \
547
if ((m1.elem (0,0) OP Complex ()) == Complex ()) \
548
{ \
549
octave_idx_type m2_nnz = m2.nnz (); \
550
r = R (m2); \
551
for (octave_idx_type i = 0 ; i < m2_nnz ; i++) \
552
r.data (i) = m1.elem (0,0) OP r.data (i); \
553
r.maybe_compress (); \
554
} \
555
else \
556
{ \
557
r = R (m2_nr, m2_nc, m1.elem (0,0) OP Complex ()); \
558
for (octave_idx_type j = 0 ; j < m2_nc ; j++) \
559
{ \
560
octave_quit (); \
561
octave_idx_type idxj = j * m2_nr; \
562
for (octave_idx_type i = m2.cidx (j) ; i < m2.cidx (j+1) ; i++) \
563
{ \
564
octave_quit (); \
565
r.data (idxj + m2.ridx (i)) = m1.elem (0,0) OP m2.data (i); \
566
} \
567
} \
568
r.maybe_compress (); \
569
} \
570
} \
571
else if (m2_nr == 1 && m2_nc == 1) \
572
{ \
573
if ((Complex () OP m1.elem (0,0)) == Complex ()) \
574
{ \
575
octave_idx_type m1_nnz = m1.nnz (); \
576
r = R (m1); \
577
for (octave_idx_type i = 0 ; i < m1_nnz ; i++) \
578
r.data (i) = r.data (i) OP m2.elem (0,0); \
579
r.maybe_compress (); \
580
} \
581
else \
582
{ \
583
r = R (m1_nr, m1_nc, Complex () OP m2.elem (0,0)); \
584
for (octave_idx_type j = 0 ; j < m1_nc ; j++) \
585
{ \
586
octave_quit (); \
587
octave_idx_type idxj = j * m1_nr; \
588
for (octave_idx_type i = m1.cidx (j) ; i < m1.cidx (j+1) ; i++) \
589
{ \
590
octave_quit (); \
591
r.data (idxj + m1.ridx (i)) = m1.data (i) OP m2.elem (0,0); \
592
} \
593
} \
594
r.maybe_compress (); \
595
} \
596
} \
597
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
598
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
599
else \
600
{ \
601
\
602
/* FIXME: Kludge... Always double/Complex, so Complex () */
\
603
r = R (m1_nr, m1_nc, (Complex () OP Complex ())); \
604
\
605
for (octave_idx_type i = 0 ; i < m1_nc ; i++) \
606
{ \
607
octave_idx_type ja = m1.cidx (i); \
608
octave_idx_type ja_max = m1.cidx (i+1); \
609
bool ja_lt_max = ja < ja_max; \
610
\
611
octave_idx_type jb = m2.cidx (i); \
612
octave_idx_type jb_max = m2.cidx (i+1); \
613
bool jb_lt_max = jb < jb_max; \
614
\
615
while (ja_lt_max || jb_lt_max) \
616
{ \
617
octave_quit (); \
618
if ((! jb_lt_max) || \
619
(ja_lt_max && (m1.ridx (ja) < m2.ridx (jb)))) \
620
{ \
621
/* keep those kludges coming */
\
622
r.elem (m1.ridx (ja),i) = m1.data (ja) OP Complex (); \
623
ja++; \
624
ja_lt_max= ja < ja_max; \
625
} \
626
else if ((! ja_lt_max) || \
627
(jb_lt_max && (m2.ridx (jb) < m1.ridx (ja)))) \
628
{ \
629
/* keep those kludges coming */
\
630
r.elem (m2.ridx (jb),i) = Complex () OP m2.data (jb); \
631
jb++; \
632
jb_lt_max= jb < jb_max; \
633
} \
634
else \
635
{ \
636
r.elem (m1.ridx (ja),i) = m1.data (ja) OP m2.data (jb); \
637
ja++; \
638
ja_lt_max= ja < ja_max; \
639
jb++; \
640
jb_lt_max= jb < jb_max; \
641
} \
642
} \
643
} \
644
r.maybe_compress (true); \
645
} \
646
\
647
return r; \
648
}
649
650
// Note that SM ./ SM needs to take into account the NaN and Inf values
651
// implied by the division by zero.
652
// FIXME: Are the NaNs double(NaN) or Complex(NaN,Nan) in the complex case?
653
#define SPARSE_SMSM_BIN_OPS(R1, R2, M1, M2) \
654
SPARSE_SMSM_BIN_OP_1 (R1, operator +, +, M1, M2) \
655
SPARSE_SMSM_BIN_OP_1 (R1, operator -, -, M1, M2) \
656
SPARSE_SMSM_BIN_OP_2 (R2, product, *, M1, M2) \
657
SPARSE_SMSM_BIN_OP_3 (R2, quotient, /, M1, M2)
658
659
// FIXME: this macro duplicates the bodies of the template functions
660
// defined in the SPARSE_SSM_CMP_OP and SPARSE_SMS_CMP_OP macros.
661
662
#define SPARSE_SMSM_CMP_OP(F, OP, M1, Z1, C1, M2, Z2, C2) \
663
SparseBoolMatrix \
664
F (const M1& m1, const M2& m2) \
665
{ \
666
SparseBoolMatrix r; \
667
\
668
octave_idx_type m1_nr = m1.rows (); \
669
octave_idx_type m1_nc = m1.cols (); \
670
\
671
octave_idx_type m2_nr = m2.rows (); \
672
octave_idx_type m2_nc = m2.cols (); \
673
\
674
if (m1_nr == 1 && m1_nc == 1) \
675
{ \
676
if (C1 (m1.elem (0,0)) OP C2 (Z2)) \
677
{ \
678
r = SparseBoolMatrix (m2_nr, m2_nc, true); \
679
for (octave_idx_type j = 0; j < m2_nc; j++) \
680
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
681
if (! (C1 (m1.elem (0,0)) OP C2 (m2.data (i)))) \
682
r.data (m2.ridx (i) + j * m2_nr) = false; \
683
r.maybe_compress (true); \
684
} \
685
else \
686
{ \
687
r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \
688
r.cidx (0) = static_cast<octave_idx_type> (0); \
689
octave_idx_type nel = 0; \
690
for (octave_idx_type j = 0; j < m2_nc; j++) \
691
{ \
692
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
693
if (C1 (m1.elem (0,0)) OP C2 (m2.data (i))) \
694
{ \
695
r.ridx (nel) = m2.ridx (i); \
696
r.data (nel++) = true; \
697
} \
698
r.cidx (j + 1) = nel; \
699
} \
700
r.maybe_compress (false); \
701
} \
702
} \
703
else if (m2_nr == 1 && m2_nc == 1) \
704
{ \
705
if (C1 (Z1) OP C2 (m2.elem (0,0))) \
706
{ \
707
r = SparseBoolMatrix (m1_nr, m1_nc, true); \
708
for (octave_idx_type j = 0; j < m1_nc; j++) \
709
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
710
if (! (C1 (m1.data (i)) OP C2 (m2.elem (0,0)))) \
711
r.data (m1.ridx (i) + j * m1_nr) = false; \
712
r.maybe_compress (true); \
713
} \
714
else \
715
{ \
716
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \
717
r.cidx (0) = static_cast<octave_idx_type> (0); \
718
octave_idx_type nel = 0; \
719
for (octave_idx_type j = 0; j < m1_nc; j++) \
720
{ \
721
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
722
if (C1 (m1.data (i)) OP C2 (m2.elem (0,0))) \
723
{ \
724
r.ridx (nel) = m1.ridx (i); \
725
r.data (nel++) = true; \
726
} \
727
r.cidx (j + 1) = nel; \
728
} \
729
r.maybe_compress (false); \
730
} \
731
} \
732
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
733
{ \
734
if (m1_nr != 0 || m1_nc != 0) \
735
{ \
736
if (C1 (Z1) OP C2 (Z2)) \
737
{ \
738
r = SparseBoolMatrix (m1_nr, m1_nc, true); \
739
for (octave_idx_type j = 0; j < m1_nc; j++) \
740
{ \
741
octave_idx_type i1 = m1.cidx (j); \
742
octave_idx_type e1 = m1.cidx (j+1); \
743
octave_idx_type i2 = m2.cidx (j); \
744
octave_idx_type e2 = m2.cidx (j+1); \
745
while (i1 < e1 || i2 < e2) \
746
{ \
747
if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \
748
{ \
749
if (! (C1 (Z1) OP C2 (m2.data (i2)))) \
750
r.data (m2.ridx (i2) + j * m1_nr) = false; \
751
i2++; \
752
} \
753
else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \
754
{ \
755
if (! (C1 (m1.data (i1)) OP C2 (Z2))) \
756
r.data (m1.ridx (i1) + j * m1_nr) = false; \
757
i1++; \
758
} \
759
else \
760
{ \
761
if (! (C1 (m1.data (i1)) OP C2 (m2.data (i2)))) \
762
r.data (m1.ridx (i1) + j * m1_nr) = false; \
763
i1++; \
764
i2++; \
765
} \
766
} \
767
} \
768
r.maybe_compress (true); \
769
} \
770
else \
771
{ \
772
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ()); \
773
r.cidx (0) = static_cast<octave_idx_type> (0); \
774
octave_idx_type nel = 0; \
775
for (octave_idx_type j = 0; j < m1_nc; j++) \
776
{ \
777
octave_idx_type i1 = m1.cidx (j); \
778
octave_idx_type e1 = m1.cidx (j+1); \
779
octave_idx_type i2 = m2.cidx (j); \
780
octave_idx_type e2 = m2.cidx (j+1); \
781
while (i1 < e1 || i2 < e2) \
782
{ \
783
if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \
784
{ \
785
if (C1 (Z1) OP C2 (m2.data (i2))) \
786
{ \
787
r.ridx (nel) = m2.ridx (i2); \
788
r.data (nel++) = true; \
789
} \
790
i2++; \
791
} \
792
else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \
793
{ \
794
if (C1 (m1.data (i1)) OP C2 (Z2)) \
795
{ \
796
r.ridx (nel) = m1.ridx (i1); \
797
r.data (nel++) = true; \
798
} \
799
i1++; \
800
} \
801
else \
802
{ \
803
if (C1 (m1.data (i1)) OP C2 (m2.data (i2))) \
804
{ \
805
r.ridx (nel) = m1.ridx (i1); \
806
r.data (nel++) = true; \
807
} \
808
i1++; \
809
i2++; \
810
} \
811
} \
812
r.cidx (j + 1) = nel; \
813
} \
814
r.maybe_compress (false); \
815
} \
816
} \
817
} \
818
else \
819
{ \
820
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
821
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
822
} \
823
return r; \
824
}
825
826
#define SPARSE_SMSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \
827
SPARSE_SMSM_CMP_OP (mx_el_lt, <, M1, Z1, , M2, Z2, ) \
828
SPARSE_SMSM_CMP_OP (mx_el_le, <=, M1, Z1, , M2, Z2, ) \
829
SPARSE_SMSM_CMP_OP (mx_el_ge, >=, M1, Z1, , M2, Z2, ) \
830
SPARSE_SMSM_CMP_OP (mx_el_gt, >, M1, Z1, , M2, Z2, ) \
831
SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \
832
SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, Z1, , M2, Z2, )
833
834
#define SPARSE_SMSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \
835
SPARSE_SMSM_CMP_OP (mx_el_eq, ==, M1, Z1, , M2, Z2, ) \
836
SPARSE_SMSM_CMP_OP (mx_el_ne, !=, M1, Z1, , M2, Z2, )
837
838
// FIXME: this macro duplicates the bodies of the template functions
839
// defined in the SPARSE_SSM_BOOL_OP and SPARSE_SMS_BOOL_OP macros.
840
841
#define SPARSE_SMSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \
842
SparseBoolMatrix \
843
F (const M1& m1, const M2& m2) \
844
{ \
845
SparseBoolMatrix r; \
846
\
847
octave_idx_type m1_nr = m1.rows (); \
848
octave_idx_type m1_nc = m1.cols (); \
849
\
850
octave_idx_type m2_nr = m2.rows (); \
851
octave_idx_type m2_nc = m2.cols (); \
852
\
853
if (m1_nr == 1 && m1_nc == 1) \
854
{ \
855
if (m2_nr > 0 && m2_nc > 0) \
856
{ \
857
if ((m1.elem (0,0) != LHS_ZERO) OP RHS_ZERO) \
858
{ \
859
r = SparseBoolMatrix (m2_nr, m2_nc, true); \
860
for (octave_idx_type j = 0; j < m2_nc; j++) \
861
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
862
if (! ((m1.elem (0,0) != LHS_ZERO) OP (m2.data (i) != RHS_ZERO))) \
863
r.data (m2.ridx (i) + j * m2_nr) = false; \
864
r.maybe_compress (true); \
865
} \
866
else \
867
{ \
868
r = SparseBoolMatrix (m2_nr, m2_nc, m2.nnz ()); \
869
r.cidx (0) = static_cast<octave_idx_type> (0); \
870
octave_idx_type nel = 0; \
871
for (octave_idx_type j = 0; j < m2_nc; j++) \
872
{ \
873
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
874
if ((m1.elem (0,0) != LHS_ZERO) OP (m2.data (i) != RHS_ZERO)) \
875
{ \
876
r.ridx (nel) = m2.ridx (i); \
877
r.data (nel++) = true; \
878
} \
879
r.cidx (j + 1) = nel; \
880
} \
881
r.maybe_compress (false); \
882
} \
883
} \
884
} \
885
else if (m2_nr == 1 && m2_nc == 1) \
886
{ \
887
if (m1_nr > 0 && m1_nc > 0) \
888
{ \
889
if (LHS_ZERO OP (m2.elem (0,0) != RHS_ZERO)) \
890
{ \
891
r = SparseBoolMatrix (m1_nr, m1_nc, true); \
892
for (octave_idx_type j = 0; j < m1_nc; j++) \
893
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
894
if (! ((m1.data (i) != LHS_ZERO) OP (m2.elem (0,0) != RHS_ZERO))) \
895
r.data (m1.ridx (i) + j * m1_nr) = false; \
896
r.maybe_compress (true); \
897
} \
898
else \
899
{ \
900
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz ()); \
901
r.cidx (0) = static_cast<octave_idx_type> (0); \
902
octave_idx_type nel = 0; \
903
for (octave_idx_type j = 0; j < m1_nc; j++) \
904
{ \
905
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
906
if ((m1.data (i) != LHS_ZERO) OP (m2.elem (0,0) != RHS_ZERO)) \
907
{ \
908
r.ridx (nel) = m1.ridx (i); \
909
r.data (nel++) = true; \
910
} \
911
r.cidx (j + 1) = nel; \
912
} \
913
r.maybe_compress (false); \
914
} \
915
} \
916
} \
917
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
918
{ \
919
if (m1_nr != 0 || m1_nc != 0) \
920
{ \
921
r = SparseBoolMatrix (m1_nr, m1_nc, m1.nnz () + m2.nnz ()); \
922
r.cidx (0) = static_cast<octave_idx_type> (0); \
923
octave_idx_type nel = 0; \
924
for (octave_idx_type j = 0; j < m1_nc; j++) \
925
{ \
926
octave_idx_type i1 = m1.cidx (j); \
927
octave_idx_type e1 = m1.cidx (j+1); \
928
octave_idx_type i2 = m2.cidx (j); \
929
octave_idx_type e2 = m2.cidx (j+1); \
930
while (i1 < e1 || i2 < e2) \
931
{ \
932
if (i1 == e1 || (i2 < e2 && m1.ridx (i1) > m2.ridx (i2))) \
933
{ \
934
if (LHS_ZERO OP m2.data (i2) != RHS_ZERO) \
935
{ \
936
r.ridx (nel) = m2.ridx (i2); \
937
r.data (nel++) = true; \
938
} \
939
i2++; \
940
} \
941
else if (i2 == e2 || m1.ridx (i1) < m2.ridx (i2)) \
942
{ \
943
if (m1.data (i1) != LHS_ZERO OP RHS_ZERO) \
944
{ \
945
r.ridx (nel) = m1.ridx (i1); \
946
r.data (nel++) = true; \
947
} \
948
i1++; \
949
} \
950
else \
951
{ \
952
if (m1.data (i1) != LHS_ZERO OP m2.data (i2) != RHS_ZERO) \
953
{ \
954
r.ridx (nel) = m1.ridx (i1); \
955
r.data (nel++) = true; \
956
} \
957
i1++; \
958
i2++; \
959
} \
960
} \
961
r.cidx (j + 1) = nel; \
962
} \
963
r.maybe_compress (false); \
964
} \
965
} \
966
else \
967
{ \
968
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
969
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
970
} \
971
return r; \
972
}
973
974
#define SPARSE_SMSM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \
975
SPARSE_SMSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \
976
SPARSE_SMSM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO)
977
978
#define SPARSE_SMSM_BOOL_OPS(M1, M2, ZERO) \
979
SPARSE_SMSM_BOOL_OPS2(M1, M2, ZERO, ZERO)
980
981
// matrix by sparse matrix operations.
982
983
#define SPARSE_MSM_BIN_OP_1(R, F, OP, M1, M2) \
984
R \
985
F (const M1& m1, const M2& m2) \
986
{ \
987
R r; \
988
\
989
octave_idx_type m1_nr = m1.rows (); \
990
octave_idx_type m1_nc = m1.cols (); \
991
\
992
octave_idx_type m2_nr = m2.rows (); \
993
octave_idx_type m2_nc = m2.cols (); \
994
\
995
if (m2_nr == 1 && m2_nc == 1) \
996
r = R (m1 OP m2.elem (0,0)); \
997
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
998
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
999
else \
1000
{ \
1001
r = R (F (m1, m2.matrix_value ())); \
1002
} \
1003
return r; \
1004
}
1005
1006
#define SPARSE_MSM_BIN_OP_2(R, F, OP, M1, M2) \
1007
R \
1008
F (const M1& m1, const M2& m2) \
1009
{ \
1010
R r; \
1011
\
1012
octave_idx_type m1_nr = m1.rows (); \
1013
octave_idx_type m1_nc = m1.cols (); \
1014
\
1015
octave_idx_type m2_nr = m2.rows (); \
1016
octave_idx_type m2_nc = m2.cols (); \
1017
\
1018
if (m2_nr == 1 && m2_nc == 1) \
1019
r = R (m1 OP m2.elem (0,0)); \
1020
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1021
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1022
else \
1023
{ \
1024
if (do_mx_check (m1, mx_inline_all_finite<M1::element_type>)) \
1025
{ \
1026
/* Sparsity pattern is preserved. */
\
1027
octave_idx_type m2_nz = m2.nnz (); \
1028
r = R (m2_nr, m2_nc, m2_nz); \
1029
for (octave_idx_type j = 0, k = 0; j < m2_nc; j++) \
1030
{ \
1031
octave_quit (); \
1032
for (octave_idx_type i = m2.cidx (j); i < m2.cidx (j+1); i++) \
1033
{ \
1034
octave_idx_type mri = m2.ridx (i); \
1035
R::element_type x = m1(mri, j) OP m2.data (i); \
1036
if (x != 0.0) \
1037
{ \
1038
r.xdata (k) = x; \
1039
r.xridx (k) = m2.ridx (i); \
1040
k++; \
1041
} \
1042
} \
1043
r.xcidx (j+1) = k; \
1044
} \
1045
r.maybe_compress (false); \
1046
return r; \
1047
} \
1048
else \
1049
r = R (F (m1, m2.matrix_value ())); \
1050
} \
1051
\
1052
return r; \
1053
}
1054
1055
// FIXME: Pass a specific ZERO value
1056
#define SPARSE_MSM_BIN_OPS(R1, R2, M1, M2) \
1057
SPARSE_MSM_BIN_OP_1 (R1, operator +, +, M1, M2) \
1058
SPARSE_MSM_BIN_OP_1 (R1, operator -, -, M1, M2) \
1059
SPARSE_MSM_BIN_OP_2 (R2, product, *, M1, M2) \
1060
SPARSE_MSM_BIN_OP_1 (R2, quotient, /, M1, M2)
1061
1062
#define SPARSE_MSM_CMP_OP(F, OP, M1, C1, M2, C2) \
1063
SparseBoolMatrix \
1064
F (const M1& m1, const M2& m2) \
1065
{ \
1066
SparseBoolMatrix r; \
1067
\
1068
octave_idx_type m1_nr = m1.rows (); \
1069
octave_idx_type m1_nc = m1.cols (); \
1070
\
1071
octave_idx_type m2_nr = m2.rows (); \
1072
octave_idx_type m2_nc = m2.cols (); \
1073
\
1074
if (m2_nr == 1 && m2_nc == 1) \
1075
r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \
1076
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1077
{ \
1078
if (m1_nr != 0 || m1_nc != 0) \
1079
{ \
1080
/* Count num of nonzero elements */
\
1081
octave_idx_type nel = 0; \
1082
for (octave_idx_type j = 0; j < m1_nc; j++) \
1083
for (octave_idx_type i = 0; i < m1_nr; i++) \
1084
if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \
1085
nel++; \
1086
\
1087
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1088
\
1089
octave_idx_type ii = 0; \
1090
r.cidx (0) = 0; \
1091
for (octave_idx_type j = 0; j < m1_nc; j++) \
1092
{ \
1093
for (octave_idx_type i = 0; i < m1_nr; i++) \
1094
{ \
1095
bool el = C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j)); \
1096
if (el) \
1097
{ \
1098
r.data (ii) = el; \
1099
r.ridx (ii++) = i; \
1100
} \
1101
} \
1102
r.cidx (j+1) = ii; \
1103
} \
1104
} \
1105
} \
1106
else \
1107
{ \
1108
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1109
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1110
} \
1111
return r; \
1112
}
1113
1114
#define SPARSE_MSM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \
1115
SPARSE_MSM_CMP_OP (mx_el_lt, <, M1, , M2, ) \
1116
SPARSE_MSM_CMP_OP (mx_el_le, <=, M1, , M2, ) \
1117
SPARSE_MSM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \
1118
SPARSE_MSM_CMP_OP (mx_el_gt, >, M1, , M2, ) \
1119
SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1120
SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1121
1122
#define SPARSE_MSM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \
1123
SPARSE_MSM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1124
SPARSE_MSM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1125
1126
#define SPARSE_MSM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \
1127
SparseBoolMatrix \
1128
F (const M1& m1, const M2& m2) \
1129
{ \
1130
SparseBoolMatrix r; \
1131
\
1132
octave_idx_type m1_nr = m1.rows (); \
1133
octave_idx_type m1_nc = m1.cols (); \
1134
\
1135
octave_idx_type m2_nr = m2.rows (); \
1136
octave_idx_type m2_nc = m2.cols (); \
1137
\
1138
if (m2_nr == 1 && m2_nc == 1) \
1139
r = SparseBoolMatrix (F (m1, m2.elem (0,0))); \
1140
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1141
{ \
1142
if (m1_nr != 0 || m1_nc != 0) \
1143
{ \
1144
/* Count num of nonzero elements */
\
1145
octave_idx_type nel = 0; \
1146
for (octave_idx_type j = 0; j < m1_nc; j++) \
1147
for (octave_idx_type i = 0; i < m1_nr; i++) \
1148
if ((m1.elem (i, j) != LHS_ZERO) \
1149
OP (m2.elem (i, j) != RHS_ZERO)) \
1150
nel++; \
1151
\
1152
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1153
\
1154
octave_idx_type ii = 0; \
1155
r.cidx (0) = 0; \
1156
for (octave_idx_type j = 0; j < m1_nc; j++) \
1157
{ \
1158
for (octave_idx_type i = 0; i < m1_nr; i++) \
1159
{ \
1160
bool el = (m1.elem (i, j) != LHS_ZERO) \
1161
OP (m2.elem (i, j) != RHS_ZERO); \
1162
if (el) \
1163
{ \
1164
r.data (ii) = el; \
1165
r.ridx (ii++) = i; \
1166
} \
1167
} \
1168
r.cidx (j+1) = ii; \
1169
} \
1170
} \
1171
} \
1172
else \
1173
{ \
1174
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1175
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1176
} \
1177
return r; \
1178
}
1179
1180
#define SPARSE_MSM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \
1181
SPARSE_MSM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \
1182
SPARSE_MSM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO)
1183
1184
#define SPARSE_MSM_BOOL_OPS(M1, M2, ZERO) \
1185
SPARSE_MSM_BOOL_OPS2(M1, M2, ZERO, ZERO)
1186
1187
// sparse matrix by matrix operations.
1188
1189
#define SPARSE_SMM_BIN_OP_1(R, F, OP, M1, M2) \
1190
R \
1191
F (const M1& m1, const M2& m2) \
1192
{ \
1193
R r; \
1194
\
1195
octave_idx_type m1_nr = m1.rows (); \
1196
octave_idx_type m1_nc = m1.cols (); \
1197
\
1198
octave_idx_type m2_nr = m2.rows (); \
1199
octave_idx_type m2_nc = m2.cols (); \
1200
\
1201
if (m1_nr == 1 && m1_nc == 1) \
1202
r = R (m1.elem (0,0) OP m2); \
1203
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1204
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1205
else \
1206
{ \
1207
r = R (m1.matrix_value () OP m2); \
1208
} \
1209
return r; \
1210
}
1211
1212
// sm .* m preserves sparsity if m contains no Infs nor Nans.
1213
#define SPARSE_SMM_BIN_OP_2_CHECK_product(ET) \
1214
do_mx_check (m2, mx_inline_all_finite<ET>)
1215
1216
// sm ./ m preserves sparsity if m contains no NaNs or zeros.
1217
#define SPARSE_SMM_BIN_OP_2_CHECK_quotient(ET) \
1218
! do_mx_check (m2, mx_inline_any_nan<ET>) && m2.nnz () == m2.numel ()
1219
1220
#define SPARSE_SMM_BIN_OP_2(R, F, OP, M1, M2) \
1221
R \
1222
F (const M1& m1, const M2& m2) \
1223
{ \
1224
R r; \
1225
\
1226
octave_idx_type m1_nr = m1.rows (); \
1227
octave_idx_type m1_nc = m1.cols (); \
1228
\
1229
octave_idx_type m2_nr = m2.rows (); \
1230
octave_idx_type m2_nc = m2.cols (); \
1231
\
1232
if (m1_nr == 1 && m1_nc == 1) \
1233
r = R (m1.elem (0,0) OP m2); \
1234
else if (m1_nr != m2_nr || m1_nc != m2_nc) \
1235
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1236
else \
1237
{ \
1238
if (SPARSE_SMM_BIN_OP_2_CHECK_ ## F(M2::element_type)) \
1239
{ \
1240
/* Sparsity pattern is preserved. */
\
1241
octave_idx_type m1_nz = m1.nnz (); \
1242
r = R (m1_nr, m1_nc, m1_nz); \
1243
for (octave_idx_type j = 0, k = 0; j < m1_nc; j++) \
1244
{ \
1245
octave_quit (); \
1246
for (octave_idx_type i = m1.cidx (j); i < m1.cidx (j+1); i++) \
1247
{ \
1248
octave_idx_type mri = m1.ridx (i); \
1249
R::element_type x = m1.data (i) OP m2 (mri, j); \
1250
if (x != 0.0) \
1251
{ \
1252
r.xdata (k) = x; \
1253
r.xridx (k) = m1.ridx (i); \
1254
k++; \
1255
} \
1256
} \
1257
r.xcidx (j+1) = k; \
1258
} \
1259
r.maybe_compress (false); \
1260
return r; \
1261
} \
1262
else \
1263
r = R (F (m1.matrix_value (), m2)); \
1264
} \
1265
\
1266
return r; \
1267
}
1268
1269
#define SPARSE_SMM_BIN_OPS(R1, R2, M1, M2) \
1270
SPARSE_SMM_BIN_OP_1 (R1, operator +, +, M1, M2) \
1271
SPARSE_SMM_BIN_OP_1 (R1, operator -, -, M1, M2) \
1272
SPARSE_SMM_BIN_OP_2 (R2, product, *, M1, M2) \
1273
SPARSE_SMM_BIN_OP_2 (R2, quotient, /, M1, M2)
1274
1275
#define SPARSE_SMM_CMP_OP(F, OP, M1, C1, M2, C2) \
1276
SparseBoolMatrix \
1277
F (const M1& m1, const M2& m2) \
1278
{ \
1279
SparseBoolMatrix r; \
1280
\
1281
octave_idx_type m1_nr = m1.rows (); \
1282
octave_idx_type m1_nc = m1.cols (); \
1283
\
1284
octave_idx_type m2_nr = m2.rows (); \
1285
octave_idx_type m2_nc = m2.cols (); \
1286
\
1287
if (m1_nr == 1 && m1_nc == 1) \
1288
r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \
1289
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1290
{ \
1291
if (m1_nr != 0 || m1_nc != 0) \
1292
{ \
1293
/* Count num of nonzero elements */
\
1294
octave_idx_type nel = 0; \
1295
for (octave_idx_type j = 0; j < m1_nc; j++) \
1296
for (octave_idx_type i = 0; i < m1_nr; i++) \
1297
if (C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j))) \
1298
nel++; \
1299
\
1300
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1301
\
1302
octave_idx_type ii = 0; \
1303
r.cidx (0) = 0; \
1304
for (octave_idx_type j = 0; j < m1_nc; j++) \
1305
{ \
1306
for (octave_idx_type i = 0; i < m1_nr; i++) \
1307
{ \
1308
bool el = C1 (m1.elem (i, j)) OP C2 (m2.elem (i, j)); \
1309
if (el) \
1310
{ \
1311
r.data (ii) = el; \
1312
r.ridx (ii++) = i; \
1313
} \
1314
} \
1315
r.cidx (j+1) = ii; \
1316
} \
1317
} \
1318
} \
1319
else \
1320
{ \
1321
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1322
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1323
} \
1324
return r; \
1325
}
1326
1327
#define SPARSE_SMM_CMP_OPS(M1, Z1, C1, M2, Z2, C2) \
1328
SPARSE_SMM_CMP_OP (mx_el_lt, <, M1, , M2, ) \
1329
SPARSE_SMM_CMP_OP (mx_el_le, <=, M1, , M2, ) \
1330
SPARSE_SMM_CMP_OP (mx_el_ge, >=, M1, , M2, ) \
1331
SPARSE_SMM_CMP_OP (mx_el_gt, >, M1, , M2, ) \
1332
SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1333
SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1334
1335
#define SPARSE_SMM_EQNE_OPS(M1, Z1, C1, M2, Z2, C2) \
1336
SPARSE_SMM_CMP_OP (mx_el_eq, ==, M1, , M2, ) \
1337
SPARSE_SMM_CMP_OP (mx_el_ne, !=, M1, , M2, )
1338
1339
#define SPARSE_SMM_BOOL_OP(F, OP, M1, M2, LHS_ZERO, RHS_ZERO) \
1340
SparseBoolMatrix \
1341
F (const M1& m1, const M2& m2) \
1342
{ \
1343
SparseBoolMatrix r; \
1344
\
1345
octave_idx_type m1_nr = m1.rows (); \
1346
octave_idx_type m1_nc = m1.cols (); \
1347
\
1348
octave_idx_type m2_nr = m2.rows (); \
1349
octave_idx_type m2_nc = m2.cols (); \
1350
\
1351
if (m1_nr == 1 && m1_nc == 1) \
1352
r = SparseBoolMatrix (F (m1.elem (0,0), m2)); \
1353
else if (m1_nr == m2_nr && m1_nc == m2_nc) \
1354
{ \
1355
if (m1_nr != 0 || m1_nc != 0) \
1356
{ \
1357
/* Count num of nonzero elements */
\
1358
octave_idx_type nel = 0; \
1359
for (octave_idx_type j = 0; j < m1_nc; j++) \
1360
for (octave_idx_type i = 0; i < m1_nr; i++) \
1361
if ((m1.elem (i, j) != LHS_ZERO) \
1362
OP (m2.elem (i, j) != RHS_ZERO)) \
1363
nel++; \
1364
\
1365
r = SparseBoolMatrix (m1_nr, m1_nc, nel); \
1366
\
1367
octave_idx_type ii = 0; \
1368
r.cidx (0) = 0; \
1369
for (octave_idx_type j = 0; j < m1_nc; j++) \
1370
{ \
1371
for (octave_idx_type i = 0; i < m1_nr; i++) \
1372
{ \
1373
bool el = (m1.elem (i, j) != LHS_ZERO) \
1374
OP (m2.elem (i, j) != RHS_ZERO); \
1375
if (el) \
1376
{ \
1377
r.data (ii) = el; \
1378
r.ridx (ii++) = i; \
1379
} \
1380
} \
1381
r.cidx (j+1) = ii; \
1382
} \
1383
} \
1384
} \
1385
else \
1386
{ \
1387
if ((m1_nr != 0 || m1_nc != 0) && (m2_nr != 0 || m2_nc != 0)) \
1388
octave::err_nonconformant (#F, m1_nr, m1_nc, m2_nr, m2_nc); \
1389
} \
1390
return r; \
1391
}
1392
1393
#define SPARSE_SMM_BOOL_OPS2(M1, M2, LHS_ZERO, RHS_ZERO) \
1394
SPARSE_SMM_BOOL_OP (mx_el_and, &&, M1, M2, LHS_ZERO, RHS_ZERO) \
1395
SPARSE_SMM_BOOL_OP (mx_el_or, ||, M1, M2, LHS_ZERO, RHS_ZERO)
1396
1397
#define SPARSE_SMM_BOOL_OPS(M1, M2, ZERO) \
1398
SPARSE_SMM_BOOL_OPS2(M1, M2, ZERO, ZERO)
1399
1400
// Avoid some code duplication. Maybe we should use templates.
1401
1402
#define SPARSE_CUMSUM(RET_TYPE, ELT_TYPE, FCN) \
1403
\
1404
octave_idx_type nr = rows (); \
1405
octave_idx_type nc = cols (); \
1406
\
1407
RET_TYPE retval; \
1408
\
1409
if (nr > 0 && nc > 0) \
1410
{ \
1411
if ((nr == 1 && dim == -1) || dim == 1) \
1412
/* Ugly!! Is there a better way? */
\
1413
retval = transpose (). FCN (0) .transpose (); \
1414
else \
1415
{ \
1416
octave_idx_type nel = 0; \
1417
for (octave_idx_type i = 0; i < nc; i++) \
1418
{ \
1419
ELT_TYPE t = ELT_TYPE (); \
1420
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1421
{ \
1422
t += data (j); \
1423
if (t != ELT_TYPE ()) \
1424
{ \
1425
if (j == cidx (i+1) - 1) \
1426
nel += nr - ridx (j); \
1427
else \
1428
nel += ridx (j+1) - ridx (j); \
1429
} \
1430
} \
1431
} \
1432
retval = RET_TYPE (nr, nc, nel); \
1433
retval.cidx (0) = 0; \
1434
octave_idx_type ii = 0; \
1435
for (octave_idx_type i = 0; i < nc; i++) \
1436
{ \
1437
ELT_TYPE t = ELT_TYPE (); \
1438
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1439
{ \
1440
t += data (j); \
1441
if (t != ELT_TYPE ()) \
1442
{ \
1443
if (j == cidx (i+1) - 1) \
1444
{ \
1445
for (octave_idx_type k = ridx (j); k < nr; k++) \
1446
{ \
1447
retval.data (ii) = t; \
1448
retval.ridx (ii++) = k; \
1449
} \
1450
} \
1451
else \
1452
{ \
1453
for (octave_idx_type k = ridx (j); k < ridx (j+1); k++) \
1454
{ \
1455
retval.data (ii) = t; \
1456
retval.ridx (ii++) = k; \
1457
} \
1458
} \
1459
} \
1460
} \
1461
retval.cidx (i+1) = ii; \
1462
} \
1463
} \
1464
} \
1465
else \
1466
retval = RET_TYPE (nr,nc); \
1467
\
1468
return retval
1469
1470
#define SPARSE_CUMPROD(RET_TYPE, ELT_TYPE, FCN) \
1471
\
1472
octave_idx_type nr = rows (); \
1473
octave_idx_type nc = cols (); \
1474
\
1475
RET_TYPE retval; \
1476
\
1477
if (nr > 0 && nc > 0) \
1478
{ \
1479
if ((nr == 1 && dim == -1) || dim == 1) \
1480
/* Ugly!! Is there a better way? */
\
1481
retval = transpose (). FCN (0) .transpose (); \
1482
else \
1483
{ \
1484
octave_idx_type nel = 0; \
1485
for (octave_idx_type i = 0; i < nc; i++) \
1486
{ \
1487
octave_idx_type jj = 0; \
1488
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1489
{ \
1490
if (jj == ridx (j)) \
1491
{ \
1492
nel++; \
1493
jj++; \
1494
} \
1495
else \
1496
break; \
1497
} \
1498
} \
1499
retval = RET_TYPE (nr, nc, nel); \
1500
retval.cidx (0) = 0; \
1501
octave_idx_type ii = 0; \
1502
for (octave_idx_type i = 0; i < nc; i++) \
1503
{ \
1504
ELT_TYPE t = ELT_TYPE (1.); \
1505
octave_idx_type jj = 0; \
1506
for (octave_idx_type j = cidx (i); j < cidx (i+1); j++) \
1507
{ \
1508
if (jj == ridx (j)) \
1509
{ \
1510
t *= data (j); \
1511
retval.data (ii) = t; \
1512
retval.ridx (ii++) = jj++; \
1513
} \
1514
else \
1515
break; \
1516
} \
1517
retval.cidx (i+1) = ii; \
1518
} \
1519
} \
1520
} \
1521
else \
1522
retval = RET_TYPE (nr,nc); \
1523
\
1524
return retval
1525
1526
#define SPARSE_BASE_REDUCTION_OP(RET_TYPE, EL_TYPE, ROW_EXPR, COL_EXPR, \
1527
INIT_VAL, MT_RESULT) \
1528
\
1529
octave_idx_type nr = rows (); \
1530
octave_idx_type nc = cols (); \
1531
\
1532
RET_TYPE retval; \
1533
\
1534
if (nr > 0 && nc > 0) \
1535
{ \
1536
if ((nr == 1 && dim == -1) || dim == 1) \
1537
{ \
1538
/* Define j here to allow fancy definition for prod method */
\
1539
octave_idx_type j = 0; \
1540
OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nr); \
1541
\
1542
for (octave_idx_type i = 0; i < nr; i++) \
1543
tmp[i] = INIT_VAL; \
1544
for (j = 0; j < nc; j++) \
1545
{ \
1546
for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \
1547
{ \
1548
ROW_EXPR; \
1549
} \
1550
} \
1551
octave_idx_type nel = 0; \
1552
for (octave_idx_type i = 0; i < nr; i++) \
1553
if (tmp[i] != EL_TYPE ()) \
1554
nel++; \
1555
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), nel); \
1556
retval.cidx (0) = 0; \
1557
retval.cidx (1) = nel; \
1558
nel = 0; \
1559
for (octave_idx_type i = 0; i < nr; i++) \
1560
if (tmp[i] != EL_TYPE ()) \
1561
{ \
1562
retval.data (nel) = tmp[i]; \
1563
retval.ridx (nel++) = i; \
1564
} \
1565
} \
1566
else \
1567
{ \
1568
OCTAVE_LOCAL_BUFFER (EL_TYPE, tmp, nc); \
1569
\
1570
for (octave_idx_type j = 0; j < nc; j++) \
1571
{ \
1572
tmp[j] = INIT_VAL; \
1573
for (octave_idx_type i = cidx (j); i < cidx (j + 1); i++) \
1574
{ \
1575
COL_EXPR; \
1576
} \
1577
} \
1578
octave_idx_type nel = 0; \
1579
for (octave_idx_type i = 0; i < nc; i++) \
1580
if (tmp[i] != EL_TYPE ()) \
1581
nel++; \
1582
retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, nel); \
1583
retval.cidx (0) = 0; \
1584
nel = 0; \
1585
for (octave_idx_type i = 0; i < nc; i++) \
1586
if (tmp[i] != EL_TYPE ()) \
1587
{ \
1588
retval.data (nel) = tmp[i]; \
1589
retval.ridx (nel++) = 0; \
1590
retval.cidx (i+1) = retval.cidx (i) + 1; \
1591
} \
1592
else \
1593
retval.cidx (i+1) = retval.cidx (i); \
1594
} \
1595
} \
1596
else if (nc == 0 && (nr == 0 || (nr == 1 && dim == -1))) \
1597
{ \
1598
if (MT_RESULT) \
1599
{ \
1600
retval = RET_TYPE (static_cast<octave_idx_type> (1), \
1601
static_cast<octave_idx_type> (1), \
1602
static_cast<octave_idx_type> (1)); \
1603
retval.cidx (0) = 0; \
1604
retval.cidx (1) = 1; \
1605
retval.ridx (0) = 0; \
1606
retval.data (0) = MT_RESULT; \
1607
} \
1608
else \
1609
retval = RET_TYPE (static_cast<octave_idx_type> (1), \
1610
static_cast<octave_idx_type> (1), \
1611
static_cast<octave_idx_type> (0)); \
1612
} \
1613
else if (nr == 0 && (dim == 0 || dim == -1)) \
1614
{ \
1615
if (MT_RESULT) \
1616
{ \
1617
retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, nc); \
1618
retval.cidx (0) = 0; \
1619
for (octave_idx_type i = 0; i < nc ; i++) \
1620
{ \
1621
retval.ridx (i) = 0; \
1622
retval.cidx (i+1) = i+1; \
1623
retval.data (i) = MT_RESULT; \
1624
} \
1625
} \
1626
else \
1627
retval = RET_TYPE (static_cast<octave_idx_type> (1), nc, \
1628
static_cast<octave_idx_type> (0)); \
1629
} \
1630
else if (nc == 0 && dim == 1) \
1631
{ \
1632
if (MT_RESULT) \
1633
{ \
1634
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), nr); \
1635
retval.cidx (0) = 0; \
1636
retval.cidx (1) = nr; \
1637
for (octave_idx_type i = 0; i < nr; i++) \
1638
{ \
1639
retval.ridx (i) = i; \
1640
retval.data (i) = MT_RESULT; \
1641
} \
1642
} \
1643
else \
1644
retval = RET_TYPE (nr, static_cast<octave_idx_type> (1), \
1645
static_cast<octave_idx_type> (0)); \
1646
} \
1647
else \
1648
retval.resize (nr > 0, nc > 0); \
1649
\
1650
return retval
1651
1652
#define SPARSE_REDUCTION_OP_ROW_EXPR(OP) \
1653
tmp[ridx (i)] OP data (i)
1654
1655
#define SPARSE_REDUCTION_OP_COL_EXPR(OP) \
1656
tmp[j] OP data (i)
1657
1658
#define SPARSE_REDUCTION_OP(RET_TYPE, EL_TYPE, OP, INIT_VAL, MT_RESULT) \
1659
SPARSE_BASE_REDUCTION_OP (RET_TYPE, EL_TYPE, \
1660
SPARSE_REDUCTION_OP_ROW_EXPR (OP), \
1661
SPARSE_REDUCTION_OP_COL_EXPR (OP), \
1662
INIT_VAL, MT_RESULT)
1663
1664
// Don't break from this loop if the test succeeds because
1665
// we are looping over the rows and not the columns in the inner loop.
1666
#define SPARSE_ANY_ALL_OP_ROW_CODE(TEST_OP, TEST_TRUE_VAL) \
1667
if (data (i) TEST_OP 0.0) \
1668
tmp[ridx (i)] = TEST_TRUE_VAL;
1669
1670
#define SPARSE_ANY_ALL_OP_COL_CODE(TEST_OP, TEST_TRUE_VAL) \
1671
if (data (i) TEST_OP 0.0) \
1672
{ \
1673
tmp[j] = TEST_TRUE_VAL; \
1674
break; \
1675
}
1676
1677
#define SPARSE_ANY_ALL_OP(DIM, INIT_VAL, MT_RESULT, TEST_OP, TEST_TRUE_VAL) \
1678
SPARSE_BASE_REDUCTION_OP (SparseBoolMatrix, char, \
1679
SPARSE_ANY_ALL_OP_ROW_CODE (TEST_OP, TEST_TRUE_VAL), \
1680
SPARSE_ANY_ALL_OP_COL_CODE (TEST_OP, TEST_TRUE_VAL), \
1681
INIT_VAL, MT_RESULT)
1682
1683
#define SPARSE_ALL_OP(DIM) \
1684
if ((rows () == 1 && dim == -1) || dim == 1) \
1685
return transpose (). all (0). transpose (); \
1686
else \
1687
{ \
1688
SPARSE_ANY_ALL_OP (DIM, (cidx (j+1) - cidx (j) < nr ? false : true), \
1689
true, ==, false); \
1690
}
1691
1692
#define SPARSE_ANY_OP(DIM) SPARSE_ANY_ALL_OP (DIM, false, false, !=, true)
1693
1694
#define SPARSE_SPARSE_MUL(RET_TYPE, RET_EL_TYPE, EL_TYPE) \
1695
octave_idx_type nr = m.rows (); \
1696
octave_idx_type nc = m.cols (); \
1697
\
1698
octave_idx_type a_nr = a.rows (); \
1699
octave_idx_type a_nc = a.cols (); \
1700
\
1701
if (nr == 1 && nc == 1) \
1702
{ \
1703
RET_EL_TYPE s = m.elem (0,0); \
1704
octave_idx_type nz = a.nnz (); \
1705
RET_TYPE r (a_nr, a_nc, nz); \
1706
\
1707
for (octave_idx_type i = 0; i < nz; i++) \
1708
{ \
1709
octave_quit (); \
1710
r.data (i) = s * a.data (i); \
1711
r.ridx (i) = a.ridx (i); \
1712
} \
1713
for (octave_idx_type i = 0; i < a_nc + 1; i++) \
1714
{ \
1715
octave_quit (); \
1716
r.cidx (i) = a.cidx (i); \
1717
} \
1718
\
1719
r.maybe_compress (true); \
1720
return r; \
1721
} \
1722
else if (a_nr == 1 && a_nc == 1) \
1723
{ \
1724
RET_EL_TYPE s = a.elem (0,0); \
1725
octave_idx_type nz = m.nnz (); \
1726
RET_TYPE r (nr, nc, nz); \
1727
\
1728
for (octave_idx_type i = 0; i < nz; i++) \
1729
{ \
1730
octave_quit (); \
1731
r.data (i) = m.data (i) * s; \
1732
r.ridx (i) = m.ridx (i); \
1733
} \
1734
for (octave_idx_type i = 0; i < nc + 1; i++) \
1735
{ \
1736
octave_quit (); \
1737
r.cidx (i) = m.cidx (i); \
1738
} \
1739
\
1740
r.maybe_compress (true); \
1741
return r; \
1742
} \
1743
else if (nc != a_nr) \
1744
octave::err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \
1745
else \
1746
{ \
1747
OCTAVE_LOCAL_BUFFER (octave_idx_type, w, nr); \
1748
RET_TYPE retval (nr, a_nc, static_cast<octave_idx_type> (0)); \
1749
for (octave_idx_type i = 0; i < nr; i++) \
1750
w[i] = 0; \
1751
retval.xcidx (0) = 0; \
1752
\
1753
octave_idx_type nel = 0; \
1754
\
1755
for (octave_idx_type i = 0; i < a_nc; i++) \
1756
{ \
1757
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1758
{ \
1759
octave_idx_type col = a.ridx (j); \
1760
for (octave_idx_type k = m.cidx (col) ; k < m.cidx (col+1); k++) \
1761
{ \
1762
if (w[m.ridx (k)] < i + 1) \
1763
{ \
1764
w[m.ridx (k)] = i + 1; \
1765
nel++; \
1766
} \
1767
octave_quit (); \
1768
} \
1769
} \
1770
retval.xcidx (i+1) = nel; \
1771
} \
1772
\
1773
if (nel == 0) \
1774
return RET_TYPE (nr, a_nc); \
1775
else \
1776
{ \
1777
for (octave_idx_type i = 0; i < nr; i++) \
1778
w[i] = 0; \
1779
\
1780
OCTAVE_LOCAL_BUFFER (RET_EL_TYPE, Xcol, nr); \
1781
\
1782
retval.change_capacity (nel); \
1783
/* The optimal break-point as estimated from simulations */
\
1784
/* Note that Mergesort is O(nz log(nz)) while searching all */
\
1785
/* values is O(nr), where nz here is nonzero per row of */
\
1786
/* length nr. The test itself was then derived from the */
\
1787
/* simulation with random square matrices and the observation */
\
1788
/* of the number of nonzero elements in the output matrix */
\
1789
/* it was found that the breakpoints were */
\
1790
/* nr: 500 1000 2000 5000 10000 */
\
1791
/* nz: 6 25 97 585 2202 */
\
1792
/* The below is a simplication of the 'polyfit'-ed parameters */
\
1793
/* to these breakpoints */
\
1794
octave_idx_type n_per_col = (a_nc > 43000 ? 43000 : \
1795
(a_nc * a_nc) / 43000); \
1796
octave_idx_type ii = 0; \
1797
octave_idx_type *ri = retval.xridx (); \
1798
octave_sort<octave_idx_type> sort; \
1799
\
1800
for (octave_idx_type i = 0; i < a_nc ; i++) \
1801
{ \
1802
if (retval.xcidx (i+1) - retval.xcidx (i) > n_per_col) \
1803
{ \
1804
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1805
{ \
1806
octave_idx_type col = a.ridx (j); \
1807
EL_TYPE tmpval = a.data (j); \
1808
for (octave_idx_type k = m.cidx (col) ; \
1809
k < m.cidx (col+1); k++) \
1810
{ \
1811
octave_quit (); \
1812
octave_idx_type row = m.ridx (k); \
1813
if (w[row] < i + 1) \
1814
{ \
1815
w[row] = i + 1; \
1816
Xcol[row] = tmpval * m.data (k); \
1817
} \
1818
else \
1819
Xcol[row] += tmpval * m.data (k); \
1820
} \
1821
} \
1822
for (octave_idx_type k = 0; k < nr; k++) \
1823
if (w[k] == i + 1) \
1824
{ \
1825
retval.xdata (ii) = Xcol[k]; \
1826
retval.xridx (ii++) = k; \
1827
} \
1828
} \
1829
else \
1830
{ \
1831
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1832
{ \
1833
octave_idx_type col = a.ridx (j); \
1834
EL_TYPE tmpval = a.data (j); \
1835
for (octave_idx_type k = m.cidx (col) ; \
1836
k < m.cidx (col+1); k++) \
1837
{ \
1838
octave_quit (); \
1839
octave_idx_type row = m.ridx (k); \
1840
if (w[row] < i + 1) \
1841
{ \
1842
w[row] = i + 1; \
1843
retval.xridx (ii++) = row; \
1844
Xcol[row] = tmpval * m.data (k); \
1845
} \
1846
else \
1847
Xcol[row] += tmpval * m.data (k); \
1848
} \
1849
} \
1850
sort.sort (ri + retval.xcidx (i), ii - retval.xcidx (i)); \
1851
for (octave_idx_type k = retval.xcidx (i); k < ii; k++) \
1852
retval.xdata (k) = Xcol[retval.xridx (k)]; \
1853
} \
1854
} \
1855
retval.maybe_compress (true); \
1856
return retval; \
1857
} \
1858
}
1859
1860
#define SPARSE_FULL_MUL(RET_TYPE, EL_TYPE, ZERO) \
1861
octave_idx_type nr = m.rows (); \
1862
octave_idx_type nc = m.cols (); \
1863
\
1864
octave_idx_type a_nr = a.rows (); \
1865
octave_idx_type a_nc = a.cols (); \
1866
\
1867
if (nr == 1 && nc == 1) \
1868
{ \
1869
RET_TYPE retval = m.elem (0,0) * a; \
1870
return retval; \
1871
} \
1872
else if (nc != a_nr) \
1873
octave::err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \
1874
else \
1875
{ \
1876
RET_TYPE retval (nr, a_nc, ZERO); \
1877
\
1878
for (octave_idx_type i = 0; i < a_nc ; i++) \
1879
{ \
1880
for (octave_idx_type j = 0; j < a_nr; j++) \
1881
{ \
1882
octave_quit (); \
1883
\
1884
EL_TYPE tmpval = a.elem (j,i); \
1885
for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \
1886
retval.elem (m.ridx (k),i) += tmpval * m.data (k); \
1887
} \
1888
} \
1889
return retval; \
1890
}
1891
1892
#define SPARSE_FULL_TRANS_MUL(RET_TYPE, EL_TYPE, ZERO, CONJ_OP) \
1893
octave_idx_type nr = m.rows (); \
1894
octave_idx_type nc = m.cols (); \
1895
\
1896
octave_idx_type a_nr = a.rows (); \
1897
octave_idx_type a_nc = a.cols (); \
1898
\
1899
if (nr == 1 && nc == 1) \
1900
{ \
1901
RET_TYPE retval = CONJ_OP (m.elem (0,0)) * a; \
1902
return retval; \
1903
} \
1904
else if (nr != a_nr) \
1905
octave::err_nonconformant ("operator *", nc, nr, a_nr, a_nc); \
1906
else \
1907
{ \
1908
RET_TYPE retval (nc, a_nc); \
1909
\
1910
for (octave_idx_type i = 0; i < a_nc ; i++) \
1911
{ \
1912
for (octave_idx_type j = 0; j < nc; j++) \
1913
{ \
1914
octave_quit (); \
1915
\
1916
EL_TYPE acc = ZERO; \
1917
for (octave_idx_type k = m.cidx (j) ; k < m.cidx (j+1); k++) \
1918
acc += a.elem (m.ridx (k),i) * CONJ_OP (m.data (k)); \
1919
retval.xelem (j,i) = acc; \
1920
} \
1921
} \
1922
return retval; \
1923
}
1924
1925
#define FULL_SPARSE_MUL(RET_TYPE, EL_TYPE, ZERO) \
1926
octave_idx_type nr = m.rows (); \
1927
octave_idx_type nc = m.cols (); \
1928
\
1929
octave_idx_type a_nr = a.rows (); \
1930
octave_idx_type a_nc = a.cols (); \
1931
\
1932
if (a_nr == 1 && a_nc == 1) \
1933
{ \
1934
RET_TYPE retval = m * a.elem (0,0); \
1935
return retval; \
1936
} \
1937
else if (nc != a_nr) \
1938
octave::err_nonconformant ("operator *", nr, nc, a_nr, a_nc); \
1939
else \
1940
{ \
1941
RET_TYPE retval (nr, a_nc, ZERO); \
1942
\
1943
for (octave_idx_type i = 0; i < a_nc ; i++) \
1944
{ \
1945
octave_quit (); \
1946
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1947
{ \
1948
octave_idx_type col = a.ridx (j); \
1949
EL_TYPE tmpval = a.data (j); \
1950
\
1951
for (octave_idx_type k = 0 ; k < nr; k++) \
1952
retval.xelem (k,i) += tmpval * m.elem (k,col); \
1953
} \
1954
} \
1955
return retval; \
1956
}
1957
1958
#define FULL_SPARSE_MUL_TRANS(RET_TYPE, EL_TYPE, ZERO, CONJ_OP) \
1959
octave_idx_type nr = m.rows (); \
1960
octave_idx_type nc = m.cols (); \
1961
\
1962
octave_idx_type a_nr = a.rows (); \
1963
octave_idx_type a_nc = a.cols (); \
1964
\
1965
if (a_nr == 1 && a_nc == 1) \
1966
{ \
1967
RET_TYPE retval = m * CONJ_OP (a.elem (0,0)); \
1968
return retval; \
1969
} \
1970
else if (nc != a_nc) \
1971
octave::err_nonconformant ("operator *", nr, nc, a_nc, a_nr); \
1972
else \
1973
{ \
1974
RET_TYPE retval (nr, a_nr, ZERO); \
1975
\
1976
for (octave_idx_type i = 0; i < a_nc ; i++) \
1977
{ \
1978
octave_quit (); \
1979
for (octave_idx_type j = a.cidx (i); j < a.cidx (i+1); j++) \
1980
{ \
1981
octave_idx_type col = a.ridx (j); \
1982
EL_TYPE tmpval = CONJ_OP (a.data (j)); \
1983
for (octave_idx_type k = 0 ; k < nr; k++) \
1984
retval.xelem (k,col) += tmpval * m.elem (k,i); \
1985
} \
1986
} \
1987
return retval; \
1988
}
1989
1990
#endif